name: string
code: string
asm: string
file: string
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)0, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::linearBounds(embree::Vec3fa const&, float, float, embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const::'lambda'(unsigned long)::operator()(unsigned long) const
__forceinline LBBox3fa linearBounds(const Vec3fa& ofs, const float scale, const float r_scale0,
                                    const LinearSpace3fa& space, size_t primID, const BBox1f& dt) const
{
  return LBBox3fa([&] (size_t itime) { return bounds(ofs, scale, r_scale0, space, primID, itime); },
                  dt, this->time_range, fnumTimeSegments);
}
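The LBBox3fa constructor invoked here receives a per-timestep bounds functor plus the clipped time interval. Below is a minimal sketch of one conservative way to realize that contract; Box and LinearBox are simplified 1D stand-ins (not Embree's types), the interval is assumed already normalized to [0,1], and this is not Embree's exact interpolation-correction scheme.

#include <algorithm>
#include <cmath>
#include <functional>

struct Box {                        // 1D stand-in for a 3D AABB
  float lower, upper;
  void extend(const Box& b) { lower = std::min(lower, b.lower); upper = std::max(upper, b.upper); }
};
struct LinearBox { Box bounds0, bounds1; };   // bounds at the interval ends

LinearBox linearBounds(const std::function<Box(size_t)>& bounds,
                       float t0, float t1, size_t numTimeSegments)
{
  const size_t i0 = (size_t)std::floor(t0 * numTimeSegments);
  const size_t i1 = (size_t)std::ceil (t1 * numTimeSegments);
  LinearBox lb { bounds(i0), bounds(i1) };
  // Folding every interior step into both end boxes keeps the linear
  // interpolation lerp(bounds0, bounds1, f) a superset of each per-step
  // box, so the result stays conservative over the whole interval.
  for (size_t i = i0 + 1; i < i1; i++) {
    const Box b = bounds(i);
    lb.bounds0.extend(b);
    lb.bounds1.extend(b);
  }
  return lb;
}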
pushq %rbx movq %rdi, %rax movq 0x28(%rsi), %rcx movq (%rsi), %r10 movq 0x8(%rsi), %rdi movq 0x10(%rsi), %r8 vbroadcastss (%rdi), %xmm1 movq 0x20(%rsi), %rdi vmulss (%r8), %xmm1, %xmm0 movq 0x68(%rcx), %r8 imulq (%rdi), %r8 movq 0x58(%rcx), %rdi movl (%rdi,%r8), %r11d movq 0x188(%rcx), %rdi imulq $0x38, %rdx, %r8 movq (%rdi,%r8), %rdx movq 0x10(%rdi,%r8), %rbx leal 0x1(%r11), %r9d leal 0x2(%r11), %r8d leal 0x3(%r11), %edi imulq %rbx, %r11 vmovaps (%rdx,%r11), %xmm2 imulq %rbx, %r9 vmovaps (%rdx,%r9), %xmm5 imulq %rbx, %r8 vmovaps (%rdx,%r8), %xmm6 movq 0x18(%rsi), %rsi imulq %rbx, %rdi vmovaps (%rdx,%rdi), %xmm9 vmovaps (%r10), %xmm10 vsubps %xmm10, %xmm2, %xmm2 vmulps %xmm2, %xmm1, %xmm2 vbroadcastss %xmm2, %xmm4 vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1] vmovaps (%rsi), %xmm13 vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2] vmovaps 0x10(%rsi), %xmm14 vmovaps 0x20(%rsi), %xmm15 vmulps %xmm2, %xmm15, %xmm3 vfmadd231ps %xmm7, %xmm14, %xmm3 # xmm3 = (xmm14 * xmm7) + xmm3 vfmadd231ps %xmm4, %xmm13, %xmm3 # xmm3 = (xmm13 * xmm4) + xmm3 vmovss 0x24c(%rcx), %xmm2 vmulss 0xc(%rdx,%r11), %xmm2, %xmm4 vmulss %xmm4, %xmm0, %xmm4 vsubps %xmm10, %xmm5, %xmm5 vmulps %xmm5, %xmm1, %xmm5 vbroadcastss %xmm5, %xmm8 vshufps $0x55, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1,1,1] vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2] vmulps %xmm5, %xmm15, %xmm7 vfmadd231ps %xmm11, %xmm14, %xmm7 # xmm7 = (xmm14 * xmm11) + xmm7 vfmadd231ps %xmm8, %xmm13, %xmm7 # xmm7 = (xmm13 * xmm8) + xmm7 vmulss 0xc(%rdx,%r9), %xmm2, %xmm5 vmulss %xmm5, %xmm0, %xmm8 vsubps %xmm10, %xmm6, %xmm5 vmulps %xmm5, %xmm1, %xmm5 vbroadcastss %xmm5, %xmm6 vshufps $0x55, %xmm5, %xmm5, %xmm12 # xmm12 = xmm5[1,1,1,1] vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2] vmulps %xmm5, %xmm15, %xmm11 vfmadd231ps %xmm12, %xmm14, %xmm11 # xmm11 = (xmm14 * xmm12) + xmm11 vfmadd231ps %xmm6, %xmm13, %xmm11 # xmm11 = (xmm13 * xmm6) + xmm11 vmulss 0xc(%rdx,%r8), %xmm2, %xmm5 vmulss %xmm5, %xmm0, %xmm12 vsubps %xmm10, %xmm9, %xmm5 vmulps %xmm5, %xmm1, %xmm1 vbroadcastss %xmm1, %xmm5 vshufps $0x55, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,1,1,1] vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2] vmulps %xmm1, %xmm15, %xmm15 vfmadd231ps %xmm6, %xmm14, %xmm15 # xmm15 = (xmm14 * xmm6) + xmm15 vfmadd231ps %xmm5, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm5) + xmm15 vmulss 0xc(%rdx,%rdi), %xmm2, %xmm1 vmulss %xmm1, %xmm0, %xmm16 movl 0x248(%rcx), %ecx cmpq $0x4, %rcx jne 0x1ea7c94 vbroadcastss %xmm12, %xmm0 vblendps $0x8, %xmm0, %xmm11, %xmm1 # xmm1 = xmm11[0,1,2],xmm0[3] leaq 0x2888b8(%rip), %rcx # 0x213036c vmovups 0x110(%rcx), %xmm2 vbroadcastss %xmm3, %xmm5 vshufps $0x55, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[1,1,1,1] vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2] vbroadcastss %xmm4, %xmm4 vmovups 0x594(%rcx), %xmm9 vbroadcastss %xmm7, %xmm10 vshufps $0x55, %xmm7, %xmm7, %xmm12 # xmm12 = xmm7[1,1,1,1] vshufps $0xaa, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2] vbroadcastss %xmm8, %xmm8 vmovups 0xa18(%rcx), %xmm13 vbroadcastss %xmm11, %xmm14 vshufps $0x55, %xmm11, %xmm11, %xmm17 # xmm17 = xmm11[1,1,1,1] vmovups 0xe9c(%rcx), %xmm18 vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2] vbroadcastss %xmm15, %xmm19 vshufps $0x55, %xmm15, %xmm15, %xmm20 # xmm20 = xmm15[1,1,1,1] vshufps $0xaa, %xmm15, %xmm15, %xmm15 # xmm15 = xmm15[2,2,2,2] vbroadcastss %xmm16, %xmm16 vmulps %xmm18, %xmm19, %xmm19 vmulps %xmm18, %xmm20, %xmm20 vmulps %xmm18, %xmm15, %xmm15 vmulps %xmm18, %xmm16, %xmm16 vfmadd231ps %xmm14, %xmm13, 
%xmm19 # xmm19 = (xmm13 * xmm14) + xmm19 vfmadd231ps %xmm17, %xmm13, %xmm20 # xmm20 = (xmm13 * xmm17) + xmm20 vfmadd231ps %xmm11, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm11) + xmm15 vfmadd231ps %xmm13, %xmm0, %xmm16 # xmm16 = (xmm0 * xmm13) + xmm16 vfmadd231ps %xmm10, %xmm9, %xmm19 # xmm19 = (xmm9 * xmm10) + xmm19 vfmadd231ps %xmm12, %xmm9, %xmm20 # xmm20 = (xmm9 * xmm12) + xmm20 vfmadd231ps %xmm7, %xmm9, %xmm15 # xmm15 = (xmm9 * xmm7) + xmm15 vfmadd231ps %xmm8, %xmm9, %xmm16 # xmm16 = (xmm9 * xmm8) + xmm16 vfmadd231ps %xmm5, %xmm2, %xmm19 # xmm19 = (xmm2 * xmm5) + xmm19 vfmadd231ps %xmm6, %xmm2, %xmm20 # xmm20 = (xmm2 * xmm6) + xmm20 vfmadd231ps %xmm3, %xmm2, %xmm15 # xmm15 = (xmm2 * xmm3) + xmm15 vfmadd231ps %xmm4, %xmm2, %xmm16 # xmm16 = (xmm2 * xmm4) + xmm16 vshufps $0xb1, %xmm19, %xmm19, %xmm2 # xmm2 = xmm19[1,0,3,2] vminps %xmm19, %xmm2, %xmm3 vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0] vminps %xmm3, %xmm4, %xmm3 vshufps $0xb1, %xmm20, %xmm20, %xmm4 # xmm4 = xmm20[1,0,3,2] vminps %xmm20, %xmm4, %xmm5 vshufpd $0x1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0] vminps %xmm5, %xmm6, %xmm5 vinsertps $0x1c, %xmm5, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm5[0],zero,zero vshufps $0xb1, %xmm15, %xmm15, %xmm5 # xmm5 = xmm15[1,0,3,2] vminps %xmm15, %xmm5, %xmm6 vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0] vminps %xmm6, %xmm7, %xmm6 vinsertps $0x20, %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[0,1],xmm6[0],xmm3[3] vmaxps %xmm19, %xmm2, %xmm2 vshufpd $0x1, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,0] vmaxps %xmm2, %xmm6, %xmm2 vmaxps %xmm20, %xmm4, %xmm4 vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0] vmaxps %xmm4, %xmm6, %xmm4 vinsertps $0x1c, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0],zero,zero vmaxps %xmm15, %xmm5, %xmm4 vshufpd $0x1, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,0] vmaxps %xmm4, %xmm5, %xmm4 vinsertps $0x20, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,1],xmm4[0],xmm2[3] vandps 0x792b0(%rip){1to4}, %xmm16, %xmm4 # 0x1f20ec4 vprolq $0x20, %xmm4, %xmm5 vmaxps %xmm4, %xmm5, %xmm4 vbroadcastss %xmm4, %xmm5 vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2] vmaxps %xmm5, %xmm4, %xmm4 vminps %xmm1, %xmm3, %xmm3 vmaxps %xmm1, %xmm2, %xmm1 vandps 0x79285(%rip){1to4}, %xmm0, %xmm0 # 0x1f20ec4 vmaxps %xmm0, %xmm4, %xmm2 vsubps %xmm2, %xmm3, %xmm0 vaddps %xmm2, %xmm1, %xmm1 vbroadcastss 0x79270(%rip), %xmm2 # 0x1f20ec4 vandps %xmm2, %xmm0, %xmm3 vandps %xmm2, %xmm1, %xmm2 vmaxps %xmm2, %xmm3, %xmm2 vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3] vmaxss %xmm2, %xmm3, %xmm3 vshufpd $0x1, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,0] vmaxss %xmm3, %xmm2, %xmm2 vmulss 0x4936b(%rip), %xmm2, %xmm2 # 0x1ef0fe4 vbroadcastss %xmm2, %xmm2 vsubps %xmm2, %xmm0, %xmm0 vaddps %xmm2, %xmm1, %xmm1 vmovaps %xmm0, (%rax) vmovaps %xmm1, 0x10(%rax) popq %rbx vzeroupper retq testl %ecx, %ecx js 0x1ea7e95 vpbroadcastd %ecx, %ymm0 vmovdqu %ymm0, -0x20(%rsp) vbroadcastss %xmm3, %ymm0 vmovups %ymm0, -0x40(%rsp) vbroadcastss 0x6aa48(%rip), %ymm14 # 0x1f12704 vpermps %ymm3, %ymm14, %ymm2 vbroadcastss 0x79211(%rip), %ymm17 # 0x1f20edc vpermps %ymm3, %ymm17, %ymm3 vbroadcastss %xmm4, %ymm4 vbroadcastss %xmm7, %ymm5 vpermps %ymm7, %ymm14, %ymm6 vpermps %ymm7, %ymm17, %ymm7 vbroadcastss %xmm8, %ymm8 vbroadcastss %xmm11, %ymm9 vpermps %ymm11, %ymm14, %ymm10 vpermps %ymm11, %ymm17, %ymm11 vbroadcastss %xmm12, %ymm12 vbroadcastss %xmm15, %ymm13 vpermps %ymm15, %ymm14, %ymm14 vpermps %ymm15, %ymm17, %ymm15 vbroadcastss %xmm16, %ymm16 movq %rcx, %rdx shlq $0x6, %rdx leaq (%rdx,%rcx,4), %rdx addq 0x27d1b0(%rip), %rdx # 0x2124ed8 vbroadcastss 
0x44e52(%rip), %ymm20 # 0x1eecb84 vbroadcastss 0x43ce4(%rip), %ymm17 # 0x1eeba20 vxorps %xmm21, %xmm21, %xmm21 xorl %esi, %esi vmovaps %ymm17, %ymm22 vmovaps %ymm17, %ymm29 vmovaps %ymm20, %ymm26 vmovaps %ymm20, %ymm28 vpbroadcastd %esi, %ymm19 vpord 0xb2bb4(%rip), %ymm19, %ymm19 # 0x1f5a920 vmovups (%rdx,%rsi,4), %ymm23 vmovups 0x484(%rdx,%rsi,4), %ymm24 vmovups 0x908(%rdx,%rsi,4), %ymm25 vpcmpgtd -0x20(%rsp), %ymm19, %k1 vmovups 0xd8c(%rdx,%rsi,4), %ymm19 vmulps %ymm19, %ymm13, %ymm18 vmulps %ymm19, %ymm14, %ymm0 vmulps %ymm19, %ymm15, %ymm1 vmulps %ymm19, %ymm16, %ymm19 vfmadd231ps %ymm9, %ymm25, %ymm18 # ymm18 = (ymm25 * ymm9) + ymm18 vfmadd231ps %ymm10, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm10) + ymm0 vfmadd231ps %ymm11, %ymm25, %ymm1 # ymm1 = (ymm25 * ymm11) + ymm1 vfmadd231ps %ymm25, %ymm12, %ymm19 # ymm19 = (ymm12 * ymm25) + ymm19 vfmadd231ps %ymm5, %ymm24, %ymm18 # ymm18 = (ymm24 * ymm5) + ymm18 vfmadd231ps %ymm6, %ymm24, %ymm0 # ymm0 = (ymm24 * ymm6) + ymm0 vfmadd231ps %ymm7, %ymm24, %ymm1 # ymm1 = (ymm24 * ymm7) + ymm1 vfmadd231ps %ymm24, %ymm8, %ymm19 # ymm19 = (ymm8 * ymm24) + ymm19 vfmadd231ps -0x40(%rsp), %ymm23, %ymm18 # ymm18 = (ymm23 * mem) + ymm18 vfmadd231ps %ymm2, %ymm23, %ymm0 # ymm0 = (ymm23 * ymm2) + ymm0 vfmadd231ps %ymm3, %ymm23, %ymm1 # ymm1 = (ymm23 * ymm3) + ymm1 vfmadd231ps %ymm23, %ymm4, %ymm19 # ymm19 = (ymm4 * ymm23) + ymm19 vandps 0x790bc(%rip){1to8}, %ymm19, %ymm19 # 0x1f20ec4 vminps %ymm18, %ymm29, %ymm30 vmovaps %ymm29, %ymm30 {%k1} vminps %ymm0, %ymm22, %ymm27 vmovaps %ymm22, %ymm27 {%k1} vminps %ymm1, %ymm17, %ymm31 vmovaps %ymm17, %ymm31 {%k1} vmaxps %ymm18, %ymm28, %ymm24 vmovaps %ymm28, %ymm24 {%k1} vmaxps %ymm0, %ymm26, %ymm25 vmovaps %ymm26, %ymm25 {%k1} vmaxps %ymm1, %ymm20, %ymm23 vmovaps %ymm20, %ymm23 {%k1} vmaxps %ymm19, %ymm21, %ymm19 vmovaps %ymm21, %ymm19 {%k1} addq $0x8, %rsi vmovaps %ymm31, %ymm17 vmovaps %ymm27, %ymm22 vmovaps %ymm30, %ymm29 vmovaps %ymm23, %ymm20 vmovaps %ymm25, %ymm26 vmovaps %ymm24, %ymm28 vmovaps %ymm19, %ymm21 cmpq %rcx, %rsi jbe 0x1ea7d5c jmp 0x1ea7ec7 vbroadcastss 0x43b81(%rip), %ymm30 # 0x1eeba20 vbroadcastss 0x44cdb(%rip), %ymm24 # 0x1eecb84 vxorps %xmm19, %xmm19, %xmm19 vmovaps %ymm24, %ymm25 vmovaps %ymm24, %ymm23 vmovaps %ymm30, %ymm27 vmovaps %ymm30, %ymm31 vshufps $0xb1, %ymm30, %ymm30, %ymm0 # ymm0 = ymm30[1,0,3,2,5,4,7,6] vminps %ymm0, %ymm30, %ymm0 vshufpd $0x5, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2] vminps %ymm1, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm1 vminps %xmm1, %xmm0, %xmm0 vshufps $0xb1, %ymm27, %ymm27, %ymm1 # ymm1 = ymm27[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm27, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm2 vminps %xmm2, %xmm1, %xmm1 vunpcklps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vshufps $0xb1, %ymm31, %ymm31, %ymm1 # ymm1 = ymm31[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm31, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm2 vminps %xmm2, %xmm1, %xmm1 vinsertps $0x28, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],zero vshufps $0xb1, %ymm24, %ymm24, %ymm1 # ymm1 = ymm24[1,0,3,2,5,4,7,6] vmaxps %ymm1, %ymm24, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vmaxps %ymm2, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm2 vmaxps %xmm2, %xmm1, %xmm1 vshufps $0xb1, %ymm25, %ymm25, %ymm2 # ymm2 = ymm25[1,0,3,2,5,4,7,6] vmaxps %ymm2, %ymm25, %ymm2 vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2] vmaxps %ymm3, %ymm2, 
%ymm2 vextractf128 $0x1, %ymm2, %xmm3 vmaxps %xmm3, %xmm2, %xmm2 vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] vshufps $0xb1, %ymm23, %ymm23, %ymm2 # ymm2 = ymm23[1,0,3,2,5,4,7,6] vmaxps %ymm2, %ymm23, %ymm2 vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2] vmaxps %ymm3, %ymm2, %ymm2 vextractf128 $0x1, %ymm2, %xmm3 vmaxps %xmm3, %xmm2, %xmm2 vinsertps $0x28, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],zero vshufps $0xb1, %ymm19, %ymm19, %ymm2 # ymm2 = ymm19[1,0,3,2,5,4,7,6] vmaxps %ymm2, %ymm19, %ymm2 vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2] vmaxps %ymm3, %ymm2, %ymm2 vextractf128 $0x1, %ymm2, %xmm3 vmaxps %xmm3, %xmm2, %xmm2 vbroadcastss %xmm2, %xmm2 vsubps %xmm2, %xmm0, %xmm0 jmp 0x1ea7c47 nop
/embree[P]embree/kernels/common/scene_curves.cpp
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::createPrimRefArrayMB(embree::PrimRef*, embree::BBox<float> const&, embree::range<unsigned long> const&, unsigned long, unsigned int) const
PrimInfo createPrimRefArrayMB(PrimRef* prims, const BBox1f& time_range, const range<size_t>& r,
                              size_t k, unsigned int geomID) const
{
  PrimInfo pinfo(empty);
  const BBox1f t0t1 = BBox1f::intersect(this->time_range, time_range);
  if (t0t1.empty()) return pinfo;
  for (size_t j=r.begin(); j<r.end(); j++)
  {
    if (!valid(ctype, j, this->timeSegmentRange(t0t1))) continue;
    const LBBox3fa lbounds = linearBounds(j,t0t1);
    if (lbounds.bounds0.empty() || lbounds.bounds1.empty())
      continue; // checks oriented curves with invalid normals which cause NaNs here
    const PrimRef prim(lbounds.bounds(),geomID,unsigned(j));
    pinfo.add_primref(prim);
    prims[k++] = prim;
  }
  return pinfo;
}
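The first step above clips the geometry's time range against the query range, which is a plain interval intersection; a minimal stand-in for BBox1f (the real class lives in Embree's math headers) could look like this:

#include <algorithm>

struct Range1f {                    // simplified stand-in for embree's BBox1f
  float lower, upper;
  bool empty() const { return lower > upper; }
  static Range1f intersect(const Range1f& a, const Range1f& b) {
    return { std::max(a.lower, b.lower), std::min(a.upper, b.upper) };
  }
};
// e.g. intersect({0.0f,1.0f}, {0.25f,2.0f}) yields {0.25f,1.0f}; an empty
// result (lower > upper) means no motion-blur segment overlaps the query,
// and the function above returns an empty PrimInfo immediately.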
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x1d8, %rsp # imm = 0x1D8 movq %rdi, %rax vbroadcastss 0x424ed(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x43644(%rip), %xmm1 # 0x1eecb84 vmovaps %xmm1, 0x10(%rdi) movq %rdx, 0x100(%rsp) vmovaps %xmm0, 0x20(%rdi) vmovaps %xmm1, 0x30(%rdi) vxorps %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x40(%rdi) vmovsd 0x2c(%rsi), %xmm0 vmovsd (%rcx), %xmm1 vcmpltps %xmm1, %xmm0, %k1 vinsertps $0x50, %xmm0, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm0[1],xmm1[2,3] vinsertps $0x50, %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[1],xmm0[2,3] vmovaps %xmm2, %xmm4 {%k1} vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vucomiss %xmm5, %xmm4 ja 0x1eaa6c2 movq %r9, 0x60(%rsp) vmovaps (%rax), %xmm0 vmovaps %xmm0, 0xa0(%rsp) vmovaps 0x10(%rax), %xmm0 vmovaps 0x20(%rax), %xmm1 vmovaps %xmm1, 0x80(%rsp) movq (%r8), %r14 vmovaps 0x30(%rax), %xmm1 vmovaps %xmm1, 0x70(%rsp) movq %rax, 0xf8(%rsp) movq 0x48(%rax), %rax cmpq 0x8(%r8), %r14 vmovaps %xmm0, 0x90(%rsp) movq %rax, 0x58(%rsp) jae 0x1eaa67d vmovss 0x4734f(%rip), %xmm29 # 0x1ef0940 vxorps %xmm6, %xmm6, %xmm6 vmovss 0x47345(%rip), %xmm31 # 0x1ef0944 vxorps %xmm22, %xmm22, %xmm22 vbroadcastss 0x778b6(%rip), %xmm15 # 0x1f20ec4 vmovss 0x479c8(%rip), %xmm16 # 0x1ef0fe0 vbroadcastss 0x77dda(%rip), %xmm17 # 0x1f213fc vbroadcastss 0x479b4(%rip), %xmm18 # 0x1ef0fe0 movq %r8, 0x68(%rsp) vmovaps %xmm4, 0xc0(%rsp) vmovaps %xmm5, 0xb0(%rsp) movq %rsi, 0x10(%rsp) vmovsd 0x2c(%rsi), %xmm0 vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3] vsubss %xmm0, %xmm1, %xmm1 vsubss %xmm0, %xmm4, %xmm2 vdivss %xmm1, %xmm2, %xmm2 vsubss %xmm0, %xmm5, %xmm0 vdivss %xmm1, %xmm0, %xmm0 vmulss %xmm29, %xmm2, %xmm1 vmulss %xmm31, %xmm0, %xmm0 movq 0x58(%rsi), %rax movq 0x68(%rsi), %rcx imulq %r14, %rcx movl (%rax,%rcx), %eax leal 0x3(%rax), %ecx movq 0x188(%rsi), %rdx cmpq %rcx, 0x18(%rdx) jbe 0x1eaa670 vmovss 0x28(%rsi), %xmm3 vmulss %xmm1, %xmm3, %xmm1 vroundss $0x9, %xmm1, %xmm1, %xmm1 vmaxss %xmm1, %xmm6, %xmm1 vcvttss2si %xmm1, %esi vmulss %xmm0, %xmm3, %xmm0 vroundss $0xa, %xmm0, %xmm0, %xmm0 vminss %xmm3, %xmm0, %xmm0 vcvttss2si %xmm0, %edi cmpl %edi, %esi seta %r8b vmovss %xmm3, 0xc(%rsp) ja 0x1eaa179 movslq %esi, %rsi movslq %edi, %rdi movq %rdi, 0x20(%rsp) leal 0x1(%rax), %r9d leal 0x2(%rax), %r10d movq 0x10(%rsp), %rdi movq 0x1a8(%rdi), %r12 vmovss 0x24c(%rdi), %xmm0 vmovss %xmm0, 0x30(%rsp) leaq 0x27dbe1(%rip), %rdi # 0x21272e4 vmovups 0x1dc(%rdi), %ymm24 vmovups 0x660(%rdi), %ymm25 vmovups 0xae4(%rdi), %ymm26 vmovups 0xf68(%rdi), %ymm27 vmovups 0x13ec(%rdi), %ymm5 vmovups 0x1870(%rdi), %ymm6 vmovups 0x1cf4(%rdi), %ymm7 vmovups 0x2178(%rdi), %ymm8 imulq $0x38, %rsi, %r13 addq $0x10, %r13 movq -0x10(%rdx,%r13), %rbx movq (%rdx,%r13), %r11 movq %r11, %rbp imulq %rax, %rbp vmovss 0xc(%rbx,%rbp), %xmm9 vandps %xmm15, %xmm9, %xmm9 vucomiss %xmm9, %xmm16 jbe 0x1eaa16f movq %r11, %rdi imulq %r9, %rdi vmovss 0xc(%rbx,%rdi), %xmm9 vandps %xmm15, %xmm9, %xmm9 vucomiss %xmm9, %xmm16 jbe 0x1eaa16f movq %r11, %r15 imulq %r10, %r15 vmovss 0xc(%rbx,%r15), %xmm9 vandps %xmm15, %xmm9, %xmm9 vucomiss %xmm9, %xmm16 jbe 0x1eaa16f imulq %rcx, %r11 vmovss 0xc(%rbx,%r11), %xmm9 vandps %xmm15, %xmm9, %xmm9 vucomiss %xmm9, %xmm16 jbe 0x1eaa16f vmovaps (%rbx,%rbp), %xmm9 vcmpnleps %xmm17, %xmm9, %k1 vcmpltps %xmm18, %xmm9, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %ebp testb $0x7, %bpl jne 0x1eaa16f vmovaps (%rbx,%rdi), %xmm10 vcmpnleps %xmm17, %xmm10, %k1 vcmpltps %xmm18, %xmm10, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f 
vmovaps (%rbx,%r15), %xmm12 vcmpnleps %xmm17, %xmm12, %k1 vcmpltps %xmm18, %xmm12, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f vmovaps (%rbx,%r11), %xmm13 vcmpnleps %xmm17, %xmm13, %k1 vcmpltps %xmm18, %xmm13, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f movq -0x10(%r12,%r13), %rbx movq (%r12,%r13), %r15 movq %r15, %rdi imulq %rax, %rdi vmovups (%rbx,%rdi), %xmm11 vcmpnleps %xmm17, %xmm11, %k1 vcmpltps %xmm18, %xmm11, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f movq %r15, %rdi imulq %r9, %rdi vmovups (%rbx,%rdi), %xmm14 vcmpnleps %xmm17, %xmm14, %k1 vcmpltps %xmm18, %xmm14, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f vshufps $0xff, %xmm9, %xmm9, %xmm15 # xmm15 = xmm9[3,3,3,3] vmovss 0x30(%rsp), %xmm0 vmulss %xmm0, %xmm15, %xmm15 vinsertps $0x30, %xmm15, %xmm9, %xmm15 # xmm15 = xmm9[0,1,2],xmm15[0] vshufps $0xff, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[3,3,3,3] vmulss %xmm0, %xmm9, %xmm9 vinsertps $0x30, %xmm9, %xmm10, %xmm16 # xmm16 = xmm10[0,1,2],xmm9[0] vshufps $0xff, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[3,3,3,3] vmulss %xmm0, %xmm9, %xmm9 vinsertps $0x30, %xmm9, %xmm12, %xmm17 # xmm17 = xmm12[0,1,2],xmm9[0] vshufps $0xff, %xmm13, %xmm13, %xmm9 # xmm9 = xmm13[3,3,3,3] vmulss %xmm0, %xmm9, %xmm9 vinsertps $0x30, %xmm9, %xmm13, %xmm13 # xmm13 = xmm13[0,1,2],xmm9[0] movq %r15, %rdi imulq %r10, %rdi vmovups (%rbx,%rdi), %xmm18 imulq %rcx, %r15 vmovups (%rbx,%r15), %xmm19 vbroadcastss 0x7758f(%rip), %xmm1 # 0x1f20ec0 vmulps %xmm1, %xmm13, %xmm12 vxorps %xmm9, %xmm9, %xmm9 vfmadd213ps %xmm12, %xmm17, %xmm9 # xmm9 = (xmm17 * xmm9) + xmm12 vaddps %xmm9, %xmm16, %xmm9 vfmadd231ps %xmm1, %xmm15, %xmm9 # xmm9 = (xmm15 * xmm1) + xmm9 vmulps %xmm22, %xmm13, %xmm10 vbroadcastss 0x43226(%rip), %xmm2 # 0x1eecb80 vfmadd231ps %xmm2, %xmm17, %xmm10 # xmm10 = (xmm17 * xmm2) + xmm10 vfmadd231ps %xmm22, %xmm16, %xmm10 # xmm10 = (xmm16 * xmm22) + xmm10 vfnmadd231ps %xmm2, %xmm15, %xmm10 # xmm10 = -(xmm15 * xmm2) + xmm10 vmulps %xmm1, %xmm19, %xmm20 vxorps %xmm21, %xmm21, %xmm21 vfmadd213ps %xmm20, %xmm18, %xmm21 # xmm21 = (xmm18 * xmm21) + xmm20 vaddps %xmm21, %xmm14, %xmm21 vfmadd231ps %xmm1, %xmm11, %xmm21 # xmm21 = (xmm11 * xmm1) + xmm21 vxorps %xmm0, %xmm0, %xmm0 vmulps %xmm0, %xmm19, %xmm22 vfmadd231ps %xmm2, %xmm18, %xmm22 # xmm22 = (xmm18 * xmm2) + xmm22 vfmadd231ps %xmm0, %xmm14, %xmm22 # xmm22 = (xmm14 * xmm0) + xmm22 vfnmadd231ps %xmm2, %xmm11, %xmm22 # xmm22 = -(xmm11 * xmm2) + xmm22 vaddps %xmm12, %xmm17, %xmm12 vfmadd231ps %xmm0, %xmm16, %xmm12 # xmm12 = (xmm16 * xmm0) + xmm12 vfmadd231ps %xmm1, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm1) + xmm12 vmulps %xmm2, %xmm13, %xmm13 vfmadd231ps %xmm17, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm17) + xmm13 vfnmadd231ps %xmm16, %xmm2, %xmm13 # xmm13 = -(xmm2 * xmm16) + xmm13 vfmadd231ps %xmm15, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm15) + xmm13 vaddps %xmm20, %xmm18, %xmm16 vfmadd231ps %xmm0, %xmm14, %xmm16 # xmm16 = (xmm14 * xmm0) + xmm16 vfmadd231ps %xmm1, %xmm11, %xmm16 # xmm16 = (xmm11 * xmm1) + xmm16 vmulps %xmm2, %xmm19, %xmm17 vfmadd231ps %xmm18, %xmm0, %xmm17 # xmm17 = (xmm0 * xmm18) + xmm17 vfnmadd231ps %xmm14, %xmm2, %xmm17 # xmm17 = -(xmm2 * xmm14) + xmm17 vfmadd231ps %xmm11, %xmm0, %xmm17 # xmm17 = (xmm0 * xmm11) + xmm17 vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3] vshufps $0xc9, %xmm21, %xmm21, %xmm14 # xmm14 = xmm21[1,2,0,3] vmulps %xmm14, %xmm10, %xmm14 vfmsub231ps %xmm21, %xmm11, %xmm14 # xmm14 = (xmm11 * xmm21) - xmm14 vshufps 
$0xc9, %xmm14, %xmm14, %xmm15 # xmm15 = xmm14[1,2,0,3] vshufps $0xc9, %xmm22, %xmm22, %xmm14 # xmm14 = xmm22[1,2,0,3] vmulps %xmm14, %xmm10, %xmm14 vfmsub231ps %xmm22, %xmm11, %xmm14 # xmm14 = (xmm11 * xmm22) - xmm14 vxorps %xmm22, %xmm22, %xmm22 vshufps $0xc9, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[1,2,0,3] vshufps $0xc9, %xmm13, %xmm13, %xmm14 # xmm14 = xmm13[1,2,0,3] vshufps $0xc9, %xmm16, %xmm16, %xmm11 # xmm11 = xmm16[1,2,0,3] vmulps %xmm11, %xmm13, %xmm11 vfmsub231ps %xmm16, %xmm14, %xmm11 # xmm11 = (xmm14 * xmm16) - xmm11 vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3] vshufps $0xc9, %xmm17, %xmm17, %xmm16 # xmm16 = xmm17[1,2,0,3] vmulps %xmm16, %xmm13, %xmm16 vfmsub231ps %xmm17, %xmm14, %xmm16 # xmm16 = (xmm14 * xmm17) - xmm16 vshufps $0xc9, %xmm16, %xmm16, %xmm14 # xmm14 = xmm16[1,2,0,3] vdpps $0x7f, %xmm15, %xmm15, %xmm1 vmovss %xmm1, %xmm22, %xmm16 # xmm16 = xmm1[0],xmm22[1,2,3] vrsqrt14ss %xmm16, %xmm22, %xmm17 vmovss 0x42c95(%rip), %xmm3 # 0x1eec718 vmulss %xmm3, %xmm17, %xmm18 vmovss 0x42c89(%rip), %xmm20 # 0x1eec71c vmulss %xmm20, %xmm1, %xmm19 vmulss %xmm17, %xmm19, %xmm19 vmulss %xmm17, %xmm17, %xmm17 vmulss %xmm17, %xmm19, %xmm17 vaddss %xmm17, %xmm18, %xmm17 vdpps $0x7f, %xmm0, %xmm15, %xmm2 vbroadcastss %xmm17, %xmm17 vmulps %xmm17, %xmm15, %xmm18 vbroadcastss %xmm1, %xmm19 vmulps %xmm0, %xmm19, %xmm0 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm15, %xmm2 vsubps %xmm2, %xmm0, %xmm0 vrcp14ss %xmm16, %xmm22, %xmm2 vmovss 0x4750c(%rip), %xmm19 # 0x1ef0ff8 vfnmadd213ss %xmm19, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm19 vmulss %xmm1, %xmm2, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm0, %xmm1, %xmm0 vmulps %xmm0, %xmm17, %xmm0 vdpps $0x7f, %xmm11, %xmm11, %xmm1 vmovss %xmm1, %xmm22, %xmm2 # xmm2 = xmm1[0],xmm22[1,2,3] vrsqrt14ss %xmm2, %xmm22, %xmm15 vmulss %xmm3, %xmm15, %xmm16 vmulss %xmm20, %xmm1, %xmm17 vmulss %xmm15, %xmm17, %xmm17 vmulss %xmm15, %xmm15, %xmm15 vmulss %xmm15, %xmm17, %xmm15 vaddss %xmm15, %xmm16, %xmm15 vbroadcastss %xmm15, %xmm15 vdpps $0x7f, %xmm14, %xmm11, %xmm3 vmulps %xmm15, %xmm11, %xmm16 vbroadcastss %xmm1, %xmm17 vmulps %xmm14, %xmm17, %xmm14 vbroadcastss %xmm3, %xmm3 vmulps %xmm3, %xmm11, %xmm3 vsubps %xmm3, %xmm14, %xmm3 vrcp14ss %xmm2, %xmm22, %xmm2 vfnmadd213ss %xmm19, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm19 vmulss %xmm1, %xmm2, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm1, %xmm3, %xmm1 vmulps %xmm1, %xmm15, %xmm1 vshufps $0xff, %xmm9, %xmm9, %xmm2 # xmm2 = xmm9[3,3,3,3] vmulps %xmm18, %xmm2, %xmm3 vsubps %xmm3, %xmm9, %xmm14 vshufps $0xff, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[3,3,3,3] vmulps %xmm18, %xmm11, %xmm11 vmulps %xmm0, %xmm2, %xmm0 vaddps %xmm0, %xmm11, %xmm0 vsubps %xmm0, %xmm10, %xmm2 vaddps %xmm3, %xmm9, %xmm9 vaddps %xmm0, %xmm10, %xmm11 vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3] vmulps %xmm16, %xmm0, %xmm3 vsubps %xmm3, %xmm12, %xmm15 vshufps $0xff, %xmm13, %xmm13, %xmm10 # xmm10 = xmm13[3,3,3,3] vmulps %xmm16, %xmm10, %xmm10 vmulps %xmm1, %xmm0, %xmm0 vaddps %xmm0, %xmm10, %xmm0 vsubps %xmm0, %xmm13, %xmm1 vaddps %xmm3, %xmm12, %xmm10 vaddps %xmm0, %xmm13, %xmm12 vbroadcastss 0x482d1(%rip), %xmm30 # 0x1ef1ebc vmulps %xmm30, %xmm2, %xmm0 vaddps %xmm0, %xmm14, %xmm0 vmulps %xmm30, %xmm1, %xmm1 vsubps %xmm1, %xmm15, %xmm1 vbroadcastss %xmm14, %ymm2 vbroadcastss 0x68af6(%rip), %ymm29 # 0x1f12704 vpermps %ymm14, %ymm29, %ymm3 vbroadcastss 0x772be(%rip), %ymm31 # 0x1f20edc vpermps %ymm14, %ymm31, %ymm14 vbroadcastss %xmm0, %ymm16 vpermps %ymm0, %ymm29, %ymm17 vpermps %ymm0, %ymm31, %ymm0 
vbroadcastss %xmm1, %ymm18 vpermps %ymm1, %ymm29, %ymm19 vpermps %ymm1, %ymm31, %ymm1 vbroadcastss %xmm15, %ymm20 vpermps %ymm15, %ymm29, %ymm21 vpermps %ymm15, %ymm31, %ymm15 vmulps %ymm27, %ymm20, %ymm13 vfmadd231ps %ymm18, %ymm26, %ymm13 # ymm13 = (ymm26 * ymm18) + ymm13 vmulps %ymm8, %ymm20, %ymm20 vfmadd231ps %ymm18, %ymm7, %ymm20 # ymm20 = (ymm7 * ymm18) + ymm20 vmulps %ymm27, %ymm21, %ymm18 vfmadd231ps %ymm19, %ymm26, %ymm18 # ymm18 = (ymm26 * ymm19) + ymm18 vmulps %ymm8, %ymm21, %ymm21 vfmadd231ps %ymm19, %ymm7, %ymm21 # ymm21 = (ymm7 * ymm19) + ymm21 vmulps %ymm27, %ymm15, %ymm19 vfmadd231ps %ymm1, %ymm26, %ymm19 # ymm19 = (ymm26 * ymm1) + ymm19 vfmadd231ps %ymm16, %ymm25, %ymm13 # ymm13 = (ymm25 * ymm16) + ymm13 vfmadd231ps %ymm17, %ymm25, %ymm18 # ymm18 = (ymm25 * ymm17) + ymm18 vfmadd231ps %ymm0, %ymm25, %ymm19 # ymm19 = (ymm25 * ymm0) + ymm19 vfmadd231ps %ymm2, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm2) + ymm13 vfmadd231ps %ymm3, %ymm24, %ymm18 # ymm18 = (ymm24 * ymm3) + ymm18 vfmadd231ps %ymm14, %ymm24, %ymm19 # ymm19 = (ymm24 * ymm14) + ymm19 vmulps %ymm8, %ymm15, %ymm15 vfmadd231ps %ymm1, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm1) + ymm15 vfmadd231ps %ymm16, %ymm6, %ymm20 # ymm20 = (ymm6 * ymm16) + ymm20 vfmadd231ps %ymm17, %ymm6, %ymm21 # ymm21 = (ymm6 * ymm17) + ymm21 vfmadd231ps %ymm0, %ymm6, %ymm15 # ymm15 = (ymm6 * ymm0) + ymm15 vfmadd231ps %ymm2, %ymm5, %ymm20 # ymm20 = (ymm5 * ymm2) + ymm20 vfmadd231ps %ymm3, %ymm5, %ymm21 # ymm21 = (ymm5 * ymm3) + ymm21 vfmadd231ps %ymm14, %ymm5, %ymm15 # ymm15 = (ymm5 * ymm14) + ymm15 vbroadcastss 0x777e0(%rip), %ymm28 # 0x1f214d0 vmulps %ymm28, %ymm20, %ymm0 vmulps %ymm28, %ymm21, %ymm1 vmulps %ymm28, %ymm15, %ymm2 vxorps %xmm4, %xmm4, %xmm4 vblendps $0x1, %ymm4, %ymm0, %ymm3 # ymm3 = ymm4[0],ymm0[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm1, %ymm14 # ymm14 = ymm4[0],ymm1[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm2, %ymm15 # ymm15 = ymm4[0],ymm2[1,2,3,4,5,6,7] vsubps %ymm3, %ymm13, %ymm3 vsubps %ymm14, %ymm18, %ymm14 vsubps %ymm15, %ymm19, %ymm15 vblendps $0x80, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm1, %ymm1 # ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm2, %ymm2 # ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7] vaddps %ymm0, %ymm13, %ymm0 vaddps %ymm1, %ymm18, %ymm1 vaddps %ymm2, %ymm19, %ymm2 vbroadcastss 0x41ccc(%rip), %ymm23 # 0x1eeba20 vminps %ymm13, %ymm23, %ymm16 vminps %ymm18, %ymm23, %ymm17 vminps %ymm19, %ymm23, %ymm20 vminps %ymm0, %ymm3, %ymm21 vminps %ymm21, %ymm16, %ymm16 vminps %ymm1, %ymm14, %ymm21 vminps %ymm21, %ymm17, %ymm17 vminps %ymm2, %ymm15, %ymm21 vminps %ymm21, %ymm20, %ymm20 vmaxps %ymm0, %ymm3, %ymm0 vmulps %xmm30, %xmm11, %xmm3 vaddps %xmm3, %xmm9, %xmm3 vmulps %xmm30, %xmm12, %xmm11 vsubps %xmm11, %xmm10, %xmm21 vbroadcastss 0x42dd6(%rip), %ymm30 # 0x1eecb84 vmaxps %ymm13, %ymm30, %ymm11 vmaxps %ymm18, %ymm30, %ymm12 vmaxps %ymm19, %ymm30, %ymm13 vmaxps %ymm0, %ymm11, %ymm0 vmaxps %ymm1, %ymm14, %ymm1 vmaxps %ymm1, %ymm12, %ymm1 vmaxps %ymm2, %ymm15, %ymm2 vmaxps %ymm2, %ymm13, %ymm2 vshufps $0xb1, %ymm16, %ymm16, %ymm11 # ymm11 = ymm16[1,0,3,2,5,4,7,6] vminps %ymm11, %ymm16, %ymm11 vshufpd $0x5, %ymm11, %ymm11, %ymm12 # ymm12 = ymm11[1,0,3,2] vminps %ymm12, %ymm11, %ymm11 vextractf128 $0x1, %ymm11, %xmm12 vminps %xmm12, %xmm11, %xmm11 vshufps $0xb1, %ymm17, %ymm17, %ymm12 # ymm12 = ymm17[1,0,3,2,5,4,7,6] vminps %ymm12, %ymm17, %ymm12 vshufpd $0x5, %ymm12, %ymm12, %ymm13 # ymm13 = ymm12[1,0,3,2] vminps %ymm13, %ymm12, %ymm12 vextractf128 $0x1, %ymm12, %xmm13 
vminps %xmm13, %xmm12, %xmm12 vunpcklps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1] vshufps $0xb1, %ymm20, %ymm20, %ymm12 # ymm12 = ymm20[1,0,3,2,5,4,7,6] vminps %ymm12, %ymm20, %ymm12 vshufpd $0x5, %ymm12, %ymm12, %ymm13 # ymm13 = ymm12[1,0,3,2] vminps %ymm13, %ymm12, %ymm12 vextractf128 $0x1, %ymm12, %xmm13 vminps %xmm13, %xmm12, %xmm12 vinsertps $0x28, %xmm12, %xmm11, %xmm12 # xmm12 = xmm11[0,1],xmm12[0],zero vshufps $0xb1, %ymm0, %ymm0, %ymm11 # ymm11 = ymm0[1,0,3,2,5,4,7,6] vmaxps %ymm11, %ymm0, %ymm0 vshufpd $0x5, %ymm0, %ymm0, %ymm11 # ymm11 = ymm0[1,0,3,2] vmaxps %ymm11, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm11 vmaxps %xmm11, %xmm0, %xmm0 vshufps $0xb1, %ymm1, %ymm1, %ymm11 # ymm11 = ymm1[1,0,3,2,5,4,7,6] vmaxps %ymm11, %ymm1, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm11 # ymm11 = ymm1[1,0,3,2] vmaxps %ymm11, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm11 vmaxps %xmm11, %xmm1, %xmm1 vunpcklps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vshufps $0xb1, %ymm2, %ymm2, %ymm1 # ymm1 = ymm2[1,0,3,2,5,4,7,6] vmaxps %ymm1, %ymm2, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vmaxps %ymm2, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm2 vmaxps %xmm2, %xmm1, %xmm1 vinsertps $0x28, %xmm1, %xmm0, %xmm11 # xmm11 = xmm0[0,1],xmm1[0],zero vbroadcastss %xmm9, %ymm0 vpermps %ymm9, %ymm29, %ymm1 vpermps %ymm9, %ymm31, %ymm2 vbroadcastss %xmm3, %ymm13 vpermps %ymm3, %ymm29, %ymm14 vpermps %ymm3, %ymm31, %ymm3 vbroadcastss %xmm21, %ymm15 vpermps %ymm21, %ymm29, %ymm16 vpermps %ymm21, %ymm31, %ymm17 vbroadcastss %xmm10, %ymm18 vpermps %ymm10, %ymm29, %ymm19 vpermps %ymm10, %ymm31, %ymm10 vmulps %ymm27, %ymm18, %ymm9 vfmadd231ps %ymm15, %ymm26, %ymm9 # ymm9 = (ymm26 * ymm15) + ymm9 vmulps %ymm8, %ymm18, %ymm18 vfmadd231ps %ymm15, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm15) + ymm18 vmulps %ymm27, %ymm19, %ymm15 vfmadd231ps %ymm16, %ymm26, %ymm15 # ymm15 = (ymm26 * ymm16) + ymm15 vmulps %ymm8, %ymm19, %ymm19 vfmadd231ps %ymm16, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm16) + ymm19 vmulps %ymm27, %ymm10, %ymm16 vfmadd231ps %ymm17, %ymm26, %ymm16 # ymm16 = (ymm26 * ymm17) + ymm16 vfmadd231ps %ymm13, %ymm25, %ymm9 # ymm9 = (ymm25 * ymm13) + ymm9 vfmadd231ps %ymm14, %ymm25, %ymm15 # ymm15 = (ymm25 * ymm14) + ymm15 vfmadd231ps %ymm3, %ymm25, %ymm16 # ymm16 = (ymm25 * ymm3) + ymm16 vfmadd231ps %ymm0, %ymm24, %ymm9 # ymm9 = (ymm24 * ymm0) + ymm9 vfmadd231ps %ymm1, %ymm24, %ymm15 # ymm15 = (ymm24 * ymm1) + ymm15 vfmadd231ps %ymm2, %ymm24, %ymm16 # ymm16 = (ymm24 * ymm2) + ymm16 vmulps %ymm8, %ymm10, %ymm10 vfmadd231ps %ymm17, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm17) + ymm10 vfmadd231ps %ymm13, %ymm6, %ymm18 # ymm18 = (ymm6 * ymm13) + ymm18 vfmadd231ps %ymm14, %ymm6, %ymm19 # ymm19 = (ymm6 * ymm14) + ymm19 vfmadd231ps %ymm3, %ymm6, %ymm10 # ymm10 = (ymm6 * ymm3) + ymm10 vfmadd231ps %ymm0, %ymm5, %ymm18 # ymm18 = (ymm5 * ymm0) + ymm18 vfmadd231ps %ymm1, %ymm5, %ymm19 # ymm19 = (ymm5 * ymm1) + ymm19 vfmadd231ps %ymm2, %ymm5, %ymm10 # ymm10 = (ymm5 * ymm2) + ymm10 vmulps %ymm28, %ymm18, %ymm0 vmulps %ymm28, %ymm19, %ymm1 vmulps %ymm28, %ymm10, %ymm2 vblendps $0x1, %ymm4, %ymm0, %ymm3 # ymm3 = ymm4[0],ymm0[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm1, %ymm10 # ymm10 = ymm4[0],ymm1[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm2, %ymm13 # ymm13 = ymm4[0],ymm2[1,2,3,4,5,6,7] vsubps %ymm3, %ymm9, %ymm3 vsubps %ymm10, %ymm15, %ymm10 vsubps %ymm13, %ymm16, %ymm13 vblendps $0x80, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm1, %ymm1 # ymm1 = 
ymm1[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm2, %ymm2 # ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7] vaddps %ymm0, %ymm9, %ymm0 vaddps %ymm1, %ymm15, %ymm1 vaddps %ymm2, %ymm16, %ymm2 vminps %ymm9, %ymm23, %ymm14 vminps %ymm15, %ymm23, %ymm17 vminps %ymm16, %ymm23, %ymm18 vminps %ymm0, %ymm3, %ymm19 vminps %ymm19, %ymm14, %ymm14 vminps %ymm1, %ymm10, %ymm19 vminps %ymm19, %ymm17, %ymm17 vminps %ymm2, %ymm13, %ymm19 vminps %ymm19, %ymm18, %ymm18 vmaxps %ymm0, %ymm3, %ymm0 vmaxps %ymm9, %ymm30, %ymm3 vmaxps %ymm0, %ymm3, %ymm0 vmaxps %ymm1, %ymm10, %ymm1 vmaxps %ymm15, %ymm30, %ymm3 vbroadcastss 0x76e9b(%rip), %xmm15 # 0x1f20ec4 vmaxps %ymm1, %ymm3, %ymm1 vmaxps %ymm2, %ymm13, %ymm2 vmaxps %ymm16, %ymm30, %ymm3 vmovss 0x46f9f(%rip), %xmm16 # 0x1ef0fe0 vmaxps %ymm2, %ymm3, %ymm2 vshufps $0xb1, %ymm14, %ymm14, %ymm3 # ymm3 = ymm14[1,0,3,2,5,4,7,6] vminps %ymm3, %ymm14, %ymm3 vshufpd $0x5, %ymm3, %ymm3, %ymm9 # ymm9 = ymm3[1,0,3,2] vminps %ymm9, %ymm3, %ymm3 vextractf128 $0x1, %ymm3, %xmm9 vminps %xmm9, %xmm3, %xmm3 vshufps $0xb1, %ymm17, %ymm17, %ymm9 # ymm9 = ymm17[1,0,3,2,5,4,7,6] vminps %ymm9, %ymm17, %ymm9 vbroadcastss 0x77381(%rip), %xmm17 # 0x1f213fc vshufpd $0x5, %ymm9, %ymm9, %ymm10 # ymm10 = ymm9[1,0,3,2] vminps %ymm10, %ymm9, %ymm9 vextractf128 $0x1, %ymm9, %xmm10 vminps %xmm10, %xmm9, %xmm9 vunpcklps %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] vshufps $0xb1, %ymm18, %ymm18, %ymm9 # ymm9 = ymm18[1,0,3,2,5,4,7,6] vminps %ymm9, %ymm18, %ymm9 vbroadcastss 0x46f33(%rip), %xmm18 # 0x1ef0fe0 vshufpd $0x5, %ymm9, %ymm9, %ymm10 # ymm10 = ymm9[1,0,3,2] vminps %ymm10, %ymm9, %ymm9 vextractf128 $0x1, %ymm9, %xmm10 vminps %xmm10, %xmm9, %xmm9 vinsertps $0x28, %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[0,1],xmm9[0],zero vminps %xmm3, %xmm12, %xmm3 vshufps $0xb1, %ymm0, %ymm0, %ymm9 # ymm9 = ymm0[1,0,3,2,5,4,7,6] vmaxps %ymm9, %ymm0, %ymm0 vshufpd $0x5, %ymm0, %ymm0, %ymm9 # ymm9 = ymm0[1,0,3,2] vmaxps %ymm9, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm9 vmaxps %xmm9, %xmm0, %xmm0 vshufps $0xb1, %ymm1, %ymm1, %ymm9 # ymm9 = ymm1[1,0,3,2,5,4,7,6] vmaxps %ymm9, %ymm1, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm9 # ymm9 = ymm1[1,0,3,2] vmaxps %ymm9, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm9 vmaxps %xmm9, %xmm1, %xmm1 vunpcklps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] vshufps $0xb1, %ymm2, %ymm2, %ymm1 # ymm1 = ymm2[1,0,3,2,5,4,7,6] vmaxps %ymm1, %ymm2, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vmaxps %ymm2, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm2 vmaxps %xmm2, %xmm1, %xmm1 vinsertps $0x28, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],zero vmaxps %xmm0, %xmm11, %xmm0 vcmpltps %xmm18, %xmm0, %k1 vcmpnleps %xmm17, %xmm3, %k0 {%k1} vmovss 0xc(%rsp), %xmm3 knotw %k0, %k0 kmovd %k0, %edi testb $0x7, %dil jne 0x1eaa16f incq %rsi addq $0x38, %r13 cmpq 0x20(%rsp), %rsi seta %r8b jbe 0x1ea9750 jmp 0x1eaa179 testb $0x1, %r8b je 0x1eaa504 movq %r14, 0x108(%rsp) leaq 0x108(%rsp), %rax movq %rax, 0x110(%rsp) movq 0x10(%rsp), %rax movq %rax, 0x118(%rsp) vmovss 0x2c(%rax), %xmm0 vmovss 0x30(%rax), %xmm1 vmovaps 0xc0(%rsp), %xmm2 vsubss %xmm0, %xmm2, %xmm2 vsubss %xmm0, %xmm1, %xmm1 vdivss %xmm1, %xmm2, %xmm4 vmovaps 0xb0(%rsp), %xmm2 vsubss %xmm0, %xmm2, %xmm0 vdivss %xmm1, %xmm0, %xmm0 vmovss %xmm4, 0x8(%rsp) vmulss %xmm4, %xmm3, %xmm2 vmovss %xmm2, 0x40(%rsp) vmovss %xmm0, 0xd0(%rsp) vmulss %xmm0, %xmm3, %xmm1 vroundss $0x9, %xmm2, %xmm2, %xmm0 vmovss %xmm1, 0x30(%rsp) vroundss $0xa, %xmm1, %xmm1, %xmm1 vmaxss 0x4181f(%rip), %xmm0, %xmm4 # 0x1eeba24 vmovss 
%xmm4, 0xe0(%rsp) vminss %xmm3, %xmm1, %xmm2 vcvttss2si %xmm4, %ebp vmovss %xmm2, 0x20(%rsp) vcvttss2si %xmm2, %r13d vcvttss2si %xmm0, %ebx testl %ebx, %ebx movl $0xffffffff, %eax # imm = 0xFFFFFFFF cmovsl %eax, %ebx vcvttss2si %xmm1, %eax vcvttss2si %xmm3, %r15d incl %r15d cmpl %r15d, %eax cmovll %eax, %r15d movslq %ebp, %rdx leaq 0x140(%rsp), %rdi leaq 0x110(%rsp), %r12 movq %r12, %rsi vzeroupper callq 0x1eae97c movslq %r13d, %rdx leaq 0x120(%rsp), %rdi movq %r12, %rsi callq 0x1eae97c movl %r15d, %eax subl %ebx, %eax vmovss 0x40(%rsp), %xmm0 vsubss 0xe0(%rsp), %xmm0, %xmm0 cmpl $0x1, %eax jne 0x1eaa324 vxorps %xmm9, %xmm9, %xmm9 vmaxss %xmm9, %xmm0, %xmm0 vmovss 0x42474(%rip), %xmm6 # 0x1eec714 vsubss %xmm0, %xmm6, %xmm1 vbroadcastss %xmm0, %xmm0 vmovaps 0x120(%rsp), %xmm2 vmovaps 0x130(%rsp), %xmm3 vmulps %xmm2, %xmm0, %xmm7 vbroadcastss %xmm1, %xmm1 vmovaps 0x140(%rsp), %xmm4 vmovaps 0x150(%rsp), %xmm5 vfmadd231ps %xmm4, %xmm1, %xmm7 # xmm7 = (xmm1 * xmm4) + xmm7 vmulps %xmm3, %xmm0, %xmm8 vfmadd231ps %xmm1, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm1) + xmm8 vmovss 0x20(%rsp), %xmm0 vsubss 0x30(%rsp), %xmm0, %xmm0 vmaxss %xmm9, %xmm0, %xmm0 vsubss %xmm0, %xmm6, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm4, %xmm0, %xmm9 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm2, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm2) + xmm9 vmulps %xmm5, %xmm0, %xmm10 vfmadd231ps %xmm3, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm3) + xmm10 vxorps %xmm6, %xmm6, %xmm6 vxorps %xmm22, %xmm22, %xmm22 jmp 0x1eaa547 incl %ebp movslq %ebp, %rdx leaq 0x1b0(%rsp), %rdi movq %r12, %rsi vmovss %xmm0, 0x40(%rsp) callq 0x1eae97c decl %r13d movslq %r13d, %rdx leaq 0x190(%rsp), %rdi movq %r12, %rsi callq 0x1eae97c vxorps %xmm3, %xmm3, %xmm3 vmovss 0x40(%rsp), %xmm0 vmaxss %xmm3, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm1 vmulps 0x1b0(%rsp), %xmm1, %xmm7 vmovss 0x4239b(%rip), %xmm2 # 0x1eec714 vsubss %xmm0, %xmm2, %xmm0 vbroadcastss %xmm0, %xmm0 vfmadd231ps 0x140(%rsp), %xmm0, %xmm7 # xmm7 = (xmm0 * mem) + xmm7 vmulps 0x1c0(%rsp), %xmm1, %xmm8 vfmadd231ps 0x150(%rsp), %xmm0, %xmm8 # xmm8 = (xmm0 * mem) + xmm8 vmovss 0x20(%rsp), %xmm0 vsubss 0x30(%rsp), %xmm0, %xmm0 vmaxss %xmm3, %xmm0, %xmm0 vsubss %xmm0, %xmm2, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps 0x190(%rsp), %xmm0, %xmm9 vbroadcastss %xmm1, %xmm1 vfmadd231ps 0x120(%rsp), %xmm1, %xmm9 # xmm9 = (xmm1 * mem) + xmm9 vmulps 0x1a0(%rsp), %xmm0, %xmm10 vfmadd231ps 0x130(%rsp), %xmm1, %xmm10 # xmm10 = (xmm1 * mem) + xmm10 leal 0x1(%rbx), %eax cmpl %r15d, %eax jge 0x1eaa53d vmovss 0x8(%rsp), %xmm1 vmovss 0xd0(%rsp), %xmm0 vsubss %xmm1, %xmm0, %xmm0 vmovss %xmm0, 0x1c(%rsp) movl %eax, %r13d notl %ebx addl %r15d, %ebx leaq 0x170(%rsp), %r15 vmovaps %xmm10, 0xe0(%rsp) vmovaps %xmm9, 0x40(%rsp) vmovaps %xmm8, 0x20(%rsp) vmovaps %xmm7, 0x30(%rsp) vcvtsi2ss %r13d, %xmm15, %xmm0 vdivss 0xc(%rsp), %xmm0, %xmm0 vsubss %xmm1, %xmm0, %xmm0 vdivss 0x1c(%rsp), %xmm0, %xmm0 vsubss %xmm0, %xmm2, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm9, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm7, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm7) + xmm2 vmovaps %xmm2, 0xd0(%rsp) vmulps %xmm0, %xmm10, %xmm0 vfmadd231ps %xmm1, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm1) + xmm0 vmovaps %xmm0, 0x160(%rsp) movq %r15, %rdi movq %r12, %rsi movq %r13, %rdx callq 0x1eae97c vmovaps 0xe0(%rsp), %xmm10 vmovaps 0x40(%rsp), %xmm9 vmovaps 0x20(%rsp), %xmm8 vmovaps 0x30(%rsp), %xmm7 vxorps %xmm22, %xmm22, %xmm22 vmovss 0x42263(%rip), %xmm2 # 0x1eec714 vmovaps 0x170(%rsp), %xmm0 vsubps 0xd0(%rsp), %xmm0, %xmm0 vmovaps 0x180(%rsp), %xmm1 vsubps 0x160(%rsp), %xmm1, 
%xmm1 vminps %xmm22, %xmm0, %xmm0 vmaxps %xmm22, %xmm1, %xmm1 vaddps %xmm0, %xmm7, %xmm7 vaddps %xmm0, %xmm9, %xmm9 vaddps %xmm1, %xmm8, %xmm8 vaddps %xmm1, %xmm10, %xmm10 vmovss 0x8(%rsp), %xmm1 incq %r13 decl %ebx jne 0x1eaa418 jmp 0x1eaa543 movq 0x68(%rsp), %r8 movq 0x10(%rsp), %rsi vmovaps 0xc0(%rsp), %xmm4 vmovaps 0xb0(%rsp), %xmm5 vxorps %xmm6, %xmm6, %xmm6 vmovss 0x46412(%rip), %xmm29 # 0x1ef0940 vmovss 0x4640c(%rip), %xmm31 # 0x1ef0944 jmp 0x1eaa670 vxorps %xmm22, %xmm22, %xmm22 vxorps %xmm6, %xmm6, %xmm6 vcmpleps %xmm8, %xmm7, %k0 knotw %k0, %k0 kmovd %k0, %eax testb $0x7, %al movq 0x68(%rsp), %r8 vmovaps 0xc0(%rsp), %xmm4 vmovaps 0xb0(%rsp), %xmm5 vmovss 0x463c7(%rip), %xmm29 # 0x1ef0940 vmovss 0x463c1(%rip), %xmm31 # 0x1ef0944 movq 0x10(%rsp), %rsi vbroadcastss 0x76933(%rip), %xmm15 # 0x1f20ec4 vmovss 0x46a45(%rip), %xmm16 # 0x1ef0fe0 vbroadcastss 0x76e57(%rip), %xmm17 # 0x1f213fc vbroadcastss 0x46a31(%rip), %xmm18 # 0x1ef0fe0 jne 0x1eaa670 vcmpleps %xmm10, %xmm9, %k0 knotw %k0, %k0 kmovd %k0, %eax testb $0x7, %al jne 0x1eaa670 vminps %xmm9, %xmm7, %xmm0 vmovss 0x210(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vmaxps %xmm10, %xmm8, %xmm1 vmovd %r14d, %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vaddps %xmm1, %xmm0, %xmm2 vmovaps 0xa0(%rsp), %xmm3 vminps %xmm0, %xmm3, %xmm3 vmovaps %xmm3, 0xa0(%rsp) vmovaps 0x90(%rsp), %xmm3 vmaxps %xmm1, %xmm3, %xmm3 vmovaps %xmm3, 0x90(%rsp) vmovaps 0x80(%rsp), %xmm3 vminps %xmm2, %xmm3, %xmm3 vmovaps %xmm3, 0x80(%rsp) vmovaps 0x70(%rsp), %xmm3 vmaxps %xmm2, %xmm3, %xmm3 vmovaps %xmm3, 0x70(%rsp) incq 0x58(%rsp) movq 0x60(%rsp), %rdx leaq 0x1(%rdx), %rax shlq $0x5, %rdx movq 0x100(%rsp), %rcx vmovaps %xmm0, (%rcx,%rdx) vmovaps %xmm1, 0x10(%rcx,%rdx) movq %rax, 0x60(%rsp) incq %r14 cmpq 0x8(%r8), %r14 jb 0x1ea9648 movq 0xf8(%rsp), %rax vmovaps 0xa0(%rsp), %xmm0 vmovaps %xmm0, (%rax) vmovaps 0x90(%rsp), %xmm0 vmovaps %xmm0, 0x10(%rax) vmovaps 0x80(%rsp), %xmm0 vmovaps %xmm0, 0x20(%rax) vmovaps 0x70(%rsp), %xmm0 vmovaps %xmm0, 0x30(%rax) movq 0x58(%rsp), %rcx movq %rcx, 0x48(%rax) addq $0x1d8, %rsp # imm = 0x1D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::computeAlignedSpace(unsigned long) const
LinearSpace3fa computeAlignedSpace(const size_t primID) const
{
  Vec3fa axisz(0,0,1);
  Vec3fa axisy(0,1,0);

  const Curve3ff curve = getCurveScaledRadius(primID);
  const Vec3fa p0 = curve.begin();
  const Vec3fa p3 = curve.end();
  const Vec3fa d0 = curve.eval_du(0.0f);
  //const Vec3fa d1 = curve.eval_du(1.0f);
  const Vec3fa axisz_ = normalize(p3 - p0);
  const Vec3fa axisy_ = cross(axisz_,d0);

  if (sqr_length(p3-p0) > 1E-18f) {
    axisz = axisz_;
    axisy = axisy_;
  }

  if (sqr_length(axisy) > 1E-18) {
    axisy = normalize(axisy);
    Vec3fa axisx = normalize(cross(axisy,axisz));
    return LinearSpace3fa(axisx,axisy,axisz);
  }
  return frame(axisz);
}
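A self-contained sketch of this frame construction follows, with a minimal V3 stand-in for Vec3fa; the helper-axis choice in frameOf is illustrative (Embree's frame() picks its helper differently, but the orthogonalization idea is the same).

#include <cmath>

struct V3 { float x, y, z; };
static V3    cross(V3 a, V3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot  (V3 a, V3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static V3    normalize(V3 a)   { float l = std::sqrt(dot(a,a)); return {a.x/l, a.y/l, a.z/l}; }

struct Basis { V3 x, y, z; };

static Basis frameOf(V3 z) {        // frame from a single axis: pick a helper
  V3 h = std::fabs(z.x) > 0.9f ? V3{0,1,0} : V3{1,0,0};   // not parallel to z
  V3 y = normalize(cross(z, h));
  return { cross(y, z), y, z };
}

// z follows the curve's chord p3-p0; y is perpendicular to the chord and the
// start tangent d0; degenerate chords or tangents fall back to a default frame.
Basis alignedSpace(V3 p0, V3 p3, V3 d0) {
  V3 axisz = {0,0,1}, axisy = {0,1,0};
  V3 chord = {p3.x-p0.x, p3.y-p0.y, p3.z-p0.z};
  if (dot(chord, chord) > 1e-18f) { axisz = normalize(chord); axisy = cross(axisz, d0); }
  if (dot(axisy, axisy) > 1e-18f) {
    axisy = normalize(axisy);
    V3 axisx = normalize(cross(axisy, axisz));
    return { axisx, axisy, axisz };
  }
  return frameOf(axisz);
}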
movq %rdi, %rax imulq 0x68(%rsi), %rdx movq 0x58(%rsi), %rcx movq 0x188(%rsi), %rdi movl (%rcx,%rdx), %r8d movq (%rdi), %rcx movq 0x10(%rdi), %r9 leal 0x1(%r8), %r10d leal 0x2(%r8), %r11d leal 0x3(%r8), %edx movq %r8, %rdi imulq %r9, %r10 vmovaps (%rcx,%r10), %xmm0 imulq %r9, %r11 vmovaps (%rcx,%r11), %xmm2 imulq %r9, %rdi imulq %r9, %rdx vmovss 0x24c(%rsi), %xmm4 vmulss 0xc(%rcx,%r10), %xmm4, %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm1 # xmm1 = xmm0[0,1,2],xmm1[0] vmulss 0xc(%rcx,%r11), %xmm4, %xmm0 vinsertps $0x30, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm0[0] vxorps %xmm3, %xmm3, %xmm3 vsubps %xmm1, %xmm2, %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm5 vmovss %xmm5, %xmm3, %xmm6 # xmm6 = xmm5[0],xmm3[1,2,3] vrsqrt14ss %xmm6, %xmm3, %xmm6 vmulss 0x40ce7(%rip), %xmm6, %xmm7 # 0x1eec718 vmulss 0x40ce3(%rip), %xmm5, %xmm8 # 0x1eec71c vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm6, %xmm6, %xmm6 vmulss %xmm6, %xmm8, %xmm6 vaddss %xmm6, %xmm7, %xmm6 vbroadcastss %xmm6, %xmm6 vmulps %xmm6, %xmm0, %xmm0 vucomiss 0x4558e(%rip), %xmm5 # 0x1ef0fe8 ja 0x1eaba68 vmovsd 0x40c8c(%rip), %xmm1 # 0x1eec6f0 jbe 0x1eabac0 jmp 0x1eabac8 vmovaps (%rcx,%rdi), %xmm5 vmovaps (%rcx,%rdx), %xmm6 vmulss 0xc(%rcx,%rdi), %xmm4, %xmm7 vinsertps $0x30, %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm7[0] vmulss 0xc(%rcx,%rdx), %xmm4, %xmm4 vinsertps $0x30, %xmm4, %xmm6, %xmm4 # xmm4 = xmm6[0,1,2],xmm4[0] vmulps %xmm3, %xmm4, %xmm4 vbroadcastss 0x40c85(%rip), %xmm6 # 0x1eec71c vfnmadd213ps %xmm4, %xmm6, %xmm2 # xmm2 = -(xmm6 * xmm2) + xmm4 vfmadd213ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm2 vfmadd231ps %xmm5, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm5) + xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3] vmulps %xmm3, %xmm1, %xmm1 vfmsub231ps %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm2) - xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,2,0,3] ja 0x1eabac8 vmovaps 0x40c38(%rip), %xmm0 # 0x1eec700 vdpps $0x7f, %xmm1, %xmm1, %xmm2 vcvtss2sd %xmm2, %xmm2, %xmm3 vucomisd 0x45536(%rip), %xmm3 # 0x1ef1010 jbe 0x1eabb6d vxorps %xmm3, %xmm3, %xmm3 vmovss %xmm2, %xmm3, %xmm4 # xmm4 = xmm2[0],xmm3[1,2,3] vrsqrt14ss %xmm4, %xmm3, %xmm4 vmovss 0x40c22(%rip), %xmm5 # 0x1eec718 vmulss %xmm5, %xmm4, %xmm6 vmovss 0x4107e(%rip), %xmm7 # 0x1eecb80 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm4, %xmm2, %xmm2 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm2, %xmm2 vsubss %xmm2, %xmm6, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm1, %xmm1 vshufps $0xc9, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,2,0,3] vshufps $0xc9, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,2,0,3] vmulps %xmm4, %xmm0, %xmm4 vfmsub231ps %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) - xmm4 vshufps $0xc9, %xmm4, %xmm4, %xmm2 # xmm2 = xmm4[1,2,0,3] vdpps $0x7f, %xmm2, %xmm2, %xmm4 vmovss %xmm4, %xmm3, %xmm6 # xmm6 = xmm4[0],xmm3[1,2,3] vrsqrt14ss %xmm6, %xmm3, %xmm3 vmulss %xmm5, %xmm3, %xmm5 vmulss %xmm7, %xmm4, %xmm4 vmulss %xmm3, %xmm4, %xmm4 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm4, %xmm3 vsubss %xmm3, %xmm5, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm2, %xmm3, %xmm2 jmp 0x1eabc5c vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3] vbroadcastss 0x75341(%rip), %xmm3 # 0x1f20ec0 vxorps %xmm3, %xmm2, %xmm2 vxorps %xmm4, %xmm4, %xmm4 vunpckhps %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] vmovss %xmm2, %xmm4, %xmm2 # xmm2 = xmm2[0],xmm4[1,2,3] vshufps $0x41, %xmm2, %xmm5, %xmm2 # xmm2 = xmm5[1,0],xmm2[0,1] vdpps $0x7f, %xmm2, %xmm2, %xmm5 vxorpd %xmm3, %xmm1, %xmm1 
vinsertps $0x2a, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],zero,xmm0[0],zero vdpps $0x7f, %xmm1, %xmm1, %xmm3 vcmpltps %xmm5, %xmm3, %k0 vpmovm2d %k0, %xmm3 vpbroadcastd %xmm3, %xmm3 vpmovd2m %xmm3, %k1 vpcmpeqd %xmm3, %xmm3, %xmm3 vmovaps %xmm3, %xmm3 {%k1} {z} vblendvps %xmm3, %xmm2, %xmm1, %xmm1 vdpps $0x7f, %xmm1, %xmm1, %xmm2 vmovss %xmm2, %xmm4, %xmm3 # xmm3 = xmm2[0],xmm4[1,2,3] vrsqrt14ss %xmm3, %xmm4, %xmm3 vmovss 0x40b2e(%rip), %xmm5 # 0x1eec718 vmulss %xmm5, %xmm3, %xmm6 vmovss 0x40f8a(%rip), %xmm7 # 0x1eecb80 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm6, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm1, %xmm2 vshufps $0xc9, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3] vmulps %xmm3, %xmm2, %xmm3 vfmsub231ps %xmm1, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm1) - xmm3 vshufps $0xc9, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[1,2,0,3] vdpps $0x7f, %xmm1, %xmm1, %xmm3 vmovss %xmm3, %xmm4, %xmm6 # xmm6 = xmm3[0],xmm4[1,2,3] vrsqrt14ss %xmm6, %xmm4, %xmm4 vmulss %xmm5, %xmm4, %xmm5 vmulss %xmm7, %xmm3, %xmm3 vmulss %xmm4, %xmm3, %xmm3 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm3, %xmm3 vsubss %xmm3, %xmm5, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm1, %xmm3, %xmm1 vmovaps %xmm2, (%rax) vmovaps %xmm1, 0x10(%rax) vmovaps %xmm0, 0x20(%rax) retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::computeAlignedSpaceMB(unsigned long, embree::BBox<float>) const
LinearSpace3fa computeAlignedSpaceMB(const size_t primID, const BBox1f time_range) const
{
  Vec3fa axisz(0,0,1);
  Vec3fa axisy(0,1,0);

  const range<int> tbounds = this->timeSegmentRange(time_range);
  if (tbounds.size() == 0) return frame(axisz);

  const size_t t = (tbounds.begin()+tbounds.end())/2;
  const Curve3ff curve = getCurveScaledRadius(primID,t);
  const Vec3fa p0 = curve.begin();
  const Vec3fa p3 = curve.end();
  const Vec3fa d0 = curve.eval_du(0.0f);
  //const Vec3fa d1 = curve.eval_du(1.0f);
  const Vec3fa axisz_ = normalize(p3 - p0);
  const Vec3fa axisy_ = cross(axisz_,d0);

  if (sqr_length(p3-p0) > 1E-18f) {
    axisz = axisz_;
    axisy = axisy_;
  }

  if (sqr_length(axisy) > 1E-18) {
    axisy = normalize(axisy);
    Vec3fa axisx = normalize(cross(axisy,axisz));
    return LinearSpace3fa(axisx,axisy,axisz);
  }
  return frame(axisz);
}
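The tbounds computation maps the query time interval to motion-blur segment indices; the floor/ceil clamping visible in the assembly (vroundss $0x9 / $0xa, then max/min against the segment count) suggests a mapping along these lines. This is a simplified sketch with illustrative names, not Embree's timeSegmentRange itself.

#include <algorithm>
#include <cmath>

struct IRange { int begin, end; int size() const { return end - begin; } };

// Normalize [t0,t1] into the geometry's time range [g0,g1], scale to segment
// space, and snap outward with floor/ceil so every touched segment is covered.
IRange timeSegmentRange(float t0, float t1, float g0, float g1, float numTimeSegments)
{
  const float f0 = (t0 - g0) / (g1 - g0);
  const float f1 = (t1 - g0) / (g1 - g0);
  const int lo = (int)std::max(std::floor(f0 * numTimeSegments), 0.0f);
  const int hi = (int)std::min(std::ceil (f1 * numTimeSegments), numTimeSegments);
  return { lo, hi };
}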
movq %rdi, %rax vmovsd 0x2c(%rsi), %xmm1 vmovss 0x28(%rsi), %xmm2 vsubss %xmm1, %xmm0, %xmm3 vmovshdup %xmm1, %xmm4 # xmm4 = xmm1[1,1,3,3] vsubss %xmm1, %xmm4, %xmm4 vdivss %xmm4, %xmm3, %xmm3 vmovshdup %xmm0, %xmm0 # xmm0 = xmm0[1,1,3,3] vsubss %xmm1, %xmm0, %xmm0 vdivss %xmm4, %xmm0, %xmm0 vmulss 0x44ca3(%rip), %xmm3, %xmm1 # 0x1ef0940 vmulss %xmm1, %xmm2, %xmm1 vroundss $0x9, %xmm1, %xmm1, %xmm1 vxorps %xmm3, %xmm3, %xmm3 vmaxss %xmm1, %xmm3, %xmm1 vcvttss2si %xmm1, %ecx vmulss 0x44c89(%rip), %xmm0, %xmm0 # 0x1ef0944 vmulss %xmm0, %xmm2, %xmm0 vroundss $0xa, %xmm0, %xmm0, %xmm0 vminss %xmm2, %xmm0, %xmm0 vcvttss2si %xmm0, %edi cmpl %ecx, %edi jne 0x1eabdb3 vmovaps 0x75803(%rip), %xmm0 # 0x1f214e0 vdpps $0x7f, %xmm0, %xmm0, %xmm1 vmovss 0x44ce1(%rip), %xmm2 # 0x1ef09cc vdpps $0x7f, %xmm2, %xmm2, %xmm3 vcmpltps %xmm1, %xmm3, %k0 vpmovm2d %k0, %xmm1 vpbroadcastd %xmm1, %xmm1 vpmovd2m %xmm1, %k1 vpcmpeqd %xmm1, %xmm1, %xmm1 vmovaps %xmm1, %xmm1 {%k1} {z} vblendvps %xmm1, %xmm0, %xmm2, %xmm0 vxorps %xmm2, %xmm2, %xmm2 vdpps $0x7f, %xmm0, %xmm0, %xmm1 vmovss %xmm1, %xmm2, %xmm3 # xmm3 = xmm1[0],xmm2[1,2,3] vrsqrt14ss %xmm3, %xmm2, %xmm3 vmovss 0x409e3(%rip), %xmm4 # 0x1eec718 vmulss %xmm4, %xmm3, %xmm5 vmovss 0x40e3f(%rip), %xmm6 # 0x1eecb80 vmulss %xmm6, %xmm1, %xmm1 vmulss %xmm3, %xmm1, %xmm1 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm1, %xmm1 vsubss %xmm1, %xmm5, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm1, %xmm0, %xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,2,0,3] vmulps 0x45205(%rip), %xmm1, %xmm5 # 0x1ef0f70 vmovaps 0x4098d(%rip), %xmm0 # 0x1eec700 vfmadd231ps %xmm3, %xmm0, %xmm5 # xmm5 = (xmm0 * xmm3) + xmm5 vshufps $0xc9, %xmm5, %xmm5, %xmm3 # xmm3 = xmm5[1,2,0,3] vdpps $0x7f, %xmm3, %xmm3, %xmm5 vmovss %xmm5, %xmm2, %xmm7 # xmm7 = xmm5[0],xmm2[1,2,3] vrsqrt14ss %xmm7, %xmm2, %xmm2 vmulss %xmm4, %xmm2, %xmm4 vmulss %xmm6, %xmm5, %xmm5 vmulss %xmm2, %xmm5, %xmm5 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm5, %xmm2 vsubss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm3, %xmm2, %xmm2 jmp 0x1eac075 addl %edi, %ecx movl %ecx, %edi shrl $0x1f, %edi addl %ecx, %edi sarl %edi movslq %edi, %rcx imulq 0x68(%rsi), %rdx movq 0x58(%rsi), %rdi movq 0x188(%rsi), %r8 movl (%rdi,%rdx), %edi imulq $0x38, %rcx, %rdx movq (%r8,%rdx), %rcx movq 0x10(%r8,%rdx), %r8 leal 0x1(%rdi), %r9d leal 0x2(%rdi), %r10d leal 0x3(%rdi), %edx imulq %r8, %r9 vmovaps (%rcx,%r9), %xmm0 imulq %r8, %r10 vmovaps (%rcx,%r10), %xmm2 imulq %r8, %rdi imulq %r8, %rdx vmovss 0x24c(%rsi), %xmm4 vmulss 0xc(%rcx,%r9), %xmm4, %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm1 # xmm1 = xmm0[0,1,2],xmm1[0] vmulss 0xc(%rcx,%r10), %xmm4, %xmm0 vinsertps $0x30, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm0[0] vxorps %xmm3, %xmm3, %xmm3 vsubps %xmm1, %xmm2, %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm5 vmovss %xmm5, %xmm3, %xmm6 # xmm6 = xmm5[0],xmm3[1,2,3] vrsqrt14ss %xmm6, %xmm3, %xmm6 vmulss 0x408ce(%rip), %xmm6, %xmm7 # 0x1eec718 vmulss 0x408ca(%rip), %xmm5, %xmm8 # 0x1eec71c vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm6, %xmm6, %xmm6 vmulss %xmm6, %xmm8, %xmm6 vaddss %xmm6, %xmm7, %xmm6 vbroadcastss %xmm6, %xmm6 vmulps %xmm6, %xmm0, %xmm0 vucomiss 0x45175(%rip), %xmm5 # 0x1ef0fe8 ja 0x1eabe81 vmovsd 0x40873(%rip), %xmm1 # 0x1eec6f0 jbe 0x1eabed9 jmp 0x1eabee1 vmovaps (%rcx,%rdi), %xmm5 vmovaps (%rcx,%rdx), %xmm6 vmulss 0xc(%rcx,%rdi), %xmm4, %xmm7 vinsertps $0x30, %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm7[0] vmulss 0xc(%rcx,%rdx), %xmm4, %xmm4 vinsertps $0x30, %xmm4, %xmm6, %xmm4 # xmm4 = xmm6[0,1,2],xmm4[0] vmulps %xmm3, 
%xmm4, %xmm4 vbroadcastss 0x4086c(%rip), %xmm6 # 0x1eec71c vfnmadd213ps %xmm4, %xmm6, %xmm2 # xmm2 = -(xmm6 * xmm2) + xmm4 vfmadd213ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm2 vfmadd231ps %xmm5, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm5) + xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3] vmulps %xmm3, %xmm1, %xmm1 vfmsub231ps %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm2) - xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,2,0,3] ja 0x1eabee1 vmovaps 0x4081f(%rip), %xmm0 # 0x1eec700 vdpps $0x7f, %xmm1, %xmm1, %xmm2 vcvtss2sd %xmm2, %xmm2, %xmm3 vucomisd 0x4511d(%rip), %xmm3 # 0x1ef1010 jbe 0x1eabf86 vxorps %xmm3, %xmm3, %xmm3 vmovss %xmm2, %xmm3, %xmm4 # xmm4 = xmm2[0],xmm3[1,2,3] vrsqrt14ss %xmm4, %xmm3, %xmm4 vmovss 0x40809(%rip), %xmm5 # 0x1eec718 vmulss %xmm5, %xmm4, %xmm6 vmovss 0x40c65(%rip), %xmm7 # 0x1eecb80 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm4, %xmm2, %xmm2 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm2, %xmm2 vsubss %xmm2, %xmm6, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm1, %xmm2 vshufps $0xc9, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,2,0,3] vshufps $0xc9, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,2,0,3] vmulps %xmm4, %xmm0, %xmm4 vfmsub231ps %xmm1, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm1) - xmm4 vshufps $0xc9, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[1,2,0,3] vdpps $0x7f, %xmm1, %xmm1, %xmm4 vmovss %xmm4, %xmm3, %xmm6 # xmm6 = xmm4[0],xmm3[1,2,3] vrsqrt14ss %xmm6, %xmm3, %xmm3 vmulss %xmm5, %xmm3, %xmm5 vmulss %xmm7, %xmm4, %xmm4 vmulss %xmm3, %xmm4, %xmm4 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm4, %xmm3 vsubss %xmm3, %xmm5, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm1, %xmm3, %xmm1 jmp 0x1eac075 vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0] vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3] vbroadcastss 0x74f28(%rip), %xmm3 # 0x1f20ec0 vxorps %xmm3, %xmm2, %xmm2 vxorps %xmm4, %xmm4, %xmm4 vunpckhps %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3] vmovss %xmm2, %xmm4, %xmm2 # xmm2 = xmm2[0],xmm4[1,2,3] vshufps $0x41, %xmm2, %xmm5, %xmm2 # xmm2 = xmm5[1,0],xmm2[0,1] vdpps $0x7f, %xmm2, %xmm2, %xmm5 vxorpd %xmm3, %xmm1, %xmm1 vinsertps $0x2a, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],zero,xmm0[0],zero vdpps $0x7f, %xmm1, %xmm1, %xmm3 vcmpltps %xmm5, %xmm3, %k0 vpmovm2d %k0, %xmm3 vpbroadcastd %xmm3, %xmm3 vpmovd2m %xmm3, %k1 vpcmpeqd %xmm3, %xmm3, %xmm3 vmovaps %xmm3, %xmm3 {%k1} {z} vblendvps %xmm3, %xmm2, %xmm1, %xmm1 vdpps $0x7f, %xmm1, %xmm1, %xmm2 vmovss %xmm2, %xmm4, %xmm3 # xmm3 = xmm2[0],xmm4[1,2,3] vrsqrt14ss %xmm3, %xmm4, %xmm3 vmovss 0x40715(%rip), %xmm5 # 0x1eec718 vmulss %xmm5, %xmm3, %xmm6 vmovss 0x40b71(%rip), %xmm7 # 0x1eecb80 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm6, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm1, %xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3] vmulps %xmm3, %xmm1, %xmm3 vfmsub231ps %xmm2, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm2) - xmm3 vshufps $0xc9, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[1,2,0,3] vdpps $0x7f, %xmm2, %xmm2, %xmm3 vmovss %xmm3, %xmm4, %xmm6 # xmm6 = xmm3[0],xmm4[1,2,3] vrsqrt14ss %xmm6, %xmm4, %xmm4 vmulss %xmm5, %xmm4, %xmm5 vmulss %xmm7, %xmm3, %xmm3 vmulss %xmm4, %xmm3, %xmm3 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm3, %xmm3 vsubss %xmm3, %xmm5, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm2, %xmm3, %xmm2 vmovaps %xmm1, (%rax) vmovaps %xmm2, 0x10(%rax) vmovaps %xmm0, 0x20(%rax) retq
/embree[P]embree/kernels/common/scene_curves.cpp
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::computeDirection(unsigned int, unsigned long) const
Vec3fa computeDirection(unsigned int primID, size_t time) const
{
  const Curve3ff c = getCurveScaledRadius(primID,time);
  const Vec3fa p0 = c.begin();
  const Vec3fa p3 = c.end();
  const Vec3fa axis1 = p3 - p0;
  return axis1;
}
movl %edx, %eax imulq 0x68(%rsi), %rax movq 0x58(%rsi), %rdx movq 0x188(%rsi), %r8 movl (%rdx,%rax), %edx imulq $0x38, %rcx, %rax movq (%r8,%rax), %rcx movq 0x10(%r8,%rax), %rax leal 0x1(%rdx), %r8d imulq %rax, %r8 vmovaps (%rcx,%r8), %xmm0 addl $0x2, %edx imulq %rax, %rdx vmovaps (%rcx,%rdx), %xmm1 movq %rdi, %rax vmovss 0x24c(%rsi), %xmm2 vmulss 0xc(%rcx,%r8), %xmm2, %xmm3 vinsertps $0x30, %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm3[0] vmulss 0xc(%rcx,%rdx), %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vsubps %xmm0, %xmm1, %xmm0 vmovaps %xmm0, (%rdi) retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::vlinearBounds(unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(size_t primID, const BBox1f& time_range) const { return linearBounds(primID,time_range); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x138, %rsp # imm = 0x138 leaq 0xc8(%rsp), %rax movq %rdx, (%rax) leaq 0x60(%rsp), %r14 movq %rax, (%r14) movq %rsi, 0x8(%r14) vmovss 0x28(%rsi), %xmm4 vmovss 0x2c(%rsi), %xmm0 vmovss (%rcx), %xmm1 vmovss 0x4(%rcx), %xmm2 vsubss %xmm0, %xmm1, %xmm1 vmovss 0x30(%rsi), %xmm3 vsubss %xmm0, %xmm3, %xmm3 vdivss %xmm3, %xmm1, %xmm1 vsubss %xmm0, %xmm2, %xmm0 vdivss %xmm3, %xmm0, %xmm2 vmovss %xmm1, 0x4(%rsp) vmulss %xmm1, %xmm4, %xmm0 vmovss %xmm0, 0x20(%rsp) vroundss $0x9, %xmm0, %xmm0, %xmm0 vmovss %xmm2, 0x50(%rsp) vmulss %xmm2, %xmm4, %xmm1 vmovss %xmm1, 0x40(%rsp) vroundss $0xa, %xmm1, %xmm1, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm2, %xmm0, %xmm3 vminss %xmm4, %xmm1, %xmm2 vmovss %xmm3, 0x10(%rsp) vcvttss2si %xmm3, %r15d vmovss %xmm2, 0x30(%rsp) vcvttss2si %xmm2, %r12d vcvttss2si %xmm0, %eax movq %rdi, %rbx testl %eax, %eax movl $0xffffffff, %r13d # imm = 0xFFFFFFFF cmovnsl %eax, %r13d vcvttss2si %xmm1, %eax vmovss %xmm4, 0xc(%rsp) vcvttss2si %xmm4, %ebp incl %ebp cmpl %ebp, %eax cmovll %eax, %ebp movslq %r15d, %rdx leaq 0x90(%rsp), %rdi movq %r14, %rsi callq 0x1eae97c movslq %r12d, %rdx leaq 0x70(%rsp), %rdi movq %r14, %rsi callq 0x1eae97c movl %ebp, %eax subl %r13d, %eax cmpl $0x1, %eax jne 0x1eae023 vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm8, %xmm8, %xmm8 vmaxss %xmm8, %xmm0, %xmm0 vmovss 0x3e77b(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm2 vbroadcastss %xmm0, %xmm0 vmovaps 0x70(%rsp), %xmm3 vmovaps 0x80(%rsp), %xmm4 vmulps %xmm3, %xmm0, %xmm5 vbroadcastss %xmm2, %xmm2 vmovaps 0x90(%rsp), %xmm6 vmovaps 0xa0(%rsp), %xmm7 vfmadd231ps %xmm6, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm6) + xmm5 vmulps %xmm4, %xmm0, %xmm0 vfmadd231ps %xmm2, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm2) + xmm0 vmovaps %xmm5, (%rbx) vmovaps %xmm0, 0x10(%rbx) vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm8, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm6, %xmm0, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm3) + xmm2 vmulps %xmm7, %xmm0, %xmm0 vfmadd231ps %xmm4, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm4) + xmm0 vmovaps %xmm2, 0x20(%rbx) vmovaps %xmm0, 0x30(%rbx) jmp 0x1eae203 incl %r15d movslq %r15d, %rdx leaq 0x110(%rsp), %r14 leaq 0x60(%rsp), %r15 movq %r14, %rdi movq %r15, %rsi callq 0x1eae97c decl %r12d movslq %r12d, %rdx leaq 0xf0(%rsp), %r12 movq %r12, %rdi movq %r15, %rsi callq 0x1eae97c vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm5, %xmm5, %xmm5 vmaxss %xmm5, %xmm0, %xmm0 vmovss 0x3e69e(%rip), %xmm4 # 0x1eec714 vbroadcastss %xmm0, %xmm1 vmulps (%r14), %xmm1, %xmm2 vsubss %xmm0, %xmm4, %xmm0 vbroadcastss %xmm0, %xmm0 vfmadd231ps 0x90(%rsp), %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2 vmulps 0x10(%r14), %xmm1, %xmm3 vfmadd231ps 0xa0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3 vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm5, %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps (%r12), %xmm0, %xmm5 vbroadcastss %xmm1, %xmm1 vfmadd231ps 0x70(%rsp), %xmm1, %xmm5 # xmm5 = (xmm1 * mem) + xmm5 vmulps 0x10(%r12), %xmm0, %xmm6 vfmadd231ps 0x80(%rsp), %xmm1, %xmm6 # xmm6 = (xmm1 * mem) + xmm6 leal 0x1(%r13), %eax cmpl %ebp, %eax jge 0x1eae1f0 vmovss 0x4(%rsp), %xmm1 vmovss 0x50(%rsp), %xmm0 vsubss %xmm1, %xmm0, %xmm0 vmovss %xmm0, 0x8(%rsp) movl %eax, %r14d notl %r13d addl %ebp, %r13d leaq 0xd0(%rsp), %r15 leaq 0x60(%rsp), %r12 vmovaps %xmm6, 0x10(%rsp) vmovaps %xmm5, 
0x20(%rsp) vmovaps %xmm3, 0x30(%rsp) vmovaps %xmm2, 0x40(%rsp) vcvtsi2ss %r14d, %xmm8, %xmm0 vdivss 0xc(%rsp), %xmm0, %xmm0 vsubss %xmm1, %xmm0, %xmm0 vdivss 0x8(%rsp), %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm5, %xmm4 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) + xmm4 vmovaps %xmm4, 0x50(%rsp) vmulps %xmm0, %xmm6, %xmm0 vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0 vmovaps %xmm0, 0xb0(%rsp) movq %r15, %rdi movq %r12, %rsi movq %r14, %rdx callq 0x1eae97c vmovaps 0x10(%rsp), %xmm6 vmovaps 0x20(%rsp), %xmm5 vmovss 0x3e57f(%rip), %xmm4 # 0x1eec714 vmovaps 0x30(%rsp), %xmm3 vmovaps 0x40(%rsp), %xmm2 vmovaps 0xd0(%rsp), %xmm0 vsubps 0x50(%rsp), %xmm0, %xmm0 vmovaps 0xe0(%rsp), %xmm1 vsubps 0xb0(%rsp), %xmm1, %xmm1 vxorps %xmm7, %xmm7, %xmm7 vminps %xmm7, %xmm0, %xmm0 vmaxps %xmm7, %xmm1, %xmm1 vaddps %xmm0, %xmm2, %xmm2 vaddps %xmm0, %xmm5, %xmm5 vaddps %xmm1, %xmm3, %xmm3 vaddps %xmm1, %xmm6, %xmm6 vmovss 0x4(%rsp), %xmm1 incq %r14 decl %r13d jne 0x1eae117 vmovaps %xmm2, (%rbx) vmovaps %xmm3, 0x10(%rbx) vmovaps %xmm5, 0x20(%rbx) vmovaps %xmm6, 0x30(%rbx) movq %rbx, %rax addq $0x138, %rsp # imm = 0x138 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/embree[P]embree/kernels/common/scene_curves.cpp
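For context, the assembly above inlines an LBBox3fa-style constructor that turns per-time-step bounds into linear bounds over a sub-range of the geometry's time range: normalize the query range, floor/ceil to segment indices, lerp at the boundaries, and widen until every interior time step is enclosed. A minimal standalone sketch of that scheme, assuming uniform time steps (BBox3, lerp, and the bounds callback are illustrative stand-ins, not Embree's types):

// Sketch only: bounds(i) yields the geometry bounds at time step i; the
// result is a pair (b0, b1) whose linear interpolation encloses the
// geometry over the whole query range [dt_lo, dt_hi].
#include <algorithm>
#include <cmath>
#include <functional>

struct BBox3 { float lo[3], hi[3]; };

static BBox3 lerp(const BBox3& a, const BBox3& b, float f) {
  BBox3 r;
  for (int k = 0; k < 3; k++) {
    r.lo[k] = (1.0f - f) * a.lo[k] + f * b.lo[k];
    r.hi[k] = (1.0f - f) * a.hi[k] + f * b.hi[k];
  }
  return r;
}

void linear_bounds(const std::function<BBox3(int)>& bounds,
                   float dt_lo, float dt_hi,   // query time range
                   float t_lo, float t_hi,     // geometry time range
                   float num_segments,         // cf. fnumTimeSegments
                   BBox3& b0, BBox3& b1)
{
  // normalize the query range into the geometry's time range,
  // then scale to fractional time-step coordinates
  const float r_lo = (dt_lo - t_lo) / (t_hi - t_lo);
  const float r_hi = (dt_hi - t_lo) / (t_hi - t_lo);
  const float f_lo = r_lo * num_segments;
  const float f_hi = r_hi * num_segments;
  const int ilower = (int)std::max(std::floor(f_lo), 0.0f);
  const int iupper = (int)std::min(std::ceil(f_hi), num_segments);

  const BBox3 blower = bounds(ilower);
  const BBox3 bupper = bounds(iupper);

  if (iupper - ilower == 1) {  // query inside one segment: pure lerp
    b0 = lerp(blower, bupper, std::max(f_lo - (float)ilower, 0.0f));
    b1 = lerp(bupper, blower, std::max((float)iupper - f_hi, 0.0f));
    return;
  }

  // several segments: lerp within the two boundary segments first ...
  b0 = lerp(blower, bounds(ilower + 1), std::max(f_lo - (float)ilower, 0.0f));
  b1 = lerp(bupper, bounds(iupper - 1), std::max((float)iupper - f_hi, 0.0f));

  // ... then widen both endpoints so the interpolated box at every
  // interior time step still encloses the exact bounds at that step
  // (the min-with-zero / max-with-zero corrections in the loop above)
  for (int i = ilower + 1; i < iupper; i++) {
    const float f = ((float)i / num_segments - r_lo) / (r_hi - r_lo);
    const BBox3 bt = lerp(b0, b1, f);
    const BBox3 bi = bounds(i);
    for (int k = 0; k < 3; k++) {
      const float dlo = std::min(bi.lo[k] - bt.lo[k], 0.0f);
      const float dhi = std::max(bi.hi[k] - bt.hi[k], 0.0f);
      b0.lo[k] += dlo; b1.lo[k] += dlo;
      b0.hi[k] += dhi; b1.hi[k] += dhi;
    }
  }
}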
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::vlinearBounds(embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(const LinearSpace3fa& space, size_t primID, const BBox1f& time_range) const { return linearBounds(space,primID,time_range); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x138, %rsp # imm = 0x138 leaq 0xc8(%rsp), %rax movq %rcx, (%rax) leaq 0xb0(%rsp), %r14 movq %rdx, (%r14) movq %rax, 0x8(%r14) movq %rsi, 0x10(%r14) vmovss 0x28(%rsi), %xmm4 vmovss 0x2c(%rsi), %xmm0 vmovss (%r8), %xmm1 vmovss 0x4(%r8), %xmm2 vsubss %xmm0, %xmm1, %xmm1 vmovss 0x30(%rsi), %xmm3 vsubss %xmm0, %xmm3, %xmm3 vdivss %xmm3, %xmm1, %xmm1 vsubss %xmm0, %xmm2, %xmm0 vdivss %xmm3, %xmm0, %xmm2 vmovss %xmm1, 0x4(%rsp) vmulss %xmm1, %xmm4, %xmm0 vmovss %xmm0, 0x20(%rsp) vroundss $0x9, %xmm0, %xmm0, %xmm0 vmovss %xmm2, 0x50(%rsp) vmulss %xmm2, %xmm4, %xmm1 vmovss %xmm1, 0x40(%rsp) vroundss $0xa, %xmm1, %xmm1, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm2, %xmm0, %xmm3 vminss %xmm4, %xmm1, %xmm2 vmovss %xmm3, 0x10(%rsp) vcvttss2si %xmm3, %r15d vmovss %xmm2, 0x30(%rsp) vcvttss2si %xmm2, %r12d vcvttss2si %xmm0, %eax movq %rdi, %rbx testl %eax, %eax movl $0xffffffff, %r13d # imm = 0xFFFFFFFF cmovnsl %eax, %r13d vcvttss2si %xmm1, %eax vmovss %xmm4, 0xc(%rsp) vcvttss2si %xmm4, %ebp incl %ebp cmpl %ebp, %eax cmovll %eax, %ebp movslq %r15d, %rdx leaq 0x80(%rsp), %rdi movq %r14, %rsi callq 0x1eaf282 movslq %r12d, %rdx leaq 0x60(%rsp), %rdi movq %r14, %rsi callq 0x1eaf282 movl %ebp, %eax subl %r13d, %eax cmpl $0x1, %eax jne 0x1eae3bf vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm8, %xmm8, %xmm8 vmaxss %xmm8, %xmm0, %xmm0 vmovss 0x3e3dc(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm2 vbroadcastss %xmm0, %xmm0 vmovaps 0x60(%rsp), %xmm3 vmovaps 0x70(%rsp), %xmm4 vmulps %xmm3, %xmm0, %xmm5 vbroadcastss %xmm2, %xmm2 vmovaps 0x80(%rsp), %xmm6 vmovaps 0x90(%rsp), %xmm7 vfmadd231ps %xmm6, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm6) + xmm5 vmulps %xmm4, %xmm0, %xmm0 vfmadd231ps %xmm2, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm2) + xmm0 vmovaps %xmm5, (%rbx) vmovaps %xmm0, 0x10(%rbx) vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm8, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm6, %xmm0, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm3) + xmm2 vmulps %xmm7, %xmm0, %xmm0 vfmadd231ps %xmm4, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm4) + xmm0 vmovaps %xmm2, 0x20(%rbx) vmovaps %xmm0, 0x30(%rbx) jmp 0x1eae5a2 incl %r15d movslq %r15d, %rdx leaq 0x110(%rsp), %r14 leaq 0xb0(%rsp), %r15 movq %r14, %rdi movq %r15, %rsi callq 0x1eaf282 decl %r12d movslq %r12d, %rdx leaq 0xf0(%rsp), %r12 movq %r12, %rdi movq %r15, %rsi callq 0x1eaf282 vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm5, %xmm5, %xmm5 vmaxss %xmm5, %xmm0, %xmm0 vmovss 0x3e2ff(%rip), %xmm4 # 0x1eec714 vbroadcastss %xmm0, %xmm1 vmulps (%r14), %xmm1, %xmm2 vsubss %xmm0, %xmm4, %xmm0 vbroadcastss %xmm0, %xmm0 vfmadd231ps 0x80(%rsp), %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2 vmulps 0x10(%r14), %xmm1, %xmm3 vfmadd231ps 0x90(%rsp), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3 vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm5, %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps (%r12), %xmm0, %xmm5 vbroadcastss %xmm1, %xmm1 vfmadd231ps 0x60(%rsp), %xmm1, %xmm5 # xmm5 = (xmm1 * mem) + xmm5 vmulps 0x10(%r12), %xmm0, %xmm6 vfmadd231ps 0x70(%rsp), %xmm1, %xmm6 # xmm6 = (xmm1 * mem) + xmm6 leal 0x1(%r13), %eax cmpl %ebp, %eax jge 0x1eae58f vmovss 0x4(%rsp), %xmm1 vmovss 0x50(%rsp), %xmm0 vsubss %xmm1, %xmm0, %xmm0 vmovss %xmm0, 0x8(%rsp) movl %eax, %r14d notl %r13d addl %ebp, %r13d leaq 0xd0(%rsp), %r15 leaq 0xb0(%rsp), %r12 vmovaps %xmm6, 0x10(%rsp) 
vmovaps %xmm5, 0x20(%rsp) vmovaps %xmm3, 0x30(%rsp) vmovaps %xmm2, 0x40(%rsp) vcvtsi2ss %r14d, %xmm8, %xmm0 vdivss 0xc(%rsp), %xmm0, %xmm0 vsubss %xmm1, %xmm0, %xmm0 vdivss 0x8(%rsp), %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm5, %xmm4 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) + xmm4 vmovaps %xmm4, 0x50(%rsp) vmulps %xmm0, %xmm6, %xmm0 vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0 vmovaps %xmm0, 0xa0(%rsp) movq %r15, %rdi movq %r12, %rsi movq %r14, %rdx callq 0x1eaf282 vmovaps 0x10(%rsp), %xmm6 vmovaps 0x20(%rsp), %xmm5 vmovss 0x3e1e0(%rip), %xmm4 # 0x1eec714 vmovaps 0x30(%rsp), %xmm3 vmovaps 0x40(%rsp), %xmm2 vmovaps 0xd0(%rsp), %xmm0 vsubps 0x50(%rsp), %xmm0, %xmm0 vmovaps 0xe0(%rsp), %xmm1 vsubps 0xa0(%rsp), %xmm1, %xmm1 vxorps %xmm7, %xmm7, %xmm7 vminps %xmm7, %xmm0, %xmm0 vmaxps %xmm7, %xmm1, %xmm1 vaddps %xmm0, %xmm2, %xmm2 vaddps %xmm0, %xmm5, %xmm5 vaddps %xmm1, %xmm3, %xmm3 vaddps %xmm1, %xmm6, %xmm6 vmovss 0x4(%rsp), %xmm1 incq %r14 decl %r13d jne 0x1eae4b6 vmovaps %xmm2, (%rbx) vmovaps %xmm3, 0x10(%rbx) vmovaps %xmm5, 0x20(%rbx) vmovaps %xmm6, 0x30(%rbx) movq %rbx, %rax addq $0x138, %rsp # imm = 0x138 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
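The vlinearBounds entries in this file are thin virtual shims over a __forceinline linearBounds worker, so each ISA-specific translation unit inlines the hot path while callers dispatch through a single virtual call. A compilable sketch of that pattern, with all types reduced to empty stand-ins:

// Sketch only: GeometryInterface / CurveGeometrySketch are stand-ins,
// not Embree's class hierarchy.
#include <cstddef>

struct LinearSpace3fa {};
struct BBox1f {};
struct LBBox3fa {};

struct GeometryInterface {
  virtual ~GeometryInterface() = default;
  // one virtual call per primitive, resolved to the ISA selected at runtime
  virtual LBBox3fa vlinearBounds(const LinearSpace3fa& space, size_t primID,
                                 const BBox1f& time_range) const = 0;
};

struct CurveGeometrySketch : GeometryInterface {
  // the heavy, inlineable worker (force-inlined in the real code)
  inline LBBox3fa linearBounds(const LinearSpace3fa&, size_t,
                               const BBox1f&) const { return LBBox3fa{}; }

  // one-line virtual forwarder, the same shape as the source line above
  LBBox3fa vlinearBounds(const LinearSpace3fa& space, size_t primID,
                         const BBox1f& time_range) const override {
    return linearBounds(space, primID, time_range);
  }
};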
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::vlinearBounds(embree::Vec3fa const&, float, float, embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(const Vec3fa& ofs, const float scale, const float r_scale0, const LinearSpace3fa& space, size_t primID, const BBox1f& time_range) const { return linearBounds(ofs,scale,r_scale0,space,primID,time_range); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x168, %rsp # imm = 0x168 leaq 0x6c(%rsp), %rax vmovss %xmm0, (%rax) leaq 0x68(%rsp), %r10 vmovss %xmm1, (%r10) leaq 0xc8(%rsp), %r11 movq %r8, (%r11) leaq 0xf0(%rsp), %r14 movq %rdx, (%r14) movq %rax, 0x8(%r14) movq %r10, 0x10(%r14) movq %rcx, 0x18(%r14) movq %r11, 0x20(%r14) movq %rsi, 0x28(%r14) vmovss 0x28(%rsi), %xmm4 vmovss 0x2c(%rsi), %xmm0 vmovss (%r9), %xmm1 vmovss 0x4(%r9), %xmm2 vsubss %xmm0, %xmm1, %xmm1 vmovss 0x30(%rsi), %xmm3 vsubss %xmm0, %xmm3, %xmm3 vdivss %xmm3, %xmm1, %xmm1 vsubss %xmm0, %xmm2, %xmm0 vdivss %xmm3, %xmm0, %xmm2 vmovss %xmm1, 0x4(%rsp) vmulss %xmm1, %xmm4, %xmm0 vmovss %xmm0, 0x20(%rsp) vroundss $0x9, %xmm0, %xmm0, %xmm0 vmovss %xmm2, 0x50(%rsp) vmulss %xmm2, %xmm4, %xmm1 vmovss %xmm1, 0x40(%rsp) vroundss $0xa, %xmm1, %xmm1, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm2, %xmm0, %xmm3 vminss %xmm4, %xmm1, %xmm2 vmovss %xmm3, 0x10(%rsp) vcvttss2si %xmm3, %r15d vmovss %xmm2, 0x30(%rsp) vcvttss2si %xmm2, %r12d vcvttss2si %xmm0, %eax movq %rdi, %rbx testl %eax, %eax movl $0xffffffff, %r13d # imm = 0xFFFFFFFF cmovnsl %eax, %r13d vcvttss2si %xmm1, %eax vmovss %xmm4, 0xc(%rsp) vcvttss2si %xmm4, %ebp incl %ebp cmpl %ebp, %eax cmovll %eax, %ebp movslq %r15d, %rdx leaq 0x90(%rsp), %rdi movq %r14, %rsi callq 0x1eafc8c movslq %r12d, %rdx leaq 0x70(%rsp), %rdi movq %r14, %rsi callq 0x1eafc8c movl %ebp, %eax subl %r13d, %eax cmpl $0x1, %eax jne 0x1eae781 vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm8, %xmm8, %xmm8 vmaxss %xmm8, %xmm0, %xmm0 vmovss 0x3e01d(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm2 vbroadcastss %xmm0, %xmm0 vmovaps 0x70(%rsp), %xmm3 vmovaps 0x80(%rsp), %xmm4 vmulps %xmm3, %xmm0, %xmm5 vbroadcastss %xmm2, %xmm2 vmovaps 0x90(%rsp), %xmm6 vmovaps 0xa0(%rsp), %xmm7 vfmadd231ps %xmm6, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm6) + xmm5 vmulps %xmm4, %xmm0, %xmm0 vfmadd231ps %xmm2, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm2) + xmm0 vmovaps %xmm5, (%rbx) vmovaps %xmm0, 0x10(%rbx) vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm8, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm6, %xmm0, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm3) + xmm2 vmulps %xmm7, %xmm0, %xmm0 vfmadd231ps %xmm4, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm4) + xmm0 vmovaps %xmm2, 0x20(%rbx) vmovaps %xmm0, 0x30(%rbx) jmp 0x1eae967 incl %r15d movslq %r15d, %rdx leaq 0x140(%rsp), %r14 leaq 0xf0(%rsp), %r15 movq %r14, %rdi movq %r15, %rsi callq 0x1eafc8c decl %r12d movslq %r12d, %rdx leaq 0x120(%rsp), %r12 movq %r12, %rdi movq %r15, %rsi callq 0x1eafc8c vmovss 0x20(%rsp), %xmm0 vsubss 0x10(%rsp), %xmm0, %xmm0 vxorps %xmm5, %xmm5, %xmm5 vmaxss %xmm5, %xmm0, %xmm0 vmovss 0x3df3d(%rip), %xmm4 # 0x1eec714 vbroadcastss %xmm0, %xmm1 vmulps (%r14), %xmm1, %xmm2 vsubss %xmm0, %xmm4, %xmm0 vbroadcastss %xmm0, %xmm0 vfmadd231ps 0x90(%rsp), %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2 vmulps 0x10(%r14), %xmm1, %xmm3 vfmadd231ps 0xa0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3 vmovss 0x30(%rsp), %xmm0 vsubss 0x40(%rsp), %xmm0, %xmm0 vmaxss %xmm5, %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps (%r12), %xmm0, %xmm5 vbroadcastss %xmm1, %xmm1 vfmadd231ps 0x70(%rsp), %xmm1, %xmm5 # xmm5 = (xmm1 * mem) + xmm5 vmulps 0x10(%r12), %xmm0, %xmm6 vfmadd231ps 0x80(%rsp), %xmm1, %xmm6 # xmm6 = (xmm1 * mem) + xmm6 leal 0x1(%r13), %eax cmpl %ebp, %eax jge 0x1eae954 vmovss 0x4(%rsp), %xmm1 vmovss 0x50(%rsp), %xmm0 vsubss %xmm1, 
%xmm0, %xmm0 vmovss %xmm0, 0x8(%rsp) movl %eax, %r14d notl %r13d addl %ebp, %r13d leaq 0xd0(%rsp), %r15 leaq 0xf0(%rsp), %r12 vmovaps %xmm6, 0x10(%rsp) vmovaps %xmm5, 0x20(%rsp) vmovaps %xmm3, 0x30(%rsp) vmovaps %xmm2, 0x40(%rsp) vcvtsi2ss %r14d, %xmm8, %xmm0 vdivss 0xc(%rsp), %xmm0, %xmm0 vsubss %xmm1, %xmm0, %xmm0 vdivss 0x8(%rsp), %xmm0, %xmm0 vsubss %xmm0, %xmm4, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm5, %xmm4 vbroadcastss %xmm1, %xmm1 vfmadd231ps %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) + xmm4 vmovaps %xmm4, 0x50(%rsp) vmulps %xmm0, %xmm6, %xmm0 vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0 vmovaps %xmm0, 0xb0(%rsp) movq %r15, %rdi movq %r12, %rsi movq %r14, %rdx callq 0x1eafc8c vmovaps 0x10(%rsp), %xmm6 vmovaps 0x20(%rsp), %xmm5 vmovss 0x3de1b(%rip), %xmm4 # 0x1eec714 vmovaps 0x30(%rsp), %xmm3 vmovaps 0x40(%rsp), %xmm2 vmovaps 0xd0(%rsp), %xmm0 vsubps 0x50(%rsp), %xmm0, %xmm0 vmovaps 0xe0(%rsp), %xmm1 vsubps 0xb0(%rsp), %xmm1, %xmm1 vxorps %xmm7, %xmm7, %xmm7 vminps %xmm7, %xmm0, %xmm0 vmaxps %xmm7, %xmm1, %xmm1 vaddps %xmm0, %xmm2, %xmm2 vaddps %xmm0, %xmm5, %xmm5 vaddps %xmm1, %xmm3, %xmm3 vaddps %xmm1, %xmm6, %xmm6 vmovss 0x4(%rsp), %xmm1 incq %r14 decl %r13d jne 0x1eae87b vmovaps %xmm2, (%rbx) vmovaps %xmm3, 0x10(%rbx) vmovaps %xmm5, 0x20(%rbx) vmovaps %xmm6, 0x30(%rbx) movq %rbx, %rax addq $0x168, %rsp # imm = 0x168 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/embree[P]embree/kernels/common/scene_curves.cpp
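This variant additionally carries an offset, a scale, and a curve-aligned space; inside the bounds lambda each control point is normalized and rotated, roughly p' = space * ((p - ofs) * scale), with the radius scaled separately (r_scale0 together with the point scale), which is the subtract/broadcast/FMA sequence visible in the disassembly. A scalar sketch of that transform (Vec3 and Space3 are stand-ins for Vec3fa and LinearSpace3fa):

// Sketch only: column-wise matrix*vector so it vectorizes to one
// broadcast plus one FMA per matrix column.
struct Vec3 { float x, y, z; };
struct Space3 { Vec3 vx, vy, vz; };   // column vectors of the frame

static Vec3 xfm_point(const Space3& s, const Vec3& ofs, float scale, Vec3 p) {
  // normalize into the local frame first: q = (p - ofs) * scale
  const float qx = (p.x - ofs.x) * scale;
  const float qy = (p.y - ofs.y) * scale;
  const float qz = (p.z - ofs.z) * scale;
  // broadcast each component of q and accumulate the columns
  Vec3 r;
  r.x = s.vx.x * qx + s.vy.x * qy + s.vz.x * qz;
  r.y = s.vx.y * qx + s.vy.y * qy + s.vz.y * qz;
  r.z = s.vx.z * qx + s.vy.z * qy + s.vz.z * qz;
  return r;
}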
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::linearBounds(unsigned long, embree::BBox<float> const&) const::'lambda'(unsigned long)::operator()(unsigned long) const
__forceinline LBBox3fa linearBounds(size_t primID, const BBox1f& dt) const { return LBBox3fa([&] (size_t itime) { return bounds(primID, itime); }, dt, this->time_range, fnumTimeSegments); }
pushq %r15 pushq %r14 pushq %rbx movq %rdi, %rax movq (%rsi), %rdi movq 0x8(%rsi), %rcx movq 0x58(%rcx), %rsi movq 0x68(%rcx), %r8 imulq (%rdi), %r8 movl (%rsi,%r8), %esi movq 0x188(%rcx), %rdi imulq $0x38, %rdx, %rdx movq (%rdi,%rdx), %r9 movq 0x10(%rdi,%rdx), %r10 movq %r10, %r11 imulq %rsi, %r11 vmovaps (%r9,%r11), %xmm0 leal 0x1(%rsi), %edi movq %r10, %rbx imulq %rdi, %rbx vmovaps (%r9,%rbx), %xmm1 leal 0x2(%rsi), %r8d movq %r10, %r14 imulq %r8, %r14 vmovaps (%r9,%r14), %xmm2 leal 0x3(%rsi), %r15d imulq %r15, %r10 vmovaps (%r9,%r10), %xmm3 vmovss 0x24c(%rcx), %xmm4 vmulss 0xc(%r9,%r11), %xmm4, %xmm5 vmulss 0xc(%r9,%rbx), %xmm4, %xmm6 vinsertps $0x30, %xmm5, %xmm0, %xmm5 # xmm5 = xmm0[0,1,2],xmm5[0] vinsertps $0x30, %xmm6, %xmm1, %xmm6 # xmm6 = xmm1[0,1,2],xmm6[0] vmulss 0xc(%r9,%r14), %xmm4, %xmm0 vmulss 0xc(%r9,%r10), %xmm4, %xmm1 movq 0x1a8(%rcx), %rcx movq (%rcx,%rdx), %r9 movq 0x10(%rcx,%rdx), %rcx imulq %rcx, %rsi vmovups (%r9,%rsi), %xmm7 imulq %rcx, %rdi vmovups (%r9,%rdi), %xmm8 imulq %rcx, %r8 vmovups (%r9,%r8), %xmm9 vinsertps $0x30, %xmm0, %xmm2, %xmm10 # xmm10 = xmm2[0,1,2],xmm0[0] imulq %r15, %rcx vmovups (%r9,%rcx), %xmm11 vbroadcastss 0x7245e(%rip), %xmm12 # 0x1f20ec0 vinsertps $0x30, %xmm1, %xmm3, %xmm4 # xmm4 = xmm3[0,1,2],xmm1[0] vmulps %xmm4, %xmm12, %xmm3 vxorps %xmm1, %xmm1, %xmm1 vxorps %xmm0, %xmm0, %xmm0 vfmadd213ps %xmm3, %xmm10, %xmm0 # xmm0 = (xmm10 * xmm0) + xmm3 vaddps %xmm0, %xmm6, %xmm0 vfmadd231ps %xmm12, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm12) + xmm0 vmulps %xmm1, %xmm4, %xmm2 vbroadcastss 0x3e0f1(%rip), %xmm13 # 0x1eecb80 vfmadd231ps %xmm13, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm13) + xmm2 vfmadd231ps %xmm1, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm1) + xmm2 vfnmadd231ps %xmm13, %xmm5, %xmm2 # xmm2 = -(xmm5 * xmm13) + xmm2 vmulps %xmm12, %xmm11, %xmm14 vxorps %xmm15, %xmm15, %xmm15 vfmadd213ps %xmm14, %xmm9, %xmm15 # xmm15 = (xmm9 * xmm15) + xmm14 vaddps %xmm15, %xmm8, %xmm15 vfmadd231ps %xmm12, %xmm7, %xmm15 # xmm15 = (xmm7 * xmm12) + xmm15 vmulps %xmm1, %xmm11, %xmm16 vfmadd231ps %xmm13, %xmm9, %xmm16 # xmm16 = (xmm9 * xmm13) + xmm16 vfmadd231ps %xmm1, %xmm8, %xmm16 # xmm16 = (xmm8 * xmm1) + xmm16 vfnmadd231ps %xmm13, %xmm7, %xmm16 # xmm16 = -(xmm7 * xmm13) + xmm16 vaddps %xmm3, %xmm10, %xmm3 vfmadd231ps %xmm1, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm1) + xmm3 vfmadd231ps %xmm12, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm12) + xmm3 vmulps %xmm4, %xmm13, %xmm4 vfmadd231ps %xmm10, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm10) + xmm4 vfnmadd231ps %xmm6, %xmm13, %xmm4 # xmm4 = -(xmm13 * xmm6) + xmm4 vfmadd231ps %xmm5, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm5) + xmm4 vaddps %xmm14, %xmm9, %xmm5 vfmadd231ps %xmm1, %xmm8, %xmm5 # xmm5 = (xmm8 * xmm1) + xmm5 vfmadd231ps %xmm12, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm12) + xmm5 vmulps %xmm13, %xmm11, %xmm6 vfmadd231ps %xmm9, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm9) + xmm6 vfnmadd231ps %xmm13, %xmm8, %xmm6 # xmm6 = -(xmm8 * xmm13) + xmm6 vfmadd231ps %xmm7, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm7) + xmm6 vshufps $0xc9, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[1,2,0,3] vshufps $0xc9, %xmm15, %xmm15, %xmm7 # xmm7 = xmm15[1,2,0,3] vmulps %xmm7, %xmm2, %xmm7 vfmsub231ps %xmm15, %xmm8, %xmm7 # xmm7 = (xmm8 * xmm15) - xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3] vshufps $0xc9, %xmm16, %xmm16, %xmm9 # xmm9 = xmm16[1,2,0,3] vmulps %xmm2, %xmm9, %xmm9 vfmsub231ps %xmm16, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm16) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,2,0,3] vshufps $0xc9, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm10 # 
xmm10 = xmm5[1,2,0,3] vmulps %xmm4, %xmm10, %xmm10 vfmsub231ps %xmm5, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm5) - xmm10 vshufps $0xc9, %xmm10, %xmm10, %xmm5 # xmm5 = xmm10[1,2,0,3] vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3] vmulps %xmm4, %xmm10, %xmm10 vfmsub231ps %xmm6, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm6) - xmm10 vshufps $0xc9, %xmm10, %xmm10, %xmm6 # xmm6 = xmm10[1,2,0,3] vdpps $0x7f, %xmm7, %xmm7, %xmm9 vmovss %xmm9, %xmm1, %xmm10 # xmm10 = xmm9[0],xmm1[1,2,3] vrsqrt14ss %xmm10, %xmm1, %xmm11 vmovss 0x3db8f(%rip), %xmm12 # 0x1eec718 vmulss %xmm12, %xmm11, %xmm13 vmovss 0x3db86(%rip), %xmm14 # 0x1eec71c vmulss %xmm14, %xmm9, %xmm15 vmulss %xmm11, %xmm15, %xmm15 vmulss %xmm11, %xmm11, %xmm11 vmulss %xmm11, %xmm15, %xmm11 vaddss %xmm11, %xmm13, %xmm11 vbroadcastss %xmm11, %xmm11 vmulps %xmm7, %xmm11, %xmm13 vdpps $0x7f, %xmm8, %xmm7, %xmm15 vbroadcastss %xmm9, %xmm16 vmulps %xmm8, %xmm16, %xmm8 vbroadcastss %xmm15, %xmm15 vmulps %xmm7, %xmm15, %xmm7 vsubps %xmm7, %xmm8, %xmm7 vrcp14ss %xmm10, %xmm1, %xmm8 vmovss 0x42413(%rip), %xmm10 # 0x1ef0ff8 vfnmadd213ss %xmm10, %xmm8, %xmm9 # xmm9 = -(xmm8 * xmm9) + xmm10 vmulss %xmm9, %xmm8, %xmm8 vdpps $0x7f, %xmm5, %xmm5, %xmm9 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vmulps %xmm7, %xmm11, %xmm7 vmovss %xmm9, %xmm1, %xmm8 # xmm8 = xmm9[0],xmm1[1,2,3] vrsqrt14ss %xmm8, %xmm1, %xmm11 vmulss %xmm12, %xmm11, %xmm12 vmulss %xmm14, %xmm9, %xmm14 vmulss %xmm11, %xmm14, %xmm14 vmulss %xmm11, %xmm11, %xmm11 vmulss %xmm11, %xmm14, %xmm11 vaddss %xmm11, %xmm12, %xmm11 vbroadcastss %xmm11, %xmm11 vmulps %xmm5, %xmm11, %xmm12 vdpps $0x7f, %xmm6, %xmm5, %xmm14 vbroadcastss %xmm9, %xmm15 vmulps %xmm6, %xmm15, %xmm6 vbroadcastss %xmm14, %xmm14 vmulps %xmm5, %xmm14, %xmm5 vsubps %xmm5, %xmm6, %xmm5 vrcp14ss %xmm8, %xmm1, %xmm1 vfnmadd213ss %xmm10, %xmm1, %xmm9 # xmm9 = -(xmm1 * xmm9) + xmm10 vmulss %xmm1, %xmm9, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm1, %xmm5, %xmm1 vmulps %xmm1, %xmm11, %xmm5 vshufps $0xff, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[3,3,3,3] vmulps %xmm1, %xmm13, %xmm6 vsubps %xmm6, %xmm0, %xmm10 vshufps $0xff, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[3,3,3,3] vmulps %xmm13, %xmm8, %xmm8 vmulps %xmm7, %xmm1, %xmm1 vaddps %xmm1, %xmm8, %xmm7 vsubps %xmm7, %xmm2, %xmm8 vaddps %xmm6, %xmm0, %xmm31 vaddps %xmm7, %xmm2, %xmm0 vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3] vmulps %xmm2, %xmm12, %xmm6 vsubps %xmm6, %xmm3, %xmm13 vshufps $0xff, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[3,3,3,3] vmulps %xmm7, %xmm12, %xmm7 vmulps %xmm5, %xmm2, %xmm2 vaddps %xmm2, %xmm7, %xmm5 vsubps %xmm5, %xmm4, %xmm7 vaddps %xmm6, %xmm3, %xmm29 vaddps %xmm5, %xmm4, %xmm3 vbroadcastss 0x431ee(%rip), %xmm4 # 0x1ef1ebc vmulps %xmm4, %xmm8, %xmm5 vaddps %xmm5, %xmm10, %xmm5 vmulps %xmm4, %xmm7, %xmm6 vsubps %xmm6, %xmm13, %xmm6 vmulps %xmm4, %xmm0, %xmm0 vaddps %xmm0, %xmm31, %xmm0 vmovups %ymm0, -0x60(%rsp) vmulps %xmm4, %xmm3, %xmm0 vsubps %xmm0, %xmm29, %xmm30 vbroadcastss 0x63a03(%rip), %ymm2 # 0x1f12704 vbroadcastss %xmm10, %ymm0 vpermps %ymm10, %ymm2, %ymm3 vbroadcastss 0x721c8(%rip), %ymm11 # 0x1f20edc vpermps %ymm10, %ymm11, %ymm4 vbroadcastss %xmm5, %ymm17 vpermps %ymm5, %ymm2, %ymm20 vpermps %ymm5, %ymm11, %ymm22 vbroadcastss %xmm6, %ymm23 vpermps %ymm6, %ymm2, %ymm24 vpermps %ymm6, %ymm11, %ymm25 vbroadcastss %xmm13, %ymm26 vpermps %ymm13, %ymm2, %ymm27 leaq 0x278594(%rip), %rcx # 0x21272e4 vmovups 0x1dc(%rcx), %ymm9 vmovups 0x660(%rcx), %ymm10 vpermps %ymm13, %ymm11, %ymm28 vmovups 0xae4(%rcx), %ymm13 vmovups 0xf68(%rcx), %ymm16 vmulps %ymm16, 
%ymm26, %ymm14 vmulps %ymm16, %ymm27, %ymm18 vmulps %ymm16, %ymm28, %ymm21 vfmadd231ps %ymm23, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm23) + ymm14 vfmadd231ps %ymm24, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm24) + ymm18 vfmadd231ps %ymm25, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm25) + ymm21 vfmadd231ps %ymm17, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm17) + ymm14 vfmadd231ps %ymm20, %ymm10, %ymm18 # ymm18 = (ymm10 * ymm20) + ymm18 vfmadd231ps %ymm22, %ymm10, %ymm21 # ymm21 = (ymm10 * ymm22) + ymm21 vfmadd231ps %ymm0, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm0) + ymm14 vfmadd231ps %ymm3, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm3) + ymm18 vmovups 0x13ec(%rcx), %ymm6 vmovups 0x1870(%rcx), %ymm12 vmovups 0x1cf4(%rcx), %ymm15 vmovups 0x2178(%rcx), %ymm19 vfmadd231ps %ymm4, %ymm9, %ymm21 # ymm21 = (ymm9 * ymm4) + ymm21 vmulps %ymm19, %ymm26, %ymm26 vmulps %ymm19, %ymm27, %ymm27 vmulps %ymm19, %ymm28, %ymm28 vfmadd231ps %ymm23, %ymm15, %ymm26 # ymm26 = (ymm15 * ymm23) + ymm26 vfmadd231ps %ymm24, %ymm15, %ymm27 # ymm27 = (ymm15 * ymm24) + ymm27 vfmadd231ps %ymm25, %ymm15, %ymm28 # ymm28 = (ymm15 * ymm25) + ymm28 vfmadd231ps %ymm17, %ymm12, %ymm26 # ymm26 = (ymm12 * ymm17) + ymm26 vfmadd231ps %ymm20, %ymm12, %ymm27 # ymm27 = (ymm12 * ymm20) + ymm27 vfmadd231ps %ymm22, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm22) + ymm28 vfmadd231ps %ymm0, %ymm6, %ymm26 # ymm26 = (ymm6 * ymm0) + ymm26 vfmadd231ps %ymm3, %ymm6, %ymm27 # ymm27 = (ymm6 * ymm3) + ymm27 vfmadd231ps %ymm4, %ymm6, %ymm28 # ymm28 = (ymm6 * ymm4) + ymm28 vbroadcastss 0x7269d(%rip), %ymm20 # 0x1f214d0 vmulps %ymm20, %ymm26, %ymm3 vmulps %ymm20, %ymm27, %ymm4 vmulps %ymm20, %ymm28, %ymm7 vxorps %xmm5, %xmm5, %xmm5 vblendps $0x1, %ymm5, %ymm3, %ymm8 # ymm8 = ymm5[0],ymm3[1,2,3,4,5,6,7] vblendps $0x1, %ymm5, %ymm4, %ymm1 # ymm1 = ymm5[0],ymm4[1,2,3,4,5,6,7] vblendps $0x1, %ymm5, %ymm7, %ymm0 # ymm0 = ymm5[0],ymm7[1,2,3,4,5,6,7] vsubps %ymm8, %ymm14, %ymm8 vsubps %ymm1, %ymm18, %ymm1 vsubps %ymm0, %ymm21, %ymm0 vblendps $0x80, %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7] vblendps $0x80, %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7] vblendps $0x80, %ymm5, %ymm7, %ymm7 # ymm7 = ymm7[0,1,2,3,4,5,6],ymm5[7] vaddps %ymm3, %ymm14, %ymm24 vaddps %ymm4, %ymm18, %ymm25 vaddps %ymm7, %ymm21, %ymm7 vbroadcastss 0x3cb86(%rip), %ymm22 # 0x1eeba20 vminps %ymm14, %ymm22, %ymm3 vminps %ymm18, %ymm22, %ymm17 vminps %ymm21, %ymm22, %ymm23 vminps %ymm24, %ymm8, %ymm4 vminps %ymm4, %ymm3, %ymm3 vmovups %ymm3, -0x40(%rsp) vminps %ymm25, %ymm1, %ymm3 vminps %ymm3, %ymm17, %ymm3 vmovups %ymm3, -0x20(%rsp) vminps %ymm7, %ymm0, %ymm17 vminps %ymm17, %ymm23, %ymm17 vbroadcastss 0x3dca0(%rip), %ymm23 # 0x1eecb84 vmaxps %ymm14, %ymm23, %ymm14 vmaxps %ymm18, %ymm23, %ymm18 vmaxps %ymm21, %ymm23, %ymm26 vmaxps %ymm24, %ymm8, %ymm8 vmaxps %ymm8, %ymm14, %ymm21 vmaxps %ymm25, %ymm1, %ymm1 vmaxps %ymm1, %ymm18, %ymm18 vmaxps %ymm7, %ymm0, %ymm0 vmaxps %ymm0, %ymm26, %ymm14 vbroadcastss %xmm31, %ymm4 vpermps %ymm31, %ymm2, %ymm8 vpermps %ymm31, %ymm11, %ymm24 vmovups -0x60(%rsp), %ymm0 vbroadcastss %xmm0, %ymm25 vpermps %ymm0, %ymm2, %ymm26 vpermps %ymm0, %ymm11, %ymm27 vbroadcastss %xmm30, %ymm28 vpermps %ymm30, %ymm2, %ymm31 vpermps %ymm30, %ymm11, %ymm30 vbroadcastss %xmm29, %ymm0 vpermps %ymm29, %ymm2, %ymm3 vpermps %ymm29, %ymm11, %ymm11 vmulps %ymm16, %ymm0, %ymm1 vmulps %ymm16, %ymm3, %ymm2 vmulps %ymm16, %ymm11, %ymm7 vfmadd231ps %ymm28, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm28) + ymm1 vfmadd231ps %ymm31, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm31) + ymm2 vfmadd231ps %ymm13, %ymm30, 
%ymm7 # ymm7 = (ymm30 * ymm13) + ymm7 vfmadd231ps %ymm25, %ymm10, %ymm1 # ymm1 = (ymm10 * ymm25) + ymm1 vfmadd231ps %ymm26, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm26) + ymm2 vfmadd231ps %ymm10, %ymm27, %ymm7 # ymm7 = (ymm27 * ymm10) + ymm7 vfmadd231ps %ymm4, %ymm9, %ymm1 # ymm1 = (ymm9 * ymm4) + ymm1 vfmadd231ps %ymm8, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm8) + ymm2 vfmadd231ps %ymm9, %ymm24, %ymm7 # ymm7 = (ymm24 * ymm9) + ymm7 vmulps %ymm19, %ymm0, %ymm0 vmulps %ymm19, %ymm3, %ymm3 vmulps %ymm19, %ymm11, %ymm9 vfmadd231ps %ymm28, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm28) + ymm0 vfmadd231ps %ymm31, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm31) + ymm3 vfmadd231ps %ymm30, %ymm15, %ymm9 # ymm9 = (ymm15 * ymm30) + ymm9 vfmadd231ps %ymm25, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm25) + ymm0 vfmadd231ps %ymm26, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm26) + ymm3 vfmadd231ps %ymm27, %ymm12, %ymm9 # ymm9 = (ymm12 * ymm27) + ymm9 vfmadd231ps %ymm4, %ymm6, %ymm0 # ymm0 = (ymm6 * ymm4) + ymm0 vfmadd231ps %ymm8, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm8) + ymm3 vfmadd231ps %ymm24, %ymm6, %ymm9 # ymm9 = (ymm6 * ymm24) + ymm9 vmulps %ymm20, %ymm0, %ymm0 vmulps %ymm20, %ymm3, %ymm3 vmulps %ymm20, %ymm9, %ymm4 vblendps $0x1, %ymm5, %ymm0, %ymm6 # ymm6 = ymm5[0],ymm0[1,2,3,4,5,6,7] vblendps $0x1, %ymm5, %ymm3, %ymm8 # ymm8 = ymm5[0],ymm3[1,2,3,4,5,6,7] vblendps $0x1, %ymm5, %ymm4, %ymm9 # ymm9 = ymm5[0],ymm4[1,2,3,4,5,6,7] vsubps %ymm6, %ymm1, %ymm6 vsubps %ymm8, %ymm2, %ymm10 vsubps %ymm9, %ymm7, %ymm9 vblendps $0x80, %ymm5, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3,4,5,6],ymm5[7] vblendps $0x80, %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7] vblendps $0x80, %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7] vaddps %ymm0, %ymm1, %ymm0 vaddps %ymm3, %ymm2, %ymm3 vaddps %ymm4, %ymm7, %ymm4 vminps %ymm1, %ymm22, %ymm5 vminps %ymm2, %ymm22, %ymm8 vminps %ymm7, %ymm22, %ymm11 vminps %ymm0, %ymm6, %ymm12 vminps %ymm12, %ymm5, %ymm12 vminps %ymm3, %ymm10, %ymm5 vminps %ymm5, %ymm8, %ymm8 vminps %ymm4, %ymm9, %ymm5 vminps %ymm5, %ymm11, %ymm5 vmaxps %ymm1, %ymm23, %ymm1 vmaxps %ymm2, %ymm23, %ymm11 vmaxps %ymm7, %ymm23, %ymm7 vmaxps %ymm0, %ymm6, %ymm0 vmaxps %ymm0, %ymm1, %ymm2 vmaxps %ymm3, %ymm10, %ymm0 vmaxps %ymm0, %ymm11, %ymm1 vmaxps %ymm4, %ymm9, %ymm0 vmaxps %ymm0, %ymm7, %ymm0 vshufps $0xb1, %ymm21, %ymm21, %ymm3 # ymm3 = ymm21[1,0,3,2,5,4,7,6] vmaxps %ymm3, %ymm21, %ymm3 vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2] vmaxps %ymm4, %ymm3, %ymm3 vextractf128 $0x1, %ymm3, %xmm4 vmaxps %xmm4, %xmm3, %xmm3 vshufps $0xb1, %ymm18, %ymm18, %ymm4 # ymm4 = ymm18[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm18, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm6 # ymm6 = ymm4[1,0,3,2] vmaxps %ymm6, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm6 vmaxps %xmm6, %xmm4, %xmm4 vunpcklps %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] vshufps $0xb1, %ymm14, %ymm14, %ymm4 # ymm4 = ymm14[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm14, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm6 # ymm6 = ymm4[1,0,3,2] vmaxps %ymm6, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm6 vmaxps %xmm6, %xmm4, %xmm4 vinsertps $0x28, %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0,1],xmm4[0],zero vmovups -0x40(%rsp), %ymm4 vshufps $0xb1, %ymm4, %ymm4, %ymm3 # ymm3 = ymm4[1,0,3,2,5,4,7,6] vminps %ymm3, %ymm4, %ymm3 vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2] vminps %ymm4, %ymm3, %ymm3 vextractf128 $0x1, %ymm3, %xmm4 vminps %xmm4, %xmm3, %xmm3 vmovups -0x20(%rsp), %ymm7 vshufps $0xb1, %ymm7, %ymm7, %ymm4 # ymm4 = ymm7[1,0,3,2,5,4,7,6] vminps %ymm4, %ymm7, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm7 # 
ymm7 = ymm4[1,0,3,2] vminps %ymm7, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm7 vminps %xmm7, %xmm4, %xmm4 vunpcklps %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] vshufps $0xb1, %ymm17, %ymm17, %ymm4 # ymm4 = ymm17[1,0,3,2,5,4,7,6] vminps %ymm4, %ymm17, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm7 # ymm7 = ymm4[1,0,3,2] vminps %ymm7, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm7 vminps %xmm7, %xmm4, %xmm4 vinsertps $0x28, %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0,1],xmm4[0],zero vshufps $0xb1, %ymm12, %ymm12, %ymm4 # ymm4 = ymm12[1,0,3,2,5,4,7,6] vminps %ymm4, %ymm12, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm7 # ymm7 = ymm4[1,0,3,2] vminps %ymm7, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm7 vminps %xmm7, %xmm4, %xmm4 vshufps $0xb1, %ymm8, %ymm8, %ymm7 # ymm7 = ymm8[1,0,3,2,5,4,7,6] vminps %ymm7, %ymm8, %ymm7 vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2] vminps %ymm8, %ymm7, %ymm7 vextractf128 $0x1, %ymm7, %xmm8 vminps %xmm8, %xmm7, %xmm7 vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] vshufps $0xb1, %ymm5, %ymm5, %ymm7 # ymm7 = ymm5[1,0,3,2,5,4,7,6] vminps %ymm7, %ymm5, %ymm5 vshufpd $0x5, %ymm5, %ymm5, %ymm7 # ymm7 = ymm5[1,0,3,2] vminps %ymm7, %ymm5, %ymm5 vextractf128 $0x1, %ymm5, %xmm7 vminps %xmm7, %xmm5, %xmm5 vinsertps $0x28, %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[0,1],xmm5[0],zero vminps %xmm4, %xmm3, %xmm3 vshufps $0xb1, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm2, %ymm2 vshufpd $0x5, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,0,3,2] vmaxps %ymm4, %ymm2, %ymm2 vextractf128 $0x1, %ymm2, %xmm4 vmaxps %xmm4, %xmm2, %xmm2 vshufps $0xb1, %ymm1, %ymm1, %ymm4 # ymm4 = ymm1[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm1, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm4 # ymm4 = ymm1[1,0,3,2] vmaxps %ymm4, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm4 vmaxps %xmm4, %xmm1, %xmm1 vunpcklps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vshufps $0xb1, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2,5,4,7,6] vmaxps %ymm2, %ymm0, %ymm0 vshufpd $0x5, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2] vmaxps %ymm2, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm2 vmaxps %xmm2, %xmm0, %xmm0 vinsertps $0x28, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,1],xmm0[0],zero vmaxps %xmm0, %xmm6, %xmm0 vbroadcastss 0x71c87(%rip), %xmm1 # 0x1f20ec4 vandps %xmm1, %xmm3, %xmm2 vandps %xmm1, %xmm0, %xmm1 vmaxps %xmm1, %xmm2, %xmm1 vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3] vmaxss %xmm1, %xmm2, %xmm2 vshufpd $0x1, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,0] vmaxss %xmm2, %xmm1, %xmm1 vmulss 0x41d82(%rip), %xmm1, %xmm1 # 0x1ef0fe4 vbroadcastss %xmm1, %xmm1 vsubps %xmm1, %xmm3, %xmm2 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) popq %rbx popq %r14 popq %r15 vzeroupper retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
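This lambda computes the bounds of one Catmull-Rom segment (four control points with radii): the wide FMA blocks against constant tables are the spline basis evaluated at a batch of uniform samples, followed by min/max reductions and an epsilon enlargement. A simplified scalar sketch under those assumptions (the real kernel additionally builds an oriented frame from vertex normals and uses precomputed basis tables; the 4-ulp pad below is illustrative):

// Sketch only: sample the Catmull-Rom segment, take min/max of
// position +/- radius, then pad by a relative epsilon.
#include <algorithm>
#include <cmath>

struct Vec4 { float x, y, z, r; };  // position + radius, like Vec3ff

static void catmull_rom_weights(float t, float w[4]) {
  const float t2 = t * t, t3 = t2 * t;
  w[0] = 0.5f * (-t3 + 2.0f * t2 - t);
  w[1] = 0.5f * (3.0f * t3 - 5.0f * t2 + 2.0f);
  w[2] = 0.5f * (-3.0f * t3 + 4.0f * t2 + t);
  w[3] = 0.5f * (t3 - t2);
}

void segment_bounds(const Vec4 p[4], int samples, float lo[3], float hi[3]) {
  lo[0] = lo[1] = lo[2] = INFINITY;
  hi[0] = hi[1] = hi[2] = -INFINITY;
  for (int i = 0; i <= samples; i++) {
    float w[4];
    catmull_rom_weights((float)i / (float)samples, w);
    float c[4] = {0, 0, 0, 0};  // x, y, z, radius at this parameter
    for (int j = 0; j < 4; j++) {
      c[0] += w[j] * p[j].x; c[1] += w[j] * p[j].y;
      c[2] += w[j] * p[j].z; c[3] += w[j] * p[j].r;
    }
    for (int k = 0; k < 3; k++) {
      lo[k] = std::min(lo[k], c[k] - c[3]);
      hi[k] = std::max(hi[k], c[k] + c[3]);
    }
  }
  // pad by an epsilon relative to the largest coordinate magnitude,
  // mirroring the abs/max/mulss enlargement at the end of the assembly
  float m = 0.0f;
  for (int k = 0; k < 3; k++)
    m = std::max(m, std::max(std::fabs(lo[k]), std::fabs(hi[k])));
  const float eps = 4.0f * 1.19209290e-7f * m;  // ~4 ulps, illustrative
  for (int k = 0; k < 3; k++) { lo[k] -= eps; hi[k] += eps; }
}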
embree::avx512::CurveGeometryISA<(embree::Geometry::GType)2, embree::avx512::CurveGeometryInterface, embree::CatmullRomCurveT>::linearBounds(embree::Vec3fa const&, float, float, embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const::'lambda'(unsigned long)::operator()(unsigned long) const
PrimInfo createPrimRefArray(PrimRef* prims, const range<size_t>& r, size_t k, unsigned int geomID) const { PrimInfo pinfo(empty); for (size_t j=r.begin(); j<r.end(); j++) { if (!valid(ctype, j, make_range<size_t>(0, numTimeSegments()))) continue; const BBox3fa box = bounds(j); const PrimRef prim(box,geomID,unsigned(j)); pinfo.add_center2(prim); prims[k++] = prim; } return pinfo; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, %rax movq 0x28(%rsi), %r9 movq (%rsi), %r8 movq 0x8(%rsi), %rdi movq 0x18(%rsi), %rcx movq 0x20(%rsi), %rsi movq 0x58(%r9), %r10 movq 0x68(%r9), %r11 imulq (%rsi), %r11 movl (%r10,%r11), %esi movq 0x188(%r9), %r10 imulq $0x38, %rdx, %rdx movq (%r10,%rdx), %rbx movq 0x10(%r10,%rdx), %r14 movq %r14, %r15 imulq %rsi, %r15 vmovaps (%rbx,%r15), %xmm0 leal 0x1(%rsi), %r10d movq %r14, %r12 imulq %r10, %r12 vmovaps (%rbx,%r12), %xmm1 leal 0x2(%rsi), %r11d movq %r14, %r13 imulq %r11, %r13 vmovaps (%rbx,%r13), %xmm2 leal 0x3(%rsi), %ebp imulq %rbp, %r14 vmovaps (%rbx,%r14), %xmm3 vmovss 0x24c(%r9), %xmm4 vmulss 0xc(%rbx,%r15), %xmm4, %xmm5 vinsertps $0x30, %xmm5, %xmm0, %xmm5 # xmm5 = xmm0[0,1,2],xmm5[0] vmulss 0xc(%rbx,%r12), %xmm4, %xmm0 vinsertps $0x30, %xmm0, %xmm1, %xmm6 # xmm6 = xmm1[0,1,2],xmm0[0] vmulss 0xc(%rbx,%r13), %xmm4, %xmm0 vinsertps $0x30, %xmm0, %xmm2, %xmm7 # xmm7 = xmm2[0,1,2],xmm0[0] vmulss 0xc(%rbx,%r14), %xmm4, %xmm0 vinsertps $0x30, %xmm0, %xmm3, %xmm4 # xmm4 = xmm3[0,1,2],xmm0[0] movq 0x1a8(%r9), %r9 movq (%r9,%rdx), %rbx movq 0x10(%r9,%rdx), %rdx imulq %rdx, %rsi vmovups (%rbx,%rsi), %xmm8 imulq %rdx, %r10 vmovups (%rbx,%r10), %xmm9 imulq %rdx, %r11 vmovups (%rbx,%r11), %xmm10 imulq %rbp, %rdx vmovups (%rbx,%rdx), %xmm11 vbroadcastss 0x71138(%rip), %xmm12 # 0x1f20ec0 vmulps %xmm4, %xmm12, %xmm3 vxorps %xmm2, %xmm2, %xmm2 vxorps %xmm0, %xmm0, %xmm0 vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3 vaddps %xmm0, %xmm6, %xmm0 vfmadd231ps %xmm12, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm12) + xmm0 vmulps %xmm2, %xmm4, %xmm1 vbroadcastss 0x3cdd1(%rip), %xmm13 # 0x1eecb80 vfmadd231ps %xmm13, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm13) + xmm1 vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1 vfnmadd231ps %xmm13, %xmm5, %xmm1 # xmm1 = -(xmm5 * xmm13) + xmm1 vmulps %xmm12, %xmm11, %xmm14 vxorps %xmm15, %xmm15, %xmm15 vfmadd213ps %xmm14, %xmm10, %xmm15 # xmm15 = (xmm10 * xmm15) + xmm14 vaddps %xmm15, %xmm9, %xmm15 vfmadd231ps %xmm12, %xmm8, %xmm15 # xmm15 = (xmm8 * xmm12) + xmm15 vmulps %xmm2, %xmm11, %xmm16 vfmadd231ps %xmm13, %xmm10, %xmm16 # xmm16 = (xmm10 * xmm13) + xmm16 vfmadd231ps %xmm2, %xmm9, %xmm16 # xmm16 = (xmm9 * xmm2) + xmm16 vfnmadd231ps %xmm13, %xmm8, %xmm16 # xmm16 = -(xmm8 * xmm13) + xmm16 vaddps %xmm3, %xmm7, %xmm3 vfmadd231ps %xmm2, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm2) + xmm3 vfmadd231ps %xmm12, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm12) + xmm3 vmulps %xmm4, %xmm13, %xmm4 vfmadd231ps %xmm7, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm7) + xmm4 vfnmadd231ps %xmm6, %xmm13, %xmm4 # xmm4 = -(xmm13 * xmm6) + xmm4 vfmadd231ps %xmm5, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm5) + xmm4 vaddps %xmm14, %xmm10, %xmm5 vfmadd231ps %xmm2, %xmm9, %xmm5 # xmm5 = (xmm9 * xmm2) + xmm5 vfmadd231ps %xmm12, %xmm8, %xmm5 # xmm5 = (xmm8 * xmm12) + xmm5 vmulps %xmm13, %xmm11, %xmm6 vfmadd231ps %xmm10, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm10) + xmm6 vfnmadd231ps %xmm13, %xmm9, %xmm6 # xmm6 = -(xmm9 * xmm13) + xmm6 vfmadd231ps %xmm8, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm8) + xmm6 vshufps $0xc9, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,2,0,3] vshufps $0xc9, %xmm15, %xmm15, %xmm7 # xmm7 = xmm15[1,2,0,3] vmulps %xmm7, %xmm1, %xmm7 vfmsub231ps %xmm15, %xmm8, %xmm7 # xmm7 = (xmm8 * xmm15) - xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3] vshufps $0xc9, %xmm16, %xmm16, %xmm9 # xmm9 = xmm16[1,2,0,3] vmulps %xmm1, %xmm9, %xmm9 vfmsub231ps %xmm16, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm16) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = 
xmm9[1,2,0,3] vshufps $0xc9, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3] vmulps %xmm4, %xmm10, %xmm10 vfmsub231ps %xmm5, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm5) - xmm10 vshufps $0xc9, %xmm10, %xmm10, %xmm5 # xmm5 = xmm10[1,2,0,3] vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3] vmulps %xmm4, %xmm10, %xmm10 vdpps $0x7f, %xmm7, %xmm7, %xmm11 vfmsub231ps %xmm6, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm6) - xmm10 vshufps $0xc9, %xmm10, %xmm10, %xmm6 # xmm6 = xmm10[1,2,0,3] vmovss %xmm11, %xmm2, %xmm9 # xmm9 = xmm11[0],xmm2[1,2,3] vrsqrt14ss %xmm9, %xmm2, %xmm10 vmovss 0x3c86f(%rip), %xmm12 # 0x1eec718 vmulss %xmm12, %xmm10, %xmm13 vmovss 0x3c866(%rip), %xmm14 # 0x1eec71c vmulss %xmm14, %xmm11, %xmm15 vmulss %xmm10, %xmm15, %xmm15 vmulss %xmm10, %xmm10, %xmm10 vmulss %xmm10, %xmm15, %xmm10 vaddss %xmm10, %xmm13, %xmm10 vdpps $0x7f, %xmm8, %xmm7, %xmm13 vbroadcastss %xmm10, %xmm10 vmulps %xmm7, %xmm10, %xmm15 vbroadcastss %xmm11, %xmm16 vmulps %xmm8, %xmm16, %xmm8 vbroadcastss %xmm13, %xmm13 vmulps %xmm7, %xmm13, %xmm7 vsubps %xmm7, %xmm8, %xmm7 vrcp14ss %xmm9, %xmm2, %xmm8 vmovss 0x410f3(%rip), %xmm9 # 0x1ef0ff8 vfnmadd213ss %xmm9, %xmm8, %xmm11 # xmm11 = -(xmm8 * xmm11) + xmm9 vmulss %xmm11, %xmm8, %xmm8 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vdpps $0x7f, %xmm5, %xmm5, %xmm8 vmulps %xmm7, %xmm10, %xmm7 vmovss %xmm8, %xmm2, %xmm10 # xmm10 = xmm8[0],xmm2[1,2,3] vrsqrt14ss %xmm10, %xmm2, %xmm11 vmulss %xmm12, %xmm11, %xmm12 vmulss %xmm14, %xmm8, %xmm13 vmulss %xmm11, %xmm13, %xmm13 vmulss %xmm11, %xmm11, %xmm11 vmulss %xmm11, %xmm13, %xmm11 vdpps $0x7f, %xmm6, %xmm5, %xmm13 vaddss %xmm11, %xmm12, %xmm11 vbroadcastss %xmm11, %xmm11 vmulps %xmm5, %xmm11, %xmm12 vbroadcastss %xmm8, %xmm14 vmulps %xmm6, %xmm14, %xmm6 vbroadcastss %xmm13, %xmm13 vmulps %xmm5, %xmm13, %xmm5 vsubps %xmm5, %xmm6, %xmm5 vrcp14ss %xmm10, %xmm2, %xmm2 vfnmadd213ss %xmm9, %xmm2, %xmm8 # xmm8 = -(xmm2 * xmm8) + xmm9 vmulss %xmm2, %xmm8, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm5, %xmm2 vmulps %xmm2, %xmm11, %xmm2 vshufps $0xff, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[3,3,3,3] vmulps %xmm5, %xmm15, %xmm6 vsubps %xmm6, %xmm0, %xmm8 vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3] vmulps %xmm15, %xmm9, %xmm9 vmulps %xmm7, %xmm5, %xmm5 vaddps %xmm5, %xmm9, %xmm5 vsubps %xmm5, %xmm1, %xmm7 vaddps %xmm6, %xmm0, %xmm0 vaddps %xmm5, %xmm1, %xmm1 vshufps $0xff, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[3,3,3,3] vmulps %xmm5, %xmm12, %xmm6 vsubps %xmm6, %xmm3, %xmm11 vshufps $0xff, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[3,3,3,3] vmulps %xmm12, %xmm9, %xmm9 vmulps %xmm2, %xmm5, %xmm2 vaddps %xmm2, %xmm9, %xmm5 vsubps %xmm5, %xmm4, %xmm9 vaddps %xmm6, %xmm3, %xmm2 vaddps %xmm5, %xmm4, %xmm3 vbroadcastss 0x41ed1(%rip), %xmm4 # 0x1ef1ebc vmulps %xmm4, %xmm7, %xmm5 vaddps %xmm5, %xmm8, %xmm10 vmulps %xmm4, %xmm9, %xmm5 vsubps %xmm5, %xmm11, %xmm12 vmulps %xmm4, %xmm1, %xmm1 vaddps %xmm1, %xmm0, %xmm1 vmulps %xmm4, %xmm3, %xmm3 vsubps %xmm3, %xmm2, %xmm4 vmovaps (%r8), %xmm6 vbroadcastss (%rdi), %xmm7 vsubps %xmm6, %xmm8, %xmm3 vmulps %xmm3, %xmm7, %xmm3 vbroadcastss %xmm3, %xmm13 vshufps $0x55, %xmm3, %xmm3, %xmm14 # xmm14 = xmm3[1,1,1,1] vmovaps (%rcx), %xmm5 vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2] vmovaps 0x10(%rcx), %xmm8 vmovaps 0x20(%rcx), %xmm9 vmulps %xmm3, %xmm9, %xmm3 vfmadd231ps %xmm14, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm14) + xmm3 vfmadd231ps %xmm13, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm13) + xmm3 vsubps %xmm6, %xmm10, %xmm10 vmulps %xmm7, 
%xmm10, %xmm10 vbroadcastss %xmm10, %xmm13 vshufps $0x55, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,1,1,1] vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2] vmulps %xmm10, %xmm9, %xmm10 vfmadd231ps %xmm14, %xmm8, %xmm10 # xmm10 = (xmm8 * xmm14) + xmm10 vfmadd231ps %xmm13, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm13) + xmm10 vsubps %xmm6, %xmm12, %xmm12 vmulps %xmm7, %xmm12, %xmm12 vbroadcastss %xmm12, %xmm13 vshufps $0x55, %xmm12, %xmm12, %xmm14 # xmm14 = xmm12[1,1,1,1] vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2] vmulps %xmm12, %xmm9, %xmm12 vfmadd231ps %xmm14, %xmm8, %xmm12 # xmm12 = (xmm8 * xmm14) + xmm12 vfmadd231ps %xmm13, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm13) + xmm12 vsubps %xmm6, %xmm11, %xmm11 vmulps %xmm7, %xmm11, %xmm11 vbroadcastss %xmm11, %xmm14 vshufps $0x55, %xmm11, %xmm11, %xmm15 # xmm15 = xmm11[1,1,1,1] vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2] vmulps %xmm11, %xmm9, %xmm13 vfmadd231ps %xmm15, %xmm8, %xmm13 # xmm13 = (xmm8 * xmm15) + xmm13 vfmadd231ps %xmm14, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm14) + xmm13 vsubps %xmm6, %xmm0, %xmm0 vmulps %xmm0, %xmm7, %xmm0 vbroadcastss %xmm0, %xmm11 vshufps $0x55, %xmm0, %xmm0, %xmm14 # xmm14 = xmm0[1,1,1,1] vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2] vmulps %xmm0, %xmm9, %xmm31 vfmadd231ps %xmm14, %xmm8, %xmm31 # xmm31 = (xmm8 * xmm14) + xmm31 vfmadd231ps %xmm11, %xmm5, %xmm31 # xmm31 = (xmm5 * xmm11) + xmm31 vsubps %xmm6, %xmm1, %xmm1 vmulps %xmm1, %xmm7, %xmm1 vbroadcastss %xmm1, %xmm11 vshufps $0x55, %xmm1, %xmm1, %xmm14 # xmm14 = xmm1[1,1,1,1] vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2] vmulps %xmm1, %xmm9, %xmm22 vfmadd231ps %xmm14, %xmm8, %xmm22 # xmm22 = (xmm8 * xmm14) + xmm22 vfmadd231ps %xmm11, %xmm5, %xmm22 # xmm22 = (xmm5 * xmm11) + xmm22 vsubps %xmm6, %xmm4, %xmm4 vmulps %xmm4, %xmm7, %xmm4 vbroadcastss %xmm4, %xmm11 vshufps $0x55, %xmm4, %xmm4, %xmm14 # xmm14 = xmm4[1,1,1,1] vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2] vmulps %xmm4, %xmm9, %xmm20 vfmadd231ps %xmm14, %xmm8, %xmm20 # xmm20 = (xmm8 * xmm14) + xmm20 vfmadd231ps %xmm11, %xmm5, %xmm20 # xmm20 = (xmm5 * xmm11) + xmm20 vsubps %xmm6, %xmm2, %xmm2 vmulps %xmm2, %xmm7, %xmm2 vbroadcastss %xmm2, %xmm7 vshufps $0x55, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,1,1,1] vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2] vmulps %xmm2, %xmm9, %xmm6 vfmadd231ps %xmm11, %xmm8, %xmm6 # xmm6 = (xmm8 * xmm11) + xmm6 vfmadd231ps %xmm7, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm7) + xmm6 vbroadcastss 0x6259a(%rip), %ymm29 # 0x1f12704 vbroadcastss %xmm3, %ymm2 vpermps %ymm3, %ymm29, %ymm5 vbroadcastss 0x70d5d(%rip), %ymm30 # 0x1f20edc vpermps %ymm3, %ymm30, %ymm3 vbroadcastss %xmm10, %ymm17 vpermps %ymm10, %ymm29, %ymm1 vpermps %ymm10, %ymm30, %ymm0 vbroadcastss %xmm12, %ymm23 vpermps %ymm12, %ymm29, %ymm24 vpermps %ymm12, %ymm30, %ymm25 vbroadcastss %xmm13, %ymm26 vpermps %ymm13, %ymm29, %ymm27 leaq 0x277128(%rip), %rcx # 0x21272e4 vmovups 0x1dc(%rcx), %ymm7 vmovups 0x660(%rcx), %ymm10 vpermps %ymm13, %ymm30, %ymm28 vmovups 0xae4(%rcx), %ymm13 vmovups 0xf68(%rcx), %ymm16 vmulps %ymm16, %ymm26, %ymm14 vmulps %ymm16, %ymm27, %ymm18 vmulps %ymm16, %ymm28, %ymm21 vfmadd231ps %ymm23, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm23) + ymm14 vfmadd231ps %ymm24, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm24) + ymm18 vfmadd231ps %ymm25, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm25) + ymm21 vfmadd231ps %ymm17, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm17) + ymm14 vfmadd231ps %ymm1, %ymm10, %ymm18 # ymm18 = (ymm10 * ymm1) + ymm18 
vfmadd231ps %ymm0, %ymm10, %ymm21 # ymm21 = (ymm10 * ymm0) + ymm21 vfmadd231ps %ymm2, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm2) + ymm14 vfmadd231ps %ymm5, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm5) + ymm18 vmovups 0x13ec(%rcx), %ymm8 vmovups 0x1870(%rcx), %ymm12 vmovups 0x1cf4(%rcx), %ymm15 vmovups 0x2178(%rcx), %ymm19 vfmadd231ps %ymm3, %ymm7, %ymm21 # ymm21 = (ymm7 * ymm3) + ymm21 vmulps %ymm19, %ymm26, %ymm26 vmulps %ymm19, %ymm27, %ymm27 vmulps %ymm19, %ymm28, %ymm28 vfmadd231ps %ymm23, %ymm15, %ymm26 # ymm26 = (ymm15 * ymm23) + ymm26 vfmadd231ps %ymm24, %ymm15, %ymm27 # ymm27 = (ymm15 * ymm24) + ymm27 vfmadd231ps %ymm25, %ymm15, %ymm28 # ymm28 = (ymm15 * ymm25) + ymm28 vfmadd231ps %ymm17, %ymm12, %ymm26 # ymm26 = (ymm12 * ymm17) + ymm26 vfmadd231ps %ymm1, %ymm12, %ymm27 # ymm27 = (ymm12 * ymm1) + ymm27 vfmadd231ps %ymm0, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm0) + ymm28 vfmadd231ps %ymm2, %ymm8, %ymm26 # ymm26 = (ymm8 * ymm2) + ymm26 vfmadd231ps %ymm5, %ymm8, %ymm27 # ymm27 = (ymm8 * ymm5) + ymm27 vfmadd231ps %ymm3, %ymm8, %ymm28 # ymm28 = (ymm8 * ymm3) + ymm28 vbroadcastss 0x71232(%rip), %ymm0 # 0x1f214d0 vmulps %ymm0, %ymm26, %ymm3 vmulps %ymm0, %ymm27, %ymm5 vmulps %ymm0, %ymm28, %ymm9 vxorps %xmm4, %xmm4, %xmm4 vblendps $0x1, %ymm4, %ymm3, %ymm11 # ymm11 = ymm4[0],ymm3[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm5, %ymm0 # ymm0 = ymm4[0],ymm5[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm9, %ymm1 # ymm1 = ymm4[0],ymm9[1,2,3,4,5,6,7] vsubps %ymm11, %ymm14, %ymm11 vsubps %ymm0, %ymm18, %ymm0 vsubps %ymm1, %ymm21, %ymm1 vblendps $0x80, %ymm4, %ymm3, %ymm3 # ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm5, %ymm5 # ymm5 = ymm5[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm9, %ymm9 # ymm9 = ymm9[0,1,2,3,4,5,6],ymm4[7] vaddps %ymm3, %ymm14, %ymm24 vaddps %ymm5, %ymm18, %ymm25 vaddps %ymm9, %ymm21, %ymm26 vbroadcastss 0x3b71c(%rip), %ymm9 # 0x1eeba20 vminps %ymm14, %ymm9, %ymm3 vminps %ymm18, %ymm9, %ymm17 vminps %ymm21, %ymm9, %ymm23 vminps %ymm24, %ymm11, %ymm5 vminps %ymm5, %ymm3, %ymm5 vminps %ymm25, %ymm0, %ymm3 vminps %ymm3, %ymm17, %ymm3 vminps %ymm26, %ymm1, %ymm17 vminps %ymm17, %ymm23, %ymm17 vbroadcastss 0x3c843(%rip), %ymm23 # 0x1eecb84 vmaxps %ymm14, %ymm23, %ymm14 vmaxps %ymm18, %ymm23, %ymm18 vmaxps %ymm21, %ymm23, %ymm27 vmaxps %ymm24, %ymm11, %ymm11 vmaxps %ymm11, %ymm14, %ymm21 vmaxps %ymm25, %ymm0, %ymm0 vmaxps %ymm0, %ymm18, %ymm18 vmaxps %ymm26, %ymm1, %ymm0 vmaxps %ymm0, %ymm27, %ymm14 vbroadcastss %xmm31, %ymm11 vpermps %ymm31, %ymm29, %ymm24 vpermps %ymm31, %ymm30, %ymm25 vbroadcastss %xmm22, %ymm26 vpermps %ymm22, %ymm29, %ymm27 vpermps %ymm22, %ymm30, %ymm22 vbroadcastss %xmm20, %ymm28 vpermps %ymm20, %ymm29, %ymm31 vpermps %ymm20, %ymm30, %ymm20 vbroadcastss %xmm6, %ymm2 vpermps %ymm6, %ymm29, %ymm29 vpermps %ymm6, %ymm30, %ymm30 vmulps %ymm16, %ymm2, %ymm0 vmulps %ymm16, %ymm29, %ymm1 vmulps %ymm16, %ymm30, %ymm6 vfmadd231ps %ymm28, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm28) + ymm0 vfmadd231ps %ymm31, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm31) + ymm1 vfmadd231ps %ymm13, %ymm20, %ymm6 # ymm6 = (ymm20 * ymm13) + ymm6 vfmadd231ps %ymm26, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm26) + ymm0 vfmadd231ps %ymm27, %ymm10, %ymm1 # ymm1 = (ymm10 * ymm27) + ymm1 vfmadd231ps %ymm10, %ymm22, %ymm6 # ymm6 = (ymm22 * ymm10) + ymm6 vfmadd231ps %ymm11, %ymm7, %ymm0 # ymm0 = (ymm7 * ymm11) + ymm0 vfmadd231ps %ymm24, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm24) + ymm1 vfmadd231ps %ymm7, %ymm25, %ymm6 # ymm6 = (ymm25 * ymm7) + ymm6 vmulps %ymm19, %ymm2, %ymm2 vmulps %ymm19, %ymm29, %ymm7 vmulps %ymm19, 
%ymm30, %ymm10 vfmadd231ps %ymm28, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm28) + ymm2 vfmadd231ps %ymm31, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm31) + ymm7 vfmadd231ps %ymm20, %ymm15, %ymm10 # ymm10 = (ymm15 * ymm20) + ymm10 vfmadd231ps %ymm26, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm26) + ymm2 vfmadd231ps %ymm27, %ymm12, %ymm7 # ymm7 = (ymm12 * ymm27) + ymm7 vfmadd231ps %ymm22, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm22) + ymm10 vfmadd231ps %ymm11, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm11) + ymm2 vfmadd231ps %ymm24, %ymm8, %ymm7 # ymm7 = (ymm8 * ymm24) + ymm7 vfmadd231ps %ymm25, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm25) + ymm10 vbroadcastss 0x7107b(%rip), %ymm8 # 0x1f214d0 vmulps %ymm2, %ymm8, %ymm2 vmulps %ymm7, %ymm8, %ymm7 vmulps %ymm8, %ymm10, %ymm8 vblendps $0x1, %ymm4, %ymm2, %ymm10 # ymm10 = ymm4[0],ymm2[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm7, %ymm11 # ymm11 = ymm4[0],ymm7[1,2,3,4,5,6,7] vblendps $0x1, %ymm4, %ymm8, %ymm12 # ymm12 = ymm4[0],ymm8[1,2,3,4,5,6,7] vsubps %ymm10, %ymm0, %ymm10 vsubps %ymm11, %ymm1, %ymm11 vsubps %ymm12, %ymm6, %ymm12 vblendps $0x80, %ymm4, %ymm2, %ymm2 # ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm7, %ymm7 # ymm7 = ymm7[0,1,2,3,4,5,6],ymm4[7] vblendps $0x80, %ymm4, %ymm8, %ymm4 # ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7] vaddps %ymm2, %ymm0, %ymm2 vaddps %ymm7, %ymm1, %ymm8 vaddps %ymm4, %ymm6, %ymm13 vminps %ymm0, %ymm9, %ymm4 vminps %ymm1, %ymm9, %ymm7 vminps %ymm6, %ymm9, %ymm9 vminps %ymm2, %ymm10, %ymm15 vminps %ymm15, %ymm4, %ymm15 vminps %ymm8, %ymm11, %ymm4 vminps %ymm4, %ymm7, %ymm7 vminps %ymm13, %ymm12, %ymm4 vminps %ymm4, %ymm9, %ymm4 vmaxps %ymm0, %ymm23, %ymm0 vmaxps %ymm1, %ymm23, %ymm1 vmaxps %ymm6, %ymm23, %ymm6 vmaxps %ymm2, %ymm10, %ymm2 vmaxps %ymm2, %ymm0, %ymm2 vmaxps %ymm8, %ymm11, %ymm0 vmaxps %ymm0, %ymm1, %ymm1 vmaxps %ymm13, %ymm12, %ymm0 vmaxps %ymm0, %ymm6, %ymm0 vshufps $0xb1, %ymm21, %ymm21, %ymm6 # ymm6 = ymm21[1,0,3,2,5,4,7,6] vmaxps %ymm6, %ymm21, %ymm6 vshufpd $0x5, %ymm6, %ymm6, %ymm8 # ymm8 = ymm6[1,0,3,2] vmaxps %ymm8, %ymm6, %ymm6 vextractf128 $0x1, %ymm6, %xmm8 vmaxps %xmm8, %xmm6, %xmm6 vshufps $0xb1, %ymm18, %ymm18, %ymm8 # ymm8 = ymm18[1,0,3,2,5,4,7,6] vmaxps %ymm8, %ymm18, %ymm8 vshufpd $0x5, %ymm8, %ymm8, %ymm9 # ymm9 = ymm8[1,0,3,2] vmaxps %ymm9, %ymm8, %ymm8 vextractf128 $0x1, %ymm8, %xmm9 vmaxps %xmm9, %xmm8, %xmm8 vunpcklps %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1] vshufps $0xb1, %ymm14, %ymm14, %ymm8 # ymm8 = ymm14[1,0,3,2,5,4,7,6] vmaxps %ymm8, %ymm14, %ymm8 vshufpd $0x5, %ymm8, %ymm8, %ymm9 # ymm9 = ymm8[1,0,3,2] vmaxps %ymm9, %ymm8, %ymm8 vextractf128 $0x1, %ymm8, %xmm9 vmaxps %xmm9, %xmm8, %xmm8 vinsertps $0x28, %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm8[0],zero vshufps $0xb1, %ymm5, %ymm5, %ymm8 # ymm8 = ymm5[1,0,3,2,5,4,7,6] vminps %ymm8, %ymm5, %ymm5 vshufpd $0x5, %ymm5, %ymm5, %ymm8 # ymm8 = ymm5[1,0,3,2] vminps %ymm8, %ymm5, %ymm5 vextractf128 $0x1, %ymm5, %xmm8 vminps %xmm8, %xmm5, %xmm5 vshufps $0xb1, %ymm3, %ymm3, %ymm8 # ymm8 = ymm3[1,0,3,2,5,4,7,6] vminps %ymm8, %ymm3, %ymm3 vshufpd $0x5, %ymm3, %ymm3, %ymm8 # ymm8 = ymm3[1,0,3,2] vminps %ymm8, %ymm3, %ymm3 vextractf128 $0x1, %ymm3, %xmm8 vminps %xmm8, %xmm3, %xmm3 vunpcklps %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] vshufps $0xb1, %ymm17, %ymm17, %ymm5 # ymm5 = ymm17[1,0,3,2,5,4,7,6] vminps %ymm5, %ymm17, %ymm5 vshufpd $0x5, %ymm5, %ymm5, %ymm8 # ymm8 = ymm5[1,0,3,2] vminps %ymm8, %ymm5, %ymm5 vextractf128 $0x1, %ymm5, %xmm8 vminps %xmm8, %xmm5, %xmm5 vinsertps $0x28, %xmm5, %xmm3, %xmm3 # xmm3 = 
xmm3[0,1],xmm5[0],zero vshufps $0xb1, %ymm15, %ymm15, %ymm5 # ymm5 = ymm15[1,0,3,2,5,4,7,6] vminps %ymm5, %ymm15, %ymm5 vshufpd $0x5, %ymm5, %ymm5, %ymm8 # ymm8 = ymm5[1,0,3,2] vminps %ymm8, %ymm5, %ymm5 vextractf128 $0x1, %ymm5, %xmm8 vminps %xmm8, %xmm5, %xmm5 vshufps $0xb1, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2,5,4,7,6] vminps %ymm8, %ymm7, %ymm7 vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2] vminps %ymm8, %ymm7, %ymm7 vextractf128 $0x1, %ymm7, %xmm8 vminps %xmm8, %xmm7, %xmm7 vunpcklps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] vshufps $0xb1, %ymm4, %ymm4, %ymm7 # ymm7 = ymm4[1,0,3,2,5,4,7,6] vminps %ymm7, %ymm4, %ymm4 vshufpd $0x5, %ymm4, %ymm4, %ymm7 # ymm7 = ymm4[1,0,3,2] vminps %ymm7, %ymm4, %ymm4 vextractf128 $0x1, %ymm4, %xmm7 vminps %xmm7, %xmm4, %xmm4 vinsertps $0x28, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,1],xmm4[0],zero vminps %xmm4, %xmm3, %xmm3 vshufps $0xb1, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm2, %ymm2 vshufpd $0x5, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,0,3,2] vmaxps %ymm4, %ymm2, %ymm2 vextractf128 $0x1, %ymm2, %xmm4 vmaxps %xmm4, %xmm2, %xmm2 vshufps $0xb1, %ymm1, %ymm1, %ymm4 # ymm4 = ymm1[1,0,3,2,5,4,7,6] vmaxps %ymm4, %ymm1, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm4 # ymm4 = ymm1[1,0,3,2] vmaxps %ymm4, %ymm1, %ymm1 vextractf128 $0x1, %ymm1, %xmm4 vmaxps %xmm4, %xmm1, %xmm1 vunpcklps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vshufps $0xb1, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2,5,4,7,6] vmaxps %ymm2, %ymm0, %ymm0 vshufpd $0x5, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2] vmaxps %ymm2, %ymm0, %ymm0 vextractf128 $0x1, %ymm0, %xmm2 vmaxps %xmm2, %xmm0, %xmm0 vinsertps $0x28, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,1],xmm0[0],zero vmaxps %xmm0, %xmm6, %xmm0 vbroadcastss 0x70822(%rip), %xmm1 # 0x1f20ec4 vandps %xmm1, %xmm3, %xmm2 vandps %xmm1, %xmm0, %xmm1 vmaxps %xmm1, %xmm2, %xmm1 vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3] vmaxss %xmm1, %xmm2, %xmm2 vshufpd $0x1, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,0] vmaxss %xmm2, %xmm1, %xmm1 vmulss 0x4091d(%rip), %xmm1, %xmm1 # 0x1ef0fe4 vbroadcastss %xmm1, %xmm1 vsubps %xmm1, %xmm3, %xmm2 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq nop
/embree[P]embree/kernels/common/scene_curves.cpp
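The repeated shuffle/min and shuffle/max ladders near the end of this disassembly are horizontal lane reductions of 8-wide vectors: swap neighboring lanes and combine, swap lane pairs and combine, then fold the two 128-bit halves. A sketch of the same pattern with AVX intrinsics (illustrative, not Embree's vfloat8 helpers; the 0xB1 and pair-swap shuffle controls match the instructions above):

// Sketch only: fold 8 floats in a ymm register down to one scalar minimum.
#include <immintrin.h>

static float reduce_min8(__m256 v) {
  // swap neighboring lanes [1,0,3,2,5,4,7,6] and combine
  __m256 t = _mm256_shuffle_ps(v, v, _MM_SHUFFLE(2, 3, 0, 1));
  v = _mm256_min_ps(v, t);
  // swap 64-bit lane pairs within each half and combine
  t = _mm256_shuffle_ps(v, v, _MM_SHUFFLE(1, 0, 3, 2));
  v = _mm256_min_ps(v, t);
  // fold the upper 128-bit half into the lower one
  __m128 lo = _mm256_castps256_ps128(v);
  __m128 hi = _mm256_extractf128_ps(v, 1);
  __m128 m  = _mm_min_ps(lo, hi);
  return _mm_cvtss_f32(m);
}

The max reductions in the assembly use the identical ladder with vmaxps in place of vminps.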
embree::avx512::LineSegmentsISA::createPrimRefMBArray(embree::vector_t<embree::PrimRefMB, embree::aligned_monitored_allocator<embree::PrimRefMB, 16ul>>&, embree::BBox<float> const&, embree::range<unsigned long> const&, unsigned long, unsigned int) const
PrimInfoMB createPrimRefMBArray(mvector<PrimRefMB>& prims, const BBox1f& t0t1, const range<size_t>& r, size_t k, unsigned int geomID) const { PrimInfoMB pinfo(empty); for (size_t j=r.begin(); j<r.end(); j++) { if (!valid(j, timeSegmentRange(t0t1))) continue; const PrimRefMB prim(linearBounds(j,t0t1),this->numTimeSegments(),this->time_range,this->numTimeSegments(),geomID,unsigned(j)); pinfo.add_primref(prim); prims[k++] = prim; } return pinfo; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, %rax vbroadcastss 0x3a606(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x3b75d(%rip), %xmm1 # 0x1eecb84 vmovaps %xmm1, 0x10(%rdi) vmovaps %xmm0, 0x20(%rdi) vmovaps %xmm1, 0x30(%rdi) vmovaps %xmm0, 0x40(%rdi) movq %r9, -0x30(%rsp) movq %rdx, -0x20(%rsp) vmovaps %xmm1, 0x50(%rdi) vxorps %xmm2, %xmm2, %xmm2 vmovups %ymm2, 0x60(%rdi) xorl %edx, %edx movl %edx, 0x80(%rdi) vbroadcastss 0x3b2b0(%rip), %xmm2 # 0x1eec714 vmovlps %xmm2, 0x84(%rdi) movl %edx, 0x8c(%rdi) movq (%r8), %r11 vmovsd 0x88(%rdi), %xmm4 movq 0x70(%rdi), %rdx movq %rdx, -0x38(%rsp) movq 0x78(%rdi), %rdx vmovss 0x80(%rdi), %xmm3 vmovss 0x84(%rdi), %xmm2 cmpq 0x8(%r8), %r11 jae 0x1eb1c27 movq %rdx, -0x48(%rsp) vmovss 0x38(%rsp), %xmm30 movq %rax, -0x28(%rsp) movq 0x68(%rax), %rax movq %rax, -0x40(%rsp) vmovss 0x3f479(%rip), %xmm14 # 0x1ef0940 vxorps %xmm11, %xmm11, %xmm11 vmovss 0x3f470(%rip), %xmm13 # 0x1ef0944 vmovss 0x3b236(%rip), %xmm31 # 0x1eec714 vxorps %xmm15, %xmm15, %xmm15 vbroadcastss 0x6ff0f(%rip), %xmm16 # 0x1f213fc vbroadcastss 0x3fae9(%rip), %xmm17 # 0x1ef0fe0 vmovaps %xmm0, %xmm8 vmovaps %xmm1, %xmm7 vmovaps %xmm0, %xmm6 vmovaps %xmm1, %xmm5 vmovsd 0x2c(%rsi), %xmm9 vmovss (%rcx), %xmm19 vmovss 0x4(%rcx), %xmm20 vsubss %xmm9, %xmm19, %xmm18 vmovshdup %xmm9, %xmm21 # xmm21 = xmm9[1,1,3,3] vsubss %xmm9, %xmm21, %xmm21 vdivss %xmm21, %xmm18, %xmm18 vsubss %xmm9, %xmm20, %xmm9 vdivss %xmm21, %xmm9, %xmm9 vmulss %xmm14, %xmm18, %xmm21 vmulss %xmm13, %xmm9, %xmm9 movq 0x58(%rsi), %r9 movq 0x68(%rsi), %rdx imulq %r11, %rdx movl (%r9,%rdx), %r14d leal 0x1(%r14), %r15d movq 0x150(%rsi), %rdx cmpq %r15, 0x18(%rdx) jbe 0x1eb1c13 movq %r8, %rax vmovss 0x28(%rsi), %xmm18 vmulss %xmm21, %xmm18, %xmm21 vrndscaless $0x9, %xmm21, %xmm21, %xmm21 vmaxss %xmm21, %xmm11, %xmm21 vcvttss2si %xmm21, %edi vmulss %xmm9, %xmm18, %xmm9 vroundss $0xa, %xmm9, %xmm9, %xmm9 vminss %xmm18, %xmm9, %xmm9 vcvttss2si %xmm9, %r10d cmpl %r10d, %edi seta %bpl ja 0x1eb1678 movslq %edi, %r13 imulq $0x38, %r13, %r8 movq (%rdx,%r8), %rbx movq 0x10(%rdx,%r8), %r12 movq %r12, %rdi imulq %r14, %rdi vmovaps (%rbx,%rdi), %xmm9 vcmpnleps %xmm16, %xmm9, %k1 vcmpltps %xmm17, %xmm9, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil jne 0x1eb166e movslq %r10d, %r10 addq %r8, %rdx addq $0x48, %rdx imulq %r15, %r12 vmovaps (%rbx,%r12), %xmm21 vcmpnleps %xmm16, %xmm21, %k1 vcmpltps %xmm17, %xmm21, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil jne 0x1eb166e vshufps $0xff, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3] vshufps $0xff, %xmm21, %xmm21, %xmm21 # xmm21 = xmm21[3,3,3,3] vminss %xmm21, %xmm9, %xmm9 vucomiss %xmm9, %xmm11 ja 0x1eb166e incq %r13 cmpq %r10, %r13 seta %bpl ja 0x1eb1678 movq -0x10(%rdx), %rbx movq (%rdx), %r12 addq $0x38, %rdx movq %r12, %rdi imulq %r14, %rdi vmovaps (%rbx,%rdi), %xmm9 vcmpnleps %xmm16, %xmm9, %k1 vcmpltps %xmm17, %xmm9, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil je 0x1eb15f6 testb $0x1, %bpl je 0x1eb1c22 vmovss 0x2c(%rsi), %xmm9 vmovss 0x30(%rsi), %xmm21 vsubss %xmm9, %xmm19, %xmm19 vsubss %xmm9, %xmm21, %xmm21 vdivss %xmm21, %xmm19, %xmm19 vsubss %xmm9, %xmm20, %xmm9 vdivss %xmm21, %xmm9, %xmm25 vmulss %xmm19, %xmm18, %xmm28 vmulss %xmm25, %xmm18, %xmm26 vrndscaless $0x9, %xmm28, %xmm28, %xmm9 vrndscaless $0xa, %xmm26, %xmm26, %xmm20 vmaxss %xmm11, %xmm9, %xmm29 vminss %xmm18, %xmm20, %xmm27 vcvttss2si %xmm29, %edx vcvttss2si %xmm27, %edi vcvttss2si %xmm9, %r14d testl %r14d, %r14d movl $0xffffffff, %r8d # imm = 0xFFFFFFFF cmovsl %r8d, %r14d vcvttss2si 
%xmm20, %r8d vcvttss2si %xmm18, %r15d incl %r15d cmpl %r15d, %r8d cmovll %r8d, %r15d movslq %edx, %rdx movq 0x68(%rsi), %r8 imulq %r11, %r8 movl (%r9,%r8), %r12d movq 0x150(%rsi), %rbp imulq $0x38, %rdx, %rdx movq (%rbp,%rdx), %r8 movq 0x10(%rbp,%rdx), %r9 movq %r9, %r10 imulq %r12, %r10 vmovaps (%r8,%r10), %xmm9 leal 0x1(%r12), %r13d imulq %r13, %r9 vmovaps (%r8,%r9), %xmm20 vminps %xmm20, %xmm9, %xmm21 vmaxps %xmm20, %xmm9, %xmm9 vmovss 0x19c(%rsi), %xmm24 vmovss 0xc(%r8,%r9), %xmm20 vmaxss 0xc(%r8,%r10), %xmm20, %xmm20 vmulss %xmm20, %xmm24, %xmm20 vbroadcastss %xmm20, %xmm20 vsubps %xmm20, %xmm21, %xmm23 vaddps %xmm20, %xmm9, %xmm20 movslq %edi, %rdi imulq $0x38, %rdi, %rdi movq (%rbp,%rdi), %r8 movq 0x10(%rbp,%rdi), %r9 movq %r9, %r10 imulq %r12, %r10 vmovaps (%r8,%r10), %xmm9 imulq %r13, %r9 vmovaps (%r8,%r9), %xmm21 vminps %xmm21, %xmm9, %xmm22 vmaxps %xmm21, %xmm9, %xmm9 vmovss 0xc(%r8,%r9), %xmm21 vmaxss 0xc(%r8,%r10), %xmm21, %xmm21 vmulss %xmm21, %xmm24, %xmm21 vbroadcastss %xmm21, %xmm21 vsubps %xmm21, %xmm22, %xmm22 vaddps %xmm21, %xmm9, %xmm21 movl %r15d, %r8d subl %r14d, %r8d cmpl $0x1, %r8d jne 0x1eb185f vsubss %xmm29, %xmm28, %xmm9 vmaxss %xmm11, %xmm9, %xmm9 vsubss %xmm9, %xmm31, %xmm18 vbroadcastss %xmm9, %xmm9 vmulps %xmm22, %xmm9, %xmm19 vbroadcastss %xmm18, %xmm18 vfmadd231ps %xmm23, %xmm18, %xmm19 # xmm19 = (xmm18 * xmm23) + xmm19 vmulps %xmm21, %xmm9, %xmm9 vfmadd231ps %xmm18, %xmm20, %xmm9 # xmm9 = (xmm20 * xmm18) + xmm9 vsubss %xmm26, %xmm27, %xmm18 vmaxss %xmm11, %xmm18, %xmm18 vsubss %xmm18, %xmm31, %xmm24 vbroadcastss %xmm18, %xmm18 vmulps %xmm23, %xmm18, %xmm23 vbroadcastss %xmm24, %xmm24 vfmadd213ps %xmm23, %xmm24, %xmm22 # xmm22 = (xmm24 * xmm22) + xmm23 vmulps %xmm20, %xmm18, %xmm18 vfmadd213ps %xmm18, %xmm24, %xmm21 # xmm21 = (xmm24 * xmm21) + xmm18 vmovaps %xmm9, %xmm20 vmovaps %xmm19, %xmm23 movq %rax, %r8 jmp 0x1eb1b0a vmovaps %xmm1, -0x18(%rsp) vmovaps %xmm0, %xmm1 vmovaps %xmm8, %xmm0 vmovaps %xmm7, %xmm8 vmovaps %xmm6, %xmm7 vmovaps %xmm5, %xmm6 vmovaps %xmm3, %xmm5 vmovaps %xmm2, %xmm3 vmovaps %xmm14, %xmm2 vmovaps %xmm30, %xmm14 movq 0x38(%rbp,%rdx), %r8 movq 0x48(%rbp,%rdx), %rdx movq %rdx, %r9 imulq %r12, %r9 vmovaps (%r8,%r9), %xmm9 imulq %r13, %rdx vmovaps %xmm31, %xmm13 vmovaps (%r8,%rdx), %xmm31 vminps %xmm31, %xmm9, %xmm30 vmaxps %xmm31, %xmm9, %xmm9 vmovss 0xc(%r8,%rdx), %xmm31 vmaxss 0xc(%r8,%r9), %xmm31, %xmm31 vmulss %xmm31, %xmm24, %xmm31 vbroadcastss %xmm31, %xmm31 vsubps %xmm31, %xmm30, %xmm30 vaddps %xmm31, %xmm9, %xmm9 movq -0x38(%rbp,%rdi), %rdx movq -0x28(%rbp,%rdi), %rdi movq %rdi, %r8 imulq %r12, %r8 vmovaps (%rdx,%r8), %xmm31 imulq %r13, %rdi vmovaps (%rdx,%rdi), %xmm10 vminps %xmm10, %xmm31, %xmm12 vmaxps %xmm10, %xmm31, %xmm10 vmovss 0xc(%rdx,%rdi), %xmm31 vmaxss 0xc(%rdx,%r8), %xmm31, %xmm31 vmulss %xmm31, %xmm24, %xmm31 vbroadcastss %xmm31, %xmm31 vsubps %xmm31, %xmm12, %xmm12 vaddps %xmm31, %xmm10, %xmm10 vmovaps %xmm13, %xmm31 vsubss %xmm29, %xmm28, %xmm28 vmaxss %xmm11, %xmm28, %xmm28 vsubss %xmm28, %xmm13, %xmm29 vbroadcastss %xmm28, %xmm28 vmulps %xmm30, %xmm28, %xmm30 vbroadcastss %xmm29, %xmm29 vfmadd213ps %xmm30, %xmm29, %xmm23 # xmm23 = (xmm29 * xmm23) + xmm30 vmulps %xmm9, %xmm28, %xmm9 vfmadd213ps %xmm9, %xmm29, %xmm20 # xmm20 = (xmm29 * xmm20) + xmm9 vsubss %xmm26, %xmm27, %xmm9 vmaxss %xmm11, %xmm9, %xmm9 vsubss %xmm9, %xmm13, %xmm26 vbroadcastss %xmm9, %xmm9 vmulps %xmm12, %xmm9, %xmm12 vbroadcastss %xmm26, %xmm26 vfmadd213ps %xmm12, %xmm26, %xmm22 # xmm22 = (xmm26 * xmm22) + xmm12 vmulps %xmm10, %xmm9, %xmm9 
vfmadd213ps %xmm9, %xmm26, %xmm21 # xmm21 = (xmm26 * xmm21) + xmm9 incl %r14d cmpl %r15d, %r14d jge 0x1eb1ad3 vsubss %xmm19, %xmm25, %xmm25 movl %r14d, %edx imulq $0x38, %rdx, %rdx addq %rdx, %rbp addq $0x10, %rbp movq %rax, %r8 vmovaps %xmm14, %xmm30 vmovaps %xmm2, %xmm14 vmovaps %xmm3, %xmm2 vmovaps %xmm5, %xmm3 vmovaps %xmm6, %xmm5 vmovaps %xmm7, %xmm6 vmovaps %xmm8, %xmm7 vmovaps %xmm0, %xmm8 vmovaps %xmm1, %xmm0 vmovaps -0x18(%rsp), %xmm1 vmovss 0x3ef43(%rip), %xmm13 # 0x1ef0944 vcvtsi2ss %r14d, %xmm4, %xmm9 vdivss %xmm18, %xmm9, %xmm9 vsubss %xmm19, %xmm9, %xmm9 vdivss %xmm25, %xmm9, %xmm9 vsubss %xmm9, %xmm31, %xmm10 vbroadcastss %xmm9, %xmm9 vmulps %xmm9, %xmm22, %xmm12 vbroadcastss %xmm10, %xmm10 vfmadd231ps %xmm23, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm23) + xmm12 vmulps %xmm9, %xmm21, %xmm9 vfmadd231ps %xmm10, %xmm20, %xmm9 # xmm9 = (xmm20 * xmm10) + xmm9 movq -0x10(%rbp), %rdx movq (%rbp), %rdi movq %rdi, %r9 imulq %r12, %r9 vmovaps (%rdx,%r9), %xmm10 imulq %r13, %rdi vmovaps (%rdx,%rdi), %xmm26 vminps %xmm26, %xmm10, %xmm27 vmovss 0xc(%rdx,%rdi), %xmm28 vmaxps %xmm26, %xmm10, %xmm10 vmaxss 0xc(%rdx,%r9), %xmm28, %xmm26 vmulss %xmm26, %xmm24, %xmm26 vbroadcastss %xmm26, %xmm26 vsubps %xmm26, %xmm27, %xmm27 vsubps %xmm12, %xmm27, %xmm12 vaddps %xmm26, %xmm10, %xmm10 vsubps %xmm9, %xmm10, %xmm9 vminps %xmm15, %xmm12, %xmm10 vmaxps %xmm15, %xmm9, %xmm9 vaddps %xmm10, %xmm23, %xmm23 vaddps %xmm10, %xmm22, %xmm22 vaddps %xmm9, %xmm20, %xmm20 vaddps %xmm9, %xmm21, %xmm21 incl %r14d addq $0x38, %rbp cmpl %r14d, %r15d jne 0x1eb1a01 jmp 0x1eb1b0a movq %rax, %r8 vmovaps %xmm14, %xmm30 vmovaps %xmm2, %xmm14 vmovaps %xmm3, %xmm2 vmovaps %xmm5, %xmm3 vmovaps %xmm6, %xmm5 vmovaps %xmm7, %xmm6 vmovaps %xmm8, %xmm7 vmovaps %xmm0, %xmm8 vmovaps %xmm1, %xmm0 vmovaps -0x18(%rsp), %xmm1 vmovss 0x3ee3a(%rip), %xmm13 # 0x1ef0944 vinsertps $0x30, %xmm30, %xmm23, %xmm10 # xmm10 = xmm23[0,1,2],xmm30[0] movl 0x24(%rsi), %edx decl %edx vmovsd 0x2c(%rsi), %xmm9 vmovd %r11d, %xmm12 vinsertps $0x30, %xmm12, %xmm20, %xmm12 # xmm12 = xmm20[0,1,2],xmm12[0] vmovd %edx, %xmm18 vinsertps $0x30, %xmm18, %xmm22, %xmm19 # xmm19 = xmm22[0,1,2],xmm18[0] vinsertps $0x30, %xmm18, %xmm21, %xmm18 # xmm18 = xmm21[0,1,2],xmm18[0] vbroadcastss 0x3b03b(%rip), %xmm22 # 0x1eecb80 vmulps %xmm22, %xmm19, %xmm20 vfmadd231ps %xmm22, %xmm10, %xmm20 # xmm20 = (xmm10 * xmm22) + xmm20 vmulps %xmm22, %xmm18, %xmm21 vfmadd231ps %xmm22, %xmm12, %xmm21 # xmm21 = (xmm12 * xmm22) + xmm21 vaddps %xmm21, %xmm20, %xmm20 vminps %xmm10, %xmm0, %xmm0 vmaxps %xmm12, %xmm1, %xmm1 vminps %xmm19, %xmm8, %xmm8 vmaxps %xmm18, %xmm7, %xmm7 vminps %xmm20, %xmm6, %xmm6 vmaxps %xmm20, %xmm5, %xmm5 vcmpltps %xmm9, %xmm4, %k1 vinsertps $0x50, %xmm9, %xmm4, %xmm20 # xmm20 = xmm4[0],xmm9[1],xmm4[2,3] vblendps $0x2, %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[0],xmm4[1],xmm9[2,3] vmovaps %xmm20, %xmm4 {%k1} incq -0x40(%rsp) addq %rdx, -0x38(%rsp) movq -0x48(%rsp), %rax cmpq %rdx, %rax setb %dil vmovshdup %xmm9, %xmm20 # xmm20 = xmm9[1,1,3,3] kmovd %edi, %k1 vmovss %xmm20, %xmm2, %xmm2 {%k1} vmovss %xmm9, %xmm3, %xmm3 {%k1} cmovbeq %rdx, %rax movq %rax, -0x48(%rsp) movq -0x20(%rsp), %rax movq 0x20(%rax), %rdx movq -0x30(%rsp), %rax leaq (%rax,%rax,4), %rdi incq %rax movq %rax, -0x30(%rsp) shlq $0x4, %rdi vmovaps %xmm10, (%rdx,%rdi) vmovaps %xmm12, 0x10(%rdx,%rdi) vmovaps %xmm19, 0x20(%rdx,%rdi) vmovaps %xmm18, 0x30(%rdx,%rdi) vmovsd %xmm9, 0x40(%rdx,%rdi) incq %r11 cmpq 0x8(%r8), %r11 jb 0x1eb1507 jmp 0x1eb1c39 movq %rax, %r8 jmp 0x1eb1c13 vmovaps %xmm1, %xmm5 vmovaps 
%xmm0, %xmm6 vmovaps %xmm1, %xmm7 vmovaps %xmm0, %xmm8 jmp 0x1eb1c4c movq -0x28(%rsp), %rax movq -0x40(%rsp), %rcx movq %rcx, 0x68(%rax) movq -0x48(%rsp), %rdx vmovaps %xmm0, (%rax) vmovaps %xmm1, 0x10(%rax) vmovaps %xmm8, 0x20(%rax) vmovaps %xmm7, 0x30(%rax) vmovaps %xmm6, 0x40(%rax) vmovaps %xmm5, 0x50(%rax) vmovlps %xmm4, 0x88(%rax) movq -0x38(%rsp), %rcx movq %rcx, 0x70(%rax) movq %rdx, 0x78(%rax) vmovss %xmm3, 0x80(%rax) vmovss %xmm2, 0x84(%rax) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq
/embree[P]embree/kernels/common/scene_line_segments.h
embree::avx512::LineSegmentsISA::computeAlignedSpace(unsigned long) const
LinearSpace3fa computeAlignedSpace(const size_t primID) const { const Vec3fa dir = normalize(computeDirection(primID)); if (is_finite(dir)) return frame(dir); else return LinearSpace3fa(one); }
pushq %r14 pushq %rbx subq $0x18, %rsp movq %rdi, %rbx movq (%rsi), %rax movq %rsp, %r14 movq %r14, %rdi callq *0x1b8(%rax) vmovaps (%r14), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmulss 0x3aa42(%rip), %xmm3, %xmm4 # 0x1eec718 vmulss 0x3aa3e(%rip), %xmm2, %xmm2 # 0x1eec71c vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vaddss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm0, %xmm0 vcmpnltps 0x6fdde(%rip){1to4}, %xmm0, %k1 # 0x1f21ae0 vcmpleps 0x6fdd7(%rip){1to4}, %xmm0, %k0 {%k1} # 0x1f21ae4 knotw %k0, %k0 kmovd %k0, %eax testb $0x7, %al jne 0x1eb1e0a vshufpd $0x1, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,0] vmovshdup %xmm0, %xmm3 # xmm3 = xmm0[1,1,3,3] vbroadcastss 0x6f191(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm3, %xmm3 vunpckhps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovss %xmm3, %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[1,2,3] vshufps $0x41, %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[1,0],xmm3[0,1] vdpps $0x7f, %xmm3, %xmm3, %xmm5 vxorpd %xmm4, %xmm2, %xmm2 vinsertps $0x2a, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],zero,xmm0[0],zero vdpps $0x7f, %xmm2, %xmm2, %xmm4 vcmpltps %xmm5, %xmm4, %k0 vpmovm2d %k0, %xmm4 vpbroadcastd %xmm4, %xmm4 vpmovd2m %xmm4, %k1 vpcmpeqd %xmm4, %xmm4, %xmm4 vmovaps %xmm4, %xmm4 {%k1} {z} vblendvps %xmm4, %xmm3, %xmm2, %xmm2 vdpps $0x7f, %xmm2, %xmm2, %xmm3 vmovss %xmm3, %xmm1, %xmm4 # xmm4 = xmm3[0],xmm1[1,2,3] vrsqrt14ss %xmm4, %xmm1, %xmm4 vmovss 0x3a982(%rip), %xmm5 # 0x1eec718 vmulss %xmm5, %xmm4, %xmm6 vmovss 0x3adde(%rip), %xmm7 # 0x1eecb80 vmulss %xmm7, %xmm3, %xmm3 vmulss %xmm4, %xmm3, %xmm3 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm3, %xmm3 vsubss %xmm3, %xmm6, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm3, %xmm2, %xmm2 vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,2,0,3] vmulps %xmm4, %xmm2, %xmm4 vfmsub231ps %xmm3, %xmm0, %xmm4 # xmm4 = (xmm0 * xmm3) - xmm4 vshufps $0xc9, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,2,0,3] vdpps $0x7f, %xmm3, %xmm3, %xmm4 vmovss %xmm4, %xmm1, %xmm6 # xmm6 = xmm4[0],xmm1[1,2,3] vrsqrt14ss %xmm6, %xmm1, %xmm1 vmulss %xmm5, %xmm1, %xmm5 vmulss %xmm7, %xmm4, %xmm4 vmulss %xmm1, %xmm4, %xmm4 vmulss %xmm1, %xmm1, %xmm1 vmulss %xmm1, %xmm4, %xmm1 vsubss %xmm1, %xmm5, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm3, %xmm1, %xmm1 jmp 0x1eb1e22 vmovaps 0x3a8ee(%rip), %xmm0 # 0x1eec700 vmovsd 0x3a8d6(%rip), %xmm1 # 0x1eec6f0 vmovss 0x3a8f2(%rip), %xmm2 # 0x1eec714 vmovaps %xmm2, (%rbx) vmovaps %xmm1, 0x10(%rbx) vmovaps %xmm0, 0x20(%rbx) movq %rbx, %rax addq $0x18, %rsp popq %rbx popq %r14 retq nop
/embree[P]embree/kernels/common/scene_line_segments.h
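The listing above normalizes the segment direction with vrsqrt14ss plus one Newton-Raphson refinement step, rejects non-finite directions via the masked compares, and otherwise builds an orthonormal frame by selecting between two candidate tangents (the vdpps/vcmpltps/vblendvps sequence) and completing the basis with cross products. A minimal standalone sketch of that frame construction, with illustrative names (V3, makeFrame) rather than Embree's types, and std::sqrt standing in for the rsqrt-plus-refinement:

    #include <cmath>

    struct V3 { float x, y, z; };
    static float dot(V3 a, V3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
    static V3 cross(V3 a, V3 b) {
      return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x };
    }
    static V3 normalize(V3 a) { float s = 1.0f/std::sqrt(dot(a,a)); return { a.x*s, a.y*s, a.z*s }; }

    struct Frame { V3 vx, vy, vz; };
    static Frame makeFrame(V3 N) {            // N assumed unit length
      V3 c0 = { 0.0f, -N.z,  N.y };           // cross((1,0,0), N)
      V3 c1 = { N.z,  0.0f, -N.x };           // cross((0,1,0), N)
      // keep the longer (more orthogonal) candidate, as the vblendvps select does
      V3 vx = normalize(dot(c0, c0) > dot(c1, c1) ? c0 : c1);
      V3 vy = normalize(cross(N, vx));
      return { vx, vy, N };
    }

Picking the candidate with the larger squared length avoids the degenerate case where the direction is nearly parallel to one of the coordinate axes.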
embree::avx512::LineSegmentsISA::computeAlignedSpaceMB(unsigned long, embree::BBox<float>) const
LinearSpace3fa computeAlignedSpaceMB(const size_t primID, const BBox1f time_range) const {
  Vec3fa axisz(0,0,1);
  Vec3fa axisy(0,1,0);
  const range<int> tbounds = this->timeSegmentRange(time_range);
  if (tbounds.size() == 0) return frame(axisz);
  const size_t itime = (tbounds.begin()+tbounds.end())/2;
  const Vec3fa dir = normalize(computeDirection(primID,itime));
  if (is_finite(dir)) return frame(dir);
  else return LinearSpace3fa(one);
}
pushq %r14 pushq %rbx subq $0x18, %rsp movq %rdi, %rbx vmovsd 0x2c(%rsi), %xmm1 vmovss 0x28(%rsi), %xmm2 vsubss %xmm1, %xmm0, %xmm3 vmovshdup %xmm1, %xmm4 # xmm4 = xmm1[1,1,3,3] vsubss %xmm1, %xmm4, %xmm4 vdivss %xmm4, %xmm3, %xmm3 vmovshdup %xmm0, %xmm0 # xmm0 = xmm0[1,1,3,3] vsubss %xmm1, %xmm0, %xmm0 vdivss %xmm4, %xmm0, %xmm0 vmulss 0x3eacc(%rip), %xmm3, %xmm1 # 0x1ef0940 vmulss %xmm1, %xmm2, %xmm1 vroundss $0x9, %xmm1, %xmm1, %xmm1 vxorps %xmm3, %xmm3, %xmm3 vmaxss %xmm1, %xmm3, %xmm1 vcvttss2si %xmm1, %eax vmulss 0x3eab2(%rip), %xmm0, %xmm0 # 0x1ef0944 vmulss %xmm0, %xmm2, %xmm0 vroundss $0xa, %xmm0, %xmm0, %xmm0 vminss %xmm2, %xmm0, %xmm0 vcvttss2si %xmm0, %ecx cmpl %eax, %ecx jne 0x1eb1f4c vmovaps 0x6f62c(%rip), %xmm0 # 0x1f214e0 vdpps $0x7f, %xmm0, %xmm0, %xmm1 vmovss 0x3eb0a(%rip), %xmm2 # 0x1ef09cc vdpps $0x7f, %xmm2, %xmm2, %xmm3 vcmpltps %xmm1, %xmm3, %k0 vpmovm2d %k0, %xmm1 vpbroadcastd %xmm1, %xmm1 vpmovd2m %xmm1, %k1 vpcmpeqd %xmm1, %xmm1, %xmm1 vmovaps %xmm1, %xmm1 {%k1} {z} vxorps %xmm3, %xmm3, %xmm3 vblendvps %xmm1, %xmm0, %xmm2, %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm1 vmovss %xmm1, %xmm3, %xmm2 # xmm2 = xmm1[0],xmm3[1,2,3] vrsqrt14ss %xmm2, %xmm3, %xmm2 vmulss 0x3a80c(%rip), %xmm2, %xmm3 # 0x1eec718 vmulss 0x3a808(%rip), %xmm1, %xmm1 # 0x1eec71c vmulss %xmm2, %xmm1, %xmm1 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm1, %xmm1 vaddss %xmm1, %xmm3, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm1, %xmm0, %xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,2,0,3] vmulps 0x3f036(%rip), %xmm1, %xmm2 # 0x1ef0f70 vmovaps 0x3a7be(%rip), %xmm0 # 0x1eec700 vfmadd231ps %xmm3, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm3) + xmm2 jmp 0x1eb207e addl %ecx, %eax movl %eax, %ecx shrl $0x1f, %ecx addl %eax, %ecx sarl %ecx movslq %ecx, %rcx movq (%rsi), %rax movq %rsp, %r14 movq %r14, %rdi callq *0x1c0(%rax) vmovaps (%r14), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmulss 0x3a78e(%rip), %xmm3, %xmm4 # 0x1eec718 vmulss 0x3a78a(%rip), %xmm2, %xmm2 # 0x1eec71c vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vaddss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm0, %xmm0 vcmpnltps 0x6fb2a(%rip){1to4}, %xmm0, %k1 # 0x1f21ae0 vcmpleps 0x6fb23(%rip){1to4}, %xmm0, %k0 {%k1} # 0x1f21ae4 knotw %k0, %k0 kmovd %k0, %eax testb $0x7, %al jne 0x1eb20c2 vshufpd $0x1, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,0] vmovshdup %xmm0, %xmm3 # xmm3 = xmm0[1,1,3,3] vbroadcastss 0x6eedd(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm3, %xmm3 vunpckhps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] vmovss %xmm3, %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[1,2,3] vshufps $0x41, %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[1,0],xmm3[0,1] vdpps $0x7f, %xmm3, %xmm3, %xmm5 vxorpd %xmm4, %xmm2, %xmm2 vinsertps $0x2a, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],zero,xmm0[0],zero vdpps $0x7f, %xmm2, %xmm2, %xmm4 vcmpltps %xmm5, %xmm4, %k0 vpmovm2d %k0, %xmm4 vpbroadcastd %xmm4, %xmm4 vpmovd2m %xmm4, %k1 vpcmpeqd %xmm4, %xmm4, %xmm4 vmovaps %xmm4, %xmm4 {%k1} {z} vblendvps %xmm4, %xmm3, %xmm2, %xmm2 vdpps $0x7f, %xmm2, %xmm2, %xmm3 vmovss %xmm3, %xmm1, %xmm4 # xmm4 = xmm3[0],xmm1[1,2,3] vrsqrt14ss %xmm4, %xmm1, %xmm1 vmulss 0x3a6ce(%rip), %xmm1, %xmm4 # 0x1eec718 vmulss 0x3a6ca(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm1, %xmm3, %xmm3 vmulss %xmm1, %xmm1, %xmm1 vmulss %xmm1, %xmm3, %xmm1 vaddss %xmm1, %xmm4, %xmm1 vbroadcastss %xmm1, %xmm1 vmulps %xmm1, %xmm2, %xmm1 vshufps $0xc9, %xmm1, %xmm1, 
%xmm3 # xmm3 = xmm1[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,2,0,3] vmulps %xmm2, %xmm1, %xmm2 vfmsub231ps %xmm3, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm3) - xmm2 vshufps $0xc9, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,2,0,3] vdpps $0x7f, %xmm2, %xmm2, %xmm3 vxorps %xmm4, %xmm4, %xmm4 vmovss %xmm3, %xmm4, %xmm5 # xmm5 = xmm3[0],xmm4[1,2,3] vrsqrt14ss %xmm5, %xmm4, %xmm4 vmulss 0x3a679(%rip), %xmm4, %xmm5 # 0x1eec718 vmulss 0x3a675(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm4, %xmm3, %xmm3 vmulss %xmm4, %xmm4, %xmm4 vmulss %xmm4, %xmm3, %xmm3 vaddss %xmm3, %xmm5, %xmm3 vbroadcastss %xmm3, %xmm3 vmulps %xmm2, %xmm3, %xmm2 jmp 0x1eb20da vmovaps 0x3a636(%rip), %xmm0 # 0x1eec700 vmovsd 0x3a61e(%rip), %xmm2 # 0x1eec6f0 vmovss 0x3a63a(%rip), %xmm1 # 0x1eec714 vmovaps %xmm1, (%rbx) vmovaps %xmm2, 0x10(%rbx) vmovaps %xmm0, 0x20(%rbx) movq %rbx, %rax addq $0x18, %rsp popq %rbx popq %r14 retq nop
/embree[P]embree/kernels/common/scene_line_segments.h
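Before sampling a direction, this variant clamps the query time range to a whole-segment range; in the listing that is the vroundss/vrndscaless pair with immediates 0x9 (round down) and 0xa (round up) followed by the max-with-zero and min-with-numTimeSegments clamps, and the midpoint itime shows up as the addl/sarl pair before the indirect computeDirection call. A hedged sketch of that clamp, with illustrative names:

    #include <algorithm>
    #include <cmath>

    struct Range1f { float lower, upper; };

    // Rescale the query interval into time-step units, floor the left edge,
    // ceil the right edge, and clamp to [0, numSegments].
    static void timeSegmentRange(Range1f query, Range1f geom, float numSegments,
                                 int& ibegin, int& iend) {
      float scale = numSegments / (geom.upper - geom.lower);
      float t0 = (query.lower - geom.lower) * scale;
      float t1 = (query.upper - geom.lower) * scale;
      ibegin = (int)std::max(0.0f,        std::floor(t0));
      iend   = (int)std::min(numSegments, std::ceil(t1));
    }

With ibegin == iend the range is empty and the function falls back to frame(axisz); otherwise it samples the direction at itime = (ibegin + iend)/2.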
embree::avx512::LineSegmentsISA::computeDirection(unsigned int, unsigned long) const
Vec3fa computeDirection(unsigned int primID, size_t time) const { const unsigned vtxID = segment(primID); const Vec3fa v0 = vertex(vtxID+0,time); const Vec3fa v1 = vertex(vtxID+1,time); return v1-v0; }
movq %rdi, %rax movl %edx, %edx imulq 0x68(%rsi), %rdx movq 0x58(%rsi), %rdi movq 0x150(%rsi), %rsi movl (%rdi,%rdx), %edx imulq $0x38, %rcx, %rcx movq (%rsi,%rcx), %rdi movq 0x10(%rsi,%rcx), %rcx leal 0x1(%rdx), %esi imulq %rcx, %rdx imulq %rcx, %rsi vmovaps (%rdi,%rsi), %xmm0 vsubps (%rdi,%rdx), %xmm0, %xmm0 vmovaps %xmm0, (%rax) retq nop
/embree[P]embree/kernels/common/scene_line_segments.h
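The body above compiles to two strided loads and one vsubps: the segment buffer supplies the first vertex index, and each vertex is fetched at base + index*stride (the imulq pairs in the listing). A standalone sketch of that addressing, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    struct V3 { float x, y, z; };

    // vertices is a raw byte pointer because the vertex buffer stride need
    // not equal sizeof(V3); this mirrors the imulq-by-stride addressing.
    static V3 segmentDirection(const uint32_t* segments, const char* vertices,
                               std::size_t stride, unsigned primID) {
      uint32_t v = segments[primID];
      const V3* v0 = reinterpret_cast<const V3*>(vertices + std::size_t(v + 0) * stride);
      const V3* v1 = reinterpret_cast<const V3*>(vertices + std::size_t(v + 1) * stride);
      return { v1->x - v0->x, v1->y - v0->y, v1->z - v0->z };
    }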
embree::avx512::LineSegmentsISA::vbounds(unsigned long) const
BBox3fa vbounds(size_t i) const { return bounds(i); }
movq %rdi, %rax imulq 0x68(%rsi), %rdx movq 0x58(%rsi), %rcx movq 0x90(%rsi), %rdi movl (%rcx,%rdx), %ecx movq 0xa0(%rsi), %rdx leal 0x1(%rcx), %r8d imulq %rdx, %rcx vmovaps (%rdi,%rcx), %xmm0 imulq %rdx, %r8 vmovaps (%rdi,%r8), %xmm1 vminps %xmm1, %xmm0, %xmm2 vmovss 0xc(%rdi,%r8), %xmm3 vmaxps %xmm1, %xmm0, %xmm0 vmaxss 0xc(%rdi,%rcx), %xmm3, %xmm1 vmulss 0x19c(%rsi), %xmm1, %xmm1 vbroadcastss %xmm1, %xmm1 vsubps %xmm1, %xmm2, %xmm2 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) retq
/embree[P]embree/kernels/common/scene_line_segments.h
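The compiled bounds kernel takes the componentwise min/max of the two endpoint positions and widens the box by the larger endpoint radius times the geometry's radius scale (the vmulss against the field at offset 0x19c). A minimal sketch, assuming the fourth vertex component holds the radius as the 0xc(...) loads suggest:

    #include <algorithm>

    struct V3 { float x, y, z; };
    struct Box { V3 lower, upper; };

    static Box segmentBounds(V3 p0, float r0, V3 p1, float r1, float radiusScale) {
      float r = std::max(r0, r1) * radiusScale;   // widen by the larger scaled radius
      return {
        { std::min(p0.x, p1.x) - r, std::min(p0.y, p1.y) - r, std::min(p0.z, p1.z) - r },
        { std::max(p0.x, p1.x) + r, std::max(p0.y, p1.y) + r, std::max(p0.z, p1.z) + r },
      };
    }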
embree::avx512::LineSegmentsISA::vbounds(embree::LinearSpace3<embree::Vec3fa> const&, unsigned long) const
BBox3fa vbounds(const LinearSpace3fa& space, size_t i) const { return bounds(space,i); }
movq %rdi, %rax imulq 0x68(%rsi), %rcx movq 0x58(%rsi), %rdi movq 0x90(%rsi), %r8 movl (%rdi,%rcx), %ecx movq 0xa0(%rsi), %rdi leal 0x1(%rcx), %r9d imulq %rdi, %rcx imulq %rdi, %r9 vmovaps (%rdx), %xmm0 vmovaps 0x10(%rdx), %xmm1 vmovaps 0x20(%rdx), %xmm2 vmulps 0x8(%r8,%rcx){1to4}, %xmm2, %xmm3 vfmadd231ps 0x4(%r8,%rcx){1to4}, %xmm1, %xmm3 # xmm3 = (xmm1 * mem) + xmm3 vfmadd231ps (%r8,%rcx){1to4}, %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3 vbroadcastss 0xc(%r8,%rcx), %xmm4 vmulps 0x8(%r8,%r9){1to4}, %xmm2, %xmm2 vfmadd231ps 0x4(%r8,%r9){1to4}, %xmm1, %xmm2 # xmm2 = (xmm1 * mem) + xmm2 vblendps $0x8, %xmm4, %xmm3, %xmm1 # xmm1 = xmm3[0,1,2],xmm4[3] vfmadd231ps (%r8,%r9){1to4}, %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2 vbroadcastss 0xc(%r8,%r9), %xmm0 vblendps $0x8, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm0[3] vminps %xmm2, %xmm1, %xmm3 vmaxps %xmm2, %xmm1, %xmm1 vmaxss %xmm4, %xmm0, %xmm0 vmulss 0x19c(%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 vsubps %xmm0, %xmm3, %xmm2 vaddps %xmm0, %xmm1, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) retq
/embree[P]embree/kernels/common/scene_line_segments.h
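This variant first rotates each endpoint into the supplied space; the broadcast-FMA chains with {1to4} memory operands are a 3x3 matrix-vector product over the columns of the space. A sketch of that product, with illustrative names:

    struct V3 { float x, y, z; };

    // p' = vx*p.x + vy*p.y + vz*p.z, matching the vmulps/vfmadd231ps chain
    // where each scalar component of p is broadcast against a matrix column.
    static V3 xfmVector(V3 vx, V3 vy, V3 vz, V3 p) {
      return { vx.x*p.x + vy.x*p.y + vz.x*p.z,
               vx.y*p.x + vy.y*p.y + vz.y*p.z,
               vx.z*p.x + vy.z*p.y + vz.z*p.z };
    }

After the transform the min/max/radius expansion proceeds exactly as in the untransformed vbounds above.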
embree::avx512::LineSegmentsISA::vbounds(embree::Vec3fa const&, float, float, embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, unsigned long) const
BBox3fa vbounds(const Vec3fa& ofs, const float scale, const float r_scale0, const LinearSpace3fa& space, size_t i, size_t itime = 0) const { return bounds(ofs,scale,r_scale0,space,i,itime); }
movq %rdi, %rax imulq 0x68(%rsi), %r8 vmulss %xmm1, %xmm0, %xmm1 movq 0x58(%rsi), %rdi movq 0x150(%rsi), %r10 movl (%rdi,%r8), %r11d imulq $0x38, %r9, %r8 movq (%r10,%r8), %rdi movq 0x10(%r10,%r8), %r9 leal 0x1(%r11), %r8d imulq %r9, %r11 vmovaps (%rdi,%r11), %xmm2 imulq %r9, %r8 vmovaps (%rdi,%r8), %xmm3 vmovaps (%rdx), %xmm4 vsubps %xmm4, %xmm2, %xmm2 vbroadcastss %xmm0, %xmm0 vmulps %xmm2, %xmm0, %xmm2 vbroadcastss %xmm2, %xmm5 vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1] vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2] vmovaps (%rcx), %xmm7 vmovaps 0x10(%rcx), %xmm8 vmovaps 0x20(%rcx), %xmm9 vmulps %xmm2, %xmm9, %xmm2 vfmadd231ps %xmm6, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm6) + xmm2 vfmadd231ps %xmm5, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm5) + xmm2 vmovss 0x19c(%rsi), %xmm5 vmulss 0xc(%rdi,%r11), %xmm5, %xmm6 vmulss %xmm6, %xmm1, %xmm6 vinsertps $0x30, %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm6[0] vsubps %xmm4, %xmm3, %xmm3 vmulps %xmm3, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm3 vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1] vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2] vmulps %xmm0, %xmm9, %xmm0 vfmadd231ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm4) + xmm0 vfmadd231ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm3) + xmm0 vmulss 0xc(%rdi,%r8), %xmm5, %xmm3 vmulss %xmm3, %xmm1, %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vminps %xmm0, %xmm2, %xmm3 vmaxps %xmm0, %xmm2, %xmm0 vmaxss %xmm6, %xmm1, %xmm1 vmulss %xmm1, %xmm5, %xmm1 vbroadcastss %xmm1, %xmm1 vsubps %xmm1, %xmm3, %xmm2 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) retq
/embree[P]embree/kernels/common/scene_line_segments.h
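The offset/scale overload additionally normalizes each endpoint before the transform, and the single vmulss at the top folds scale and r_scale0 into one combined radius factor. A hedged sketch of the position normalization (names illustrative):

    struct V3 { float x, y, z; };

    // Shift by ofs, scale uniformly, then rotate into the space spanned by
    // vx/vy/vz; the endpoint radius is scaled separately by scale*r_scale0.
    static V3 toLocal(V3 p, V3 ofs, float scale, V3 vx, V3 vy, V3 vz) {
      V3 q = { (p.x - ofs.x)*scale, (p.y - ofs.y)*scale, (p.z - ofs.z)*scale };
      return { vx.x*q.x + vy.x*q.y + vz.x*q.z,
               vx.y*q.x + vy.y*q.y + vz.y*q.z,
               vx.z*q.x + vy.z*q.y + vz.z*q.z };
    }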
embree::avx512::LineSegmentsISA::vlinearBounds(unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(size_t primID, const BBox1f& time_range) const { return linearBounds(primID,time_range); }
pushq %r15 pushq %r14 pushq %rbx movq %rdi, %rax vmovss 0x28(%rsi), %xmm0 vmovss 0x2c(%rsi), %xmm2 vmovss (%rcx), %xmm1 vmovss 0x4(%rcx), %xmm3 vsubss %xmm2, %xmm1, %xmm1 vmovss 0x30(%rsi), %xmm4 vsubss %xmm2, %xmm4, %xmm4 vdivss %xmm4, %xmm1, %xmm1 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm4, %xmm2, %xmm7 vmulss %xmm1, %xmm0, %xmm11 vroundss $0x9, %xmm11, %xmm11, %xmm2 vmulss %xmm7, %xmm0, %xmm8 vroundss $0xa, %xmm8, %xmm8, %xmm3 vxorps %xmm9, %xmm9, %xmm9 vmaxss %xmm9, %xmm2, %xmm12 vminss %xmm0, %xmm3, %xmm10 vcvttss2si %xmm12, %r8d vcvttss2si %xmm10, %r11d vcvttss2si %xmm2, %r9d testl %r9d, %r9d movl $0xffffffff, %ecx # imm = 0xFFFFFFFF vcvttss2si %xmm3, %r10d vcvttss2si %xmm0, %edi cmovnsl %r9d, %ecx incl %edi cmpl %edi, %r10d cmovll %r10d, %edi movslq %r8d, %r8 movq 0x58(%rsi), %r9 imulq 0x68(%rsi), %rdx movl (%r9,%rdx), %edx movq 0x150(%rsi), %r9 imulq $0x38, %r8, %r10 movq (%r9,%r10), %rbx movq 0x10(%r9,%r10), %r14 movq %r14, %r15 imulq %rdx, %r15 vmovaps (%rbx,%r15), %xmm2 leal 0x1(%rdx), %r8d imulq %r8, %r14 vmovaps (%rbx,%r14), %xmm3 vminps %xmm3, %xmm2, %xmm4 vmaxps %xmm3, %xmm2, %xmm2 vmovss 0x19c(%rsi), %xmm6 vmovss 0xc(%rbx,%r14), %xmm3 vmaxss 0xc(%rbx,%r15), %xmm3, %xmm3 vmulss %xmm3, %xmm6, %xmm3 vbroadcastss %xmm3, %xmm5 vsubps %xmm5, %xmm4, %xmm3 vaddps %xmm5, %xmm2, %xmm2 movslq %r11d, %rsi imulq $0x38, %rsi, %rsi movq (%r9,%rsi), %r11 movq 0x10(%r9,%rsi), %rbx movq %rbx, %r14 imulq %rdx, %r14 vmovaps (%r11,%r14), %xmm4 imulq %r8, %rbx vmovaps (%r11,%rbx), %xmm5 vminps %xmm5, %xmm4, %xmm13 vmaxps %xmm5, %xmm4, %xmm4 vmovss 0xc(%r11,%rbx), %xmm5 vmaxss 0xc(%r11,%r14), %xmm5, %xmm5 vmulss %xmm5, %xmm6, %xmm5 vbroadcastss %xmm5, %xmm14 vsubps %xmm14, %xmm13, %xmm5 vaddps %xmm4, %xmm14, %xmm4 movl %edi, %r11d subl %ecx, %r11d cmpl $0x1, %r11d jne 0x1eb2526 vsubss %xmm12, %xmm11, %xmm0 vmaxss %xmm9, %xmm0, %xmm0 vmovss 0x3a246(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm6 vbroadcastss %xmm0, %xmm0 vmulps %xmm5, %xmm0, %xmm7 vbroadcastss %xmm6, %xmm6 vfmadd231ps %xmm3, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm3) + xmm7 vmulps %xmm4, %xmm0, %xmm0 vmovaps %xmm7, (%rax) vfmadd231ps %xmm6, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm6) + xmm0 vmovaps %xmm0, 0x10(%rax) vsubss %xmm8, %xmm10, %xmm0 vmaxss %xmm9, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm3, %xmm0, %xmm3 vbroadcastss %xmm1, %xmm1 vfmadd213ps %xmm3, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm3 vmulps %xmm2, %xmm0, %xmm0 vfmadd213ps %xmm0, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm4) + xmm0 jmp 0x1eb2704 movq 0x38(%r9,%r10), %r11 movq 0x48(%r9,%r10), %r10 movq %r10, %rbx imulq %rdx, %rbx vmovaps (%r11,%rbx), %xmm13 imulq %r8, %r10 vmovaps (%r11,%r10), %xmm14 vminps %xmm14, %xmm13, %xmm15 vmaxps %xmm14, %xmm13, %xmm13 vmovss 0xc(%r11,%r10), %xmm14 vmaxss 0xc(%r11,%rbx), %xmm14, %xmm14 vmulss %xmm6, %xmm14, %xmm14 vbroadcastss %xmm14, %xmm14 vsubps %xmm14, %xmm15, %xmm15 movq -0x38(%r9,%rsi), %r10 movq -0x28(%r9,%rsi), %rsi movq %rsi, %r11 imulq %rdx, %r11 vmovaps (%r10,%r11), %xmm16 vaddps %xmm14, %xmm13, %xmm13 imulq %r8, %rsi vmovaps (%r10,%rsi), %xmm14 vminps %xmm14, %xmm16, %xmm17 vmaxps %xmm14, %xmm16, %xmm14 vmovss 0xc(%r10,%rsi), %xmm16 vmaxss 0xc(%r10,%r11), %xmm16, %xmm16 vmulss %xmm16, %xmm6, %xmm16 vbroadcastss %xmm16, %xmm16 vsubps %xmm16, %xmm17, %xmm17 vaddps %xmm16, %xmm14, %xmm14 vsubss %xmm12, %xmm11, %xmm11 vmaxss %xmm9, %xmm11, %xmm12 vmovss 0x3a13a(%rip), %xmm11 # 0x1eec714 vsubss %xmm12, %xmm11, %xmm16 vbroadcastss %xmm12, %xmm12 vmulps %xmm15, %xmm12, %xmm15 vbroadcastss %xmm16, %xmm16 
vfmadd213ps %xmm15, %xmm16, %xmm3 # xmm3 = (xmm16 * xmm3) + xmm15 vmulps %xmm13, %xmm12, %xmm12 vfmadd213ps %xmm12, %xmm16, %xmm2 # xmm2 = (xmm16 * xmm2) + xmm12 vsubss %xmm8, %xmm10, %xmm8 vmaxss %xmm9, %xmm8, %xmm8 vsubss %xmm8, %xmm11, %xmm9 vbroadcastss %xmm8, %xmm8 vmulps %xmm17, %xmm8, %xmm10 vbroadcastss %xmm9, %xmm9 vfmadd213ps %xmm10, %xmm9, %xmm5 # xmm5 = (xmm9 * xmm5) + xmm10 vmulps %xmm14, %xmm8, %xmm8 vfmadd213ps %xmm8, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm4) + xmm8 incl %ecx cmpl %edi, %ecx jge 0x1eb26fb vsubss %xmm1, %xmm7, %xmm7 movl %ecx, %esi imulq $0x38, %rsi, %rsi addq %r9, %rsi addq $0x10, %rsi vxorps %xmm8, %xmm8, %xmm8 vcvtsi2ss %ecx, %xmm0, %xmm9 vdivss %xmm0, %xmm9, %xmm9 vsubss %xmm1, %xmm9, %xmm9 vdivss %xmm7, %xmm9, %xmm9 vsubss %xmm9, %xmm11, %xmm10 vbroadcastss %xmm9, %xmm9 vmulps %xmm5, %xmm9, %xmm12 vbroadcastss %xmm10, %xmm10 vfmadd231ps %xmm3, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm3) + xmm12 vmulps %xmm4, %xmm9, %xmm9 vfmadd231ps %xmm10, %xmm2, %xmm9 # xmm9 = (xmm2 * xmm10) + xmm9 movq -0x10(%rsi), %r9 movq (%rsi), %r10 movq %r10, %r11 imulq %rdx, %r11 vmovaps (%r9,%r11), %xmm10 imulq %r8, %r10 vmovaps (%r9,%r10), %xmm13 vminps %xmm13, %xmm10, %xmm14 vmovss 0xc(%r9,%r10), %xmm15 vmaxps %xmm13, %xmm10, %xmm10 vmaxss 0xc(%r9,%r11), %xmm15, %xmm13 vmulss %xmm6, %xmm13, %xmm13 vbroadcastss %xmm13, %xmm13 vsubps %xmm13, %xmm14, %xmm14 vsubps %xmm12, %xmm14, %xmm12 vaddps %xmm13, %xmm10, %xmm10 vsubps %xmm9, %xmm10, %xmm9 vminps %xmm8, %xmm12, %xmm10 vmaxps %xmm8, %xmm9, %xmm9 vaddps %xmm3, %xmm10, %xmm3 vaddps %xmm5, %xmm10, %xmm5 vaddps %xmm2, %xmm9, %xmm2 vaddps %xmm4, %xmm9, %xmm4 incl %ecx addq $0x38, %rsi cmpl %ecx, %edi jne 0x1eb264f vmovaps %xmm3, (%rax) vmovaps %xmm2, 0x10(%rax) vmovaps %xmm5, 0x20(%rax) vmovaps %xmm4, 0x30(%rax) popq %rbx popq %r14 popq %r15 retq
/embree[P]embree/kernels/common/scene_line_segments.h
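The loop structure of this linearBounds expansion is visible in the listing: bounds are taken at the clamped first and last time steps, lerped to the fractional interval endpoints, and every interior step's bounds are compared against the lerped box, with any overshoot added to both output boxes (the vminps/vmaxps against zero followed by four vaddps). A simplified sketch of that accumulation, my own reconstruction rather than Embree's exact template:

    #include <algorithm>

    struct Box3 { float lo[3], hi[3]; };

    // bounds(i) returns the box at time step i; flo/fhi are the fractional
    // step positions of the query interval. b0/b1 are the boxes returned
    // for the interval start and end.
    template<typename BoundsFn>
    static void linearBounds(BoundsFn bounds, int ilo, int ihi, float flo, float fhi,
                             Box3& b0, Box3& b1) {
      b0 = bounds(ilo);
      b1 = bounds(ihi);  // endpoint lerp toward flo/fhi omitted for brevity
      for (int i = ilo + 1; i < ihi; i++) {
        float f = (float(i) - flo) / (fhi - flo);  // where step i falls in the interval
        Box3 bi = bounds(i);
        for (int k = 0; k < 3; k++) {
          float lo = (1.0f - f) * b0.lo[k] + f * b1.lo[k];
          float hi = (1.0f - f) * b0.hi[k] + f * b1.hi[k];
          float dlo = std::min(0.0f, bi.lo[k] - lo);  // vminps with zero
          float dhi = std::max(0.0f, bi.hi[k] - hi);  // vmaxps with zero
          b0.lo[k] += dlo; b1.lo[k] += dlo;           // widen both ends so the
          b0.hi[k] += dhi; b1.hi[k] += dhi;           // lerp encloses step i
        }
      }
    }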
embree::avx512::LineSegmentsISA::vlinearBounds(embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(const LinearSpace3fa& space, size_t primID, const BBox1f& time_range) const { return linearBounds(space,primID,time_range); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq %rdi, %rax vmovss 0x28(%rsi), %xmm24 vmovss 0x2c(%rsi), %xmm2 vmovss (%r8), %xmm1 vmovss 0x4(%r8), %xmm3 vmovss 0x30(%rsi), %xmm4 vsubss %xmm2, %xmm1, %xmm1 vsubss %xmm2, %xmm4, %xmm4 vdivss %xmm4, %xmm1, %xmm23 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm4, %xmm2, %xmm18 vmulss %xmm23, %xmm24, %xmm21 vmulss %xmm18, %xmm24, %xmm19 vrndscaless $0x9, %xmm21, %xmm21, %xmm2 vrndscaless $0xa, %xmm19, %xmm19, %xmm3 vxorps %xmm17, %xmm17, %xmm17 vmaxss %xmm17, %xmm2, %xmm15 vminss %xmm24, %xmm3, %xmm20 vcvttss2si %xmm15, %r9d vcvttss2si %xmm20, %ebx vcvttss2si %xmm2, %r8d testl %r8d, %r8d movl $0xffffffff, %edi # imm = 0xFFFFFFFF vcvttss2si %xmm3, %r10d cmovnsl %r8d, %edi vcvttss2si %xmm24, %r8d incl %r8d cmpl %r8d, %r10d cmovll %r10d, %r8d movslq %r9d, %r9 movq 0x58(%rsi), %r10 imulq 0x68(%rsi), %rcx movl (%r10,%rcx), %ecx movq 0x150(%rsi), %r10 imulq $0x38, %r9, %r11 movq (%r10,%r11), %r14 movq 0x10(%r10,%r11), %r15 movq %r15, %r12 imulq %rcx, %r12 leal 0x1(%rcx), %r9d imulq %r9, %r15 vmovaps (%rdx), %xmm4 vmovaps 0x10(%rdx), %xmm5 vmovaps 0x20(%rdx), %xmm6 vmulps 0x8(%r14,%r12){1to4}, %xmm6, %xmm2 vfmadd231ps 0x4(%r14,%r12){1to4}, %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2 vfmadd231ps (%r14,%r12){1to4}, %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2 vbroadcastss 0xc(%r14,%r12), %xmm3 vmulps 0x8(%r14,%r15){1to4}, %xmm6, %xmm7 vfmadd231ps 0x4(%r14,%r15){1to4}, %xmm5, %xmm7 # xmm7 = (xmm5 * mem) + xmm7 vblendps $0x8, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm3[3] vfmadd231ps (%r14,%r15){1to4}, %xmm4, %xmm7 # xmm7 = (xmm4 * mem) + xmm7 vbroadcastss 0xc(%r14,%r15), %xmm8 vblendps $0x8, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],xmm8[3] vminps %xmm7, %xmm2, %xmm16 vmaxps %xmm7, %xmm2, %xmm2 vmovss 0x19c(%rsi), %xmm9 vmaxss %xmm3, %xmm8, %xmm3 vmulss %xmm3, %xmm9, %xmm3 vbroadcastss %xmm3, %xmm7 vsubps %xmm7, %xmm16, %xmm3 vaddps %xmm7, %xmm2, %xmm2 movslq %ebx, %rdx imulq $0x38, %rdx, %rdx movq (%r10,%rdx), %rsi movq 0x10(%r10,%rdx), %rbx movq %rbx, %r14 imulq %rcx, %r14 vmulps 0x8(%rsi,%r14){1to4}, %xmm6, %xmm7 vfmadd231ps 0x4(%rsi,%r14){1to4}, %xmm5, %xmm7 # xmm7 = (xmm5 * mem) + xmm7 imulq %r9, %rbx vfmadd231ps (%rsi,%r14){1to4}, %xmm4, %xmm7 # xmm7 = (xmm4 * mem) + xmm7 vbroadcastss 0xc(%rsi,%r14), %xmm8 vblendps $0x8, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],xmm8[3] vmulps 0x8(%rsi,%rbx){1to4}, %xmm6, %xmm12 vfmadd231ps 0x4(%rsi,%rbx){1to4}, %xmm5, %xmm12 # xmm12 = (xmm5 * mem) + xmm12 vfmadd231ps (%rsi,%rbx){1to4}, %xmm4, %xmm12 # xmm12 = (xmm4 * mem) + xmm12 vbroadcastss 0xc(%rsi,%rbx), %xmm10 vblendps $0x8, %xmm10, %xmm12, %xmm12 # xmm12 = xmm12[0,1,2],xmm10[3] vminps %xmm12, %xmm7, %xmm16 vmaxps %xmm12, %xmm7, %xmm7 vmaxss %xmm8, %xmm10, %xmm8 vmulss %xmm8, %xmm9, %xmm8 vbroadcastss %xmm8, %xmm10 vsubps %xmm10, %xmm16, %xmm8 vaddps %xmm7, %xmm10, %xmm7 movl %r8d, %esi subl %edi, %esi cmpl $0x1, %esi jne 0x1eb295e vsubss %xmm15, %xmm21, %xmm0 vmaxss %xmm17, %xmm0, %xmm0 vmovss 0x39e10(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm4 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm8, %xmm5 vbroadcastss %xmm4, %xmm4 vfmadd231ps %xmm3, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm3) + xmm5 vmulps %xmm7, %xmm0, %xmm0 vmovaps %xmm5, (%rax) vfmadd231ps %xmm4, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm4) + xmm0 vmovaps %xmm0, 0x10(%rax) vsubss %xmm19, %xmm20, %xmm0 vmaxss %xmm17, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm3, %xmm0, %xmm3 vbroadcastss %xmm1, %xmm1 vfmadd213ps %xmm3, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm3 vmulps %xmm2, %xmm0, %xmm0 
vfmadd213ps %xmm0, %xmm1, %xmm7 # xmm7 = (xmm1 * xmm7) + xmm0 jmp 0x1eb2be3 movq 0x38(%r10,%r11), %rsi movq 0x48(%r10,%r11), %r11 movq %r11, %rbx imulq %rcx, %rbx imulq %r9, %r11 vmulps 0x8(%rsi,%rbx){1to4}, %xmm6, %xmm10 vfmadd231ps 0x4(%rsi,%rbx){1to4}, %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10 vfmadd231ps (%rsi,%rbx){1to4}, %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10 vbroadcastss 0xc(%rsi,%rbx), %xmm12 vmulps 0x8(%rsi,%r11){1to4}, %xmm6, %xmm11 vfmadd231ps 0x4(%rsi,%r11){1to4}, %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11 vfmadd231ps (%rsi,%r11){1to4}, %xmm4, %xmm11 # xmm11 = (xmm4 * mem) + xmm11 vbroadcastss 0xc(%rsi,%r11), %xmm13 vblendps $0x8, %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0,1,2],xmm12[3] vblendps $0x8, %xmm13, %xmm11, %xmm11 # xmm11 = xmm11[0,1,2],xmm13[3] vminps %xmm11, %xmm10, %xmm16 vmaxps %xmm11, %xmm10, %xmm10 vmaxss %xmm12, %xmm13, %xmm11 vmulss %xmm11, %xmm9, %xmm11 vbroadcastss %xmm11, %xmm11 vsubps %xmm11, %xmm16, %xmm12 vaddps %xmm11, %xmm10, %xmm16 movq -0x38(%r10,%rdx), %rsi movq -0x28(%r10,%rdx), %rdx movq %rdx, %r11 imulq %rcx, %r11 vmulps 0x8(%rsi,%r11){1to4}, %xmm6, %xmm11 vfmadd231ps 0x4(%rsi,%r11){1to4}, %xmm5, %xmm11 # xmm11 = (xmm5 * mem) + xmm11 vfmadd231ps (%rsi,%r11){1to4}, %xmm4, %xmm11 # xmm11 = (xmm4 * mem) + xmm11 imulq %r9, %rdx vbroadcastss 0xc(%rsi,%r11), %xmm13 vblendps $0x8, %xmm13, %xmm11, %xmm11 # xmm11 = xmm11[0,1,2],xmm13[3] vmulps 0x8(%rsi,%rdx){1to4}, %xmm6, %xmm10 vfmadd231ps 0x4(%rsi,%rdx){1to4}, %xmm5, %xmm10 # xmm10 = (xmm5 * mem) + xmm10 vfmadd231ps (%rsi,%rdx){1to4}, %xmm4, %xmm10 # xmm10 = (xmm4 * mem) + xmm10 vbroadcastss 0xc(%rsi,%rdx), %xmm14 vblendps $0x8, %xmm14, %xmm10, %xmm10 # xmm10 = xmm10[0,1,2],xmm14[3] vminps %xmm10, %xmm11, %xmm22 vmaxps %xmm10, %xmm11, %xmm10 vmaxss %xmm13, %xmm14, %xmm11 vmulss %xmm11, %xmm9, %xmm11 vbroadcastss %xmm11, %xmm11 vsubps %xmm11, %xmm22, %xmm13 vaddps %xmm11, %xmm10, %xmm10 vsubss %xmm15, %xmm21, %xmm11 vmaxss %xmm17, %xmm11, %xmm11 vmovss 0x39c9b(%rip), %xmm21 # 0x1eec714 vsubss %xmm11, %xmm21, %xmm15 vbroadcastss %xmm11, %xmm11 vmulps %xmm12, %xmm11, %xmm12 vbroadcastss %xmm15, %xmm15 vfmadd213ps %xmm12, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm3) + xmm12 vmulps %xmm16, %xmm11, %xmm11 vfmadd213ps %xmm11, %xmm15, %xmm2 # xmm2 = (xmm15 * xmm2) + xmm11 vsubss %xmm19, %xmm20, %xmm11 vmaxss %xmm17, %xmm11, %xmm11 vsubss %xmm11, %xmm21, %xmm12 vbroadcastss %xmm11, %xmm11 vmulps %xmm13, %xmm11, %xmm13 vbroadcastss %xmm12, %xmm12 vfmadd213ps %xmm13, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm8) + xmm13 vmulps %xmm10, %xmm11, %xmm10 vfmadd213ps %xmm10, %xmm12, %xmm7 # xmm7 = (xmm12 * xmm7) + xmm10 incl %edi cmpl %r8d, %edi jge 0x1eb2bda vsubss %xmm23, %xmm18, %xmm10 movl %edi, %edx imulq $0x38, %rdx, %rdx addq %r10, %rdx addq $0x10, %rdx vxorps %xmm11, %xmm11, %xmm11 vcvtsi2ss %edi, %xmm17, %xmm12 vdivss %xmm24, %xmm12, %xmm12 vsubss %xmm23, %xmm12, %xmm12 vdivss %xmm10, %xmm12, %xmm12 vsubss %xmm12, %xmm21, %xmm13 vbroadcastss %xmm12, %xmm12 vmulps %xmm12, %xmm8, %xmm15 vbroadcastss %xmm13, %xmm13 vfmadd231ps %xmm3, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm3) + xmm15 vmulps %xmm7, %xmm12, %xmm12 vfmadd231ps %xmm13, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm13) + xmm12 movq -0x10(%rdx), %rsi movq (%rdx), %r10 movq %r10, %r11 imulq %rcx, %r11 imulq %r9, %r10 vmulps 0x8(%rsi,%r11){1to4}, %xmm6, %xmm13 vfmadd231ps 0x4(%rsi,%r11){1to4}, %xmm5, %xmm13 # xmm13 = (xmm5 * mem) + xmm13 vfmadd231ps (%rsi,%r11){1to4}, %xmm4, %xmm13 # xmm13 = (xmm4 * mem) + xmm13 vbroadcastss 0xc(%rsi,%r11), %xmm1 vmulps 0x8(%rsi,%r10){1to4}, 
%xmm6, %xmm14 vfmadd231ps 0x4(%rsi,%r10){1to4}, %xmm5, %xmm14 # xmm14 = (xmm5 * mem) + xmm14 vblendps $0x8, %xmm1, %xmm13, %xmm13 # xmm13 = xmm13[0,1,2],xmm1[3] vfmadd231ps (%rsi,%r10){1to4}, %xmm4, %xmm14 # xmm14 = (xmm4 * mem) + xmm14 vbroadcastss 0xc(%rsi,%r10), %xmm0 vblendps $0x8, %xmm0, %xmm14, %xmm14 # xmm14 = xmm14[0,1,2],xmm0[3] vminps %xmm14, %xmm13, %xmm16 vmaxps %xmm14, %xmm13, %xmm13 vmaxss %xmm1, %xmm0, %xmm0 vmulss %xmm0, %xmm9, %xmm0 vbroadcastss %xmm0, %xmm0 vsubps %xmm0, %xmm16, %xmm1 vsubps %xmm15, %xmm1, %xmm1 vaddps %xmm0, %xmm13, %xmm0 vsubps %xmm12, %xmm0, %xmm0 vminps %xmm11, %xmm1, %xmm1 vmaxps %xmm11, %xmm0, %xmm0 vaddps %xmm1, %xmm3, %xmm3 vaddps %xmm1, %xmm8, %xmm8 vaddps %xmm0, %xmm2, %xmm2 vaddps %xmm0, %xmm7, %xmm7 incl %edi addq $0x38, %rdx cmpl %edi, %r8d jne 0x1eb2af1 vmovaps %xmm3, (%rax) vmovaps %xmm2, 0x10(%rax) vmovaps %xmm8, 0x20(%rax) vmovaps %xmm7, 0x30(%rax) popq %rbx popq %r12 popq %r14 popq %r15 retq nop
/embree[P]embree/kernels/common/scene_line_segments.h
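The same walk appears here with the 3x3 space transform folded into each vertex fetch; note the single-segment fast path guarded by cmpl $0x1, where no interior loop runs and both output boxes are plain lerps of the two step boxes. A sketch of that fast path (illustrative names):

    struct Box3 { float lo[3], hi[3]; };

    // Lerp two step boxes at fraction f; the fast path evaluates this once
    // for the interval start and once for the interval end.
    static Box3 lerpBox(const Box3& a, const Box3& b, float f) {
      Box3 r;
      for (int k = 0; k < 3; k++) {
        r.lo[k] = (1.0f - f) * a.lo[k] + f * b.lo[k];
        r.hi[k] = (1.0f - f) * a.hi[k] + f * b.hi[k];
      }
      return r;
    }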
embree::avx512::LineSegmentsISA::vlinearBounds(embree::Vec3fa const&, float, float, embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const
LBBox3fa vlinearBounds(const Vec3fa& ofs, const float scale, const float r_scale0, const LinearSpace3fa& space, size_t primID, const BBox1f& time_range) const { return linearBounds(ofs,scale,r_scale0,space,primID,time_range); }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq %rdi, %rax vmovss 0x28(%rsi), %xmm2 vmovss 0x2c(%rsi), %xmm4 vmovss (%r9), %xmm3 vmovss 0x4(%r9), %xmm5 vmovss 0x30(%rsi), %xmm6 vsubss %xmm4, %xmm3, %xmm3 vsubss %xmm4, %xmm6, %xmm6 vdivss %xmm6, %xmm3, %xmm3 vsubss %xmm4, %xmm5, %xmm4 vdivss %xmm6, %xmm4, %xmm13 vmulss %xmm3, %xmm2, %xmm17 vmulss %xmm2, %xmm13, %xmm14 vrndscaless $0x9, %xmm17, %xmm17, %xmm4 vroundss $0xa, %xmm14, %xmm14, %xmm5 vxorps %xmm15, %xmm15, %xmm15 vmaxss %xmm15, %xmm4, %xmm18 vminss %xmm2, %xmm5, %xmm16 vcvttss2si %xmm18, %r10d vcvttss2si %xmm16, %ebp vcvttss2si %xmm4, %r9d testl %r9d, %r9d movl $0xffffffff, %edi # imm = 0xFFFFFFFF vcvttss2si %xmm5, %r11d cmovnsl %r9d, %edi vcvttss2si %xmm2, %r9d incl %r9d cmpl %r9d, %r11d cmovll %r11d, %r9d movslq %r10d, %r10 movq 0x58(%rsi), %r11 imulq 0x68(%rsi), %r8 movl (%r11,%r8), %r8d movq 0x150(%rsi), %r11 imulq $0x38, %r10, %rbx movq (%r11,%rbx), %r14 movq 0x10(%r11,%rbx), %r15 movq %r15, %r12 imulq %r8, %r12 vmovaps (%r14,%r12), %xmm5 leal 0x1(%r8), %r10d imulq %r10, %r15 vmovaps (%r14,%r15), %xmm6 vmovaps (%rdx), %xmm4 vmulss %xmm1, %xmm0, %xmm1 vsubps %xmm4, %xmm5, %xmm5 vbroadcastss %xmm0, %xmm0 vmulps %xmm5, %xmm0, %xmm5 vbroadcastss %xmm5, %xmm10 vshufps $0x55, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1,1,1] vmovaps (%rcx), %xmm7 vmovaps 0x10(%rcx), %xmm8 vmovaps 0x20(%rcx), %xmm9 vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2] vmulps %xmm5, %xmm9, %xmm5 vfmadd231ps %xmm11, %xmm8, %xmm5 # xmm5 = (xmm8 * xmm11) + xmm5 vfmadd231ps %xmm10, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm10) + xmm5 vmovss 0x19c(%rsi), %xmm10 vmulss 0xc(%r14,%r12), %xmm10, %xmm11 vmulss %xmm1, %xmm11, %xmm11 vinsertps $0x30, %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm11[0] vsubps %xmm4, %xmm6, %xmm6 vmulps %xmm6, %xmm0, %xmm6 vbroadcastss %xmm6, %xmm12 vshufps $0x55, %xmm6, %xmm6, %xmm19 # xmm19 = xmm6[1,1,1,1] vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2] vmulps %xmm6, %xmm9, %xmm6 vfmadd231ps %xmm19, %xmm8, %xmm6 # xmm6 = (xmm8 * xmm19) + xmm6 vfmadd231ps %xmm12, %xmm7, %xmm6 # xmm6 = (xmm7 * xmm12) + xmm6 vmulss 0xc(%r14,%r15), %xmm10, %xmm12 vmulss %xmm1, %xmm12, %xmm12 vinsertps $0x30, %xmm12, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm12[0] vminps %xmm6, %xmm5, %xmm19 vmaxps %xmm6, %xmm5, %xmm5 vmaxss %xmm11, %xmm12, %xmm6 vmulss %xmm6, %xmm10, %xmm6 vbroadcastss %xmm6, %xmm11 vsubps %xmm11, %xmm19, %xmm6 vaddps %xmm5, %xmm11, %xmm5 movslq %ebp, %rcx imulq $0x38, %rcx, %rcx movq (%r11,%rcx), %rdx movq 0x10(%r11,%rcx), %rsi movq %rsi, %r14 imulq %r8, %r14 vmovaps (%rdx,%r14), %xmm11 imulq %r10, %rsi vmovaps (%rdx,%rsi), %xmm12 vsubps %xmm4, %xmm11, %xmm11 vmulps %xmm0, %xmm11, %xmm11 vbroadcastss %xmm11, %xmm19 vshufps $0x55, %xmm11, %xmm11, %xmm20 # xmm20 = xmm11[1,1,1,1] vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2] vmulps %xmm11, %xmm9, %xmm11 vfmadd231ps %xmm20, %xmm8, %xmm11 # xmm11 = (xmm8 * xmm20) + xmm11 vmulss 0xc(%rdx,%r14), %xmm10, %xmm20 vfmadd231ps %xmm19, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm19) + xmm11 vmulss %xmm20, %xmm1, %xmm19 vinsertps $0x30, %xmm19, %xmm11, %xmm11 # xmm11 = xmm11[0,1,2],xmm19[0] vsubps %xmm4, %xmm12, %xmm12 vmulps %xmm0, %xmm12, %xmm12 vbroadcastss %xmm12, %xmm20 vshufps $0x55, %xmm12, %xmm12, %xmm21 # xmm21 = xmm12[1,1,1,1] vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2] vmulps %xmm12, %xmm9, %xmm12 vfmadd231ps %xmm21, %xmm8, %xmm12 # xmm12 = (xmm8 * xmm21) + xmm12 vmulss 0xc(%rdx,%rsi), %xmm10, %xmm21 vfmadd231ps %xmm20, %xmm7, %xmm12 # xmm12 = (xmm7 * 
xmm20) + xmm12 vmulss %xmm21, %xmm1, %xmm20 vinsertps $0x30, %xmm20, %xmm12, %xmm12 # xmm12 = xmm12[0,1,2],xmm20[0] vminps %xmm12, %xmm11, %xmm21 vmaxps %xmm12, %xmm11, %xmm11 vmaxss %xmm19, %xmm20, %xmm12 vmulss %xmm12, %xmm10, %xmm12 vbroadcastss %xmm12, %xmm19 vsubps %xmm19, %xmm21, %xmm12 vaddps %xmm19, %xmm11, %xmm11 movl %r9d, %edx subl %edi, %edx cmpl $0x1, %edx jne 0x1eb2ebe vsubss %xmm18, %xmm17, %xmm0 vmaxss %xmm15, %xmm0, %xmm0 vmovss 0x398af(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm2 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm12, %xmm3 vbroadcastss %xmm2, %xmm2 vfmadd231ps %xmm6, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm6) + xmm3 vmulps %xmm0, %xmm11, %xmm0 vmovaps %xmm3, (%rax) vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0 vmovaps %xmm0, 0x10(%rax) vsubss %xmm14, %xmm16, %xmm0 vmaxss %xmm15, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm6, %xmm0, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd213ps %xmm2, %xmm1, %xmm12 # xmm12 = (xmm1 * xmm12) + xmm2 vmulps %xmm5, %xmm0, %xmm0 vfmadd213ps %xmm0, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm0 jmp 0x1eb325c movq 0x38(%r11,%rbx), %rdx movq 0x48(%r11,%rbx), %rsi movq %rsi, %rbx imulq %r8, %rbx vmovaps (%rdx,%rbx), %xmm19 imulq %r10, %rsi vmovaps (%rdx,%rsi), %xmm20 vsubps %xmm4, %xmm19, %xmm19 vmulps %xmm19, %xmm0, %xmm19 vbroadcastss %xmm19, %xmm21 vshufps $0x55, %xmm19, %xmm19, %xmm22 # xmm22 = xmm19[1,1,1,1] vshufps $0xaa, %xmm19, %xmm19, %xmm19 # xmm19 = xmm19[2,2,2,2] vmulps %xmm19, %xmm9, %xmm19 vfmadd231ps %xmm22, %xmm8, %xmm19 # xmm19 = (xmm8 * xmm22) + xmm19 vfmadd231ps %xmm21, %xmm7, %xmm19 # xmm19 = (xmm7 * xmm21) + xmm19 vmulss 0xc(%rdx,%rbx), %xmm10, %xmm21 vmulss %xmm21, %xmm1, %xmm21 vinsertps $0x30, %xmm21, %xmm19, %xmm19 # xmm19 = xmm19[0,1,2],xmm21[0] vsubps %xmm4, %xmm20, %xmm20 vmulps %xmm20, %xmm0, %xmm20 vbroadcastss %xmm20, %xmm22 vshufps $0x55, %xmm20, %xmm20, %xmm23 # xmm23 = xmm20[1,1,1,1] vshufps $0xaa, %xmm20, %xmm20, %xmm20 # xmm20 = xmm20[2,2,2,2] vmulps %xmm20, %xmm9, %xmm20 vfmadd231ps %xmm23, %xmm8, %xmm20 # xmm20 = (xmm8 * xmm23) + xmm20 vfmadd231ps %xmm22, %xmm7, %xmm20 # xmm20 = (xmm7 * xmm22) + xmm20 vmulss 0xc(%rdx,%rsi), %xmm10, %xmm22 vmulss %xmm22, %xmm1, %xmm22 vinsertps $0x30, %xmm22, %xmm20, %xmm20 # xmm20 = xmm20[0,1,2],xmm22[0] vminps %xmm20, %xmm19, %xmm23 vmaxps %xmm20, %xmm19, %xmm19 vmaxss %xmm21, %xmm22, %xmm20 vmulss %xmm20, %xmm10, %xmm20 vbroadcastss %xmm20, %xmm20 vsubps %xmm20, %xmm23, %xmm21 movq -0x38(%r11,%rcx), %rdx movq -0x28(%r11,%rcx), %rcx movq %rcx, %rsi imulq %r8, %rsi vmovaps (%rdx,%rsi), %xmm22 imulq %r10, %rcx vmovaps (%rdx,%rcx), %xmm23 vaddps %xmm20, %xmm19, %xmm19 vsubps %xmm4, %xmm22, %xmm20 vmulps %xmm20, %xmm0, %xmm20 vbroadcastss %xmm20, %xmm22 vshufps $0x55, %xmm20, %xmm20, %xmm24 # xmm24 = xmm20[1,1,1,1] vshufps $0xaa, %xmm20, %xmm20, %xmm20 # xmm20 = xmm20[2,2,2,2] vmulps %xmm20, %xmm9, %xmm20 vfmadd231ps %xmm24, %xmm8, %xmm20 # xmm20 = (xmm8 * xmm24) + xmm20 vfmadd231ps %xmm22, %xmm7, %xmm20 # xmm20 = (xmm7 * xmm22) + xmm20 vmulss 0xc(%rdx,%rsi), %xmm10, %xmm22 vmulss %xmm22, %xmm1, %xmm22 vinsertps $0x30, %xmm22, %xmm20, %xmm20 # xmm20 = xmm20[0,1,2],xmm22[0] vsubps %xmm4, %xmm23, %xmm23 vmulps %xmm23, %xmm0, %xmm23 vbroadcastss %xmm23, %xmm24 vshufps $0x55, %xmm23, %xmm23, %xmm25 # xmm25 = xmm23[1,1,1,1] vshufps $0xaa, %xmm23, %xmm23, %xmm23 # xmm23 = xmm23[2,2,2,2] vmulps %xmm23, %xmm9, %xmm23 vfmadd231ps %xmm25, %xmm8, %xmm23 # xmm23 = (xmm8 * xmm25) + xmm23 vfmadd231ps %xmm24, %xmm7, %xmm23 # xmm23 = (xmm7 * 
xmm24) + xmm23 vmulss 0xc(%rdx,%rcx), %xmm10, %xmm24 vmulss %xmm24, %xmm1, %xmm24 vinsertps $0x30, %xmm24, %xmm23, %xmm23 # xmm23 = xmm23[0,1,2],xmm24[0] vminps %xmm23, %xmm20, %xmm25 vmaxps %xmm23, %xmm20, %xmm20 vmaxss %xmm22, %xmm24, %xmm22 vmulss %xmm22, %xmm10, %xmm22 vbroadcastss %xmm22, %xmm22 vsubps %xmm22, %xmm25, %xmm23 vaddps %xmm22, %xmm20, %xmm20 vsubss %xmm18, %xmm17, %xmm17 vmaxss %xmm15, %xmm17, %xmm18 vmovss 0x3968a(%rip), %xmm17 # 0x1eec714 vsubss %xmm18, %xmm17, %xmm22 vbroadcastss %xmm18, %xmm18 vmulps %xmm21, %xmm18, %xmm21 vbroadcastss %xmm22, %xmm22 vfmadd213ps %xmm21, %xmm22, %xmm6 # xmm6 = (xmm22 * xmm6) + xmm21 vmulps %xmm19, %xmm18, %xmm18 vfmadd213ps %xmm18, %xmm22, %xmm5 # xmm5 = (xmm22 * xmm5) + xmm18 vsubss %xmm14, %xmm16, %xmm14 vmaxss %xmm15, %xmm14, %xmm14 vsubss %xmm14, %xmm17, %xmm15 vbroadcastss %xmm14, %xmm14 vmulps %xmm23, %xmm14, %xmm16 vbroadcastss %xmm15, %xmm15 vfmadd213ps %xmm16, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm12) + xmm16 vmulps %xmm20, %xmm14, %xmm14 vfmadd213ps %xmm14, %xmm15, %xmm11 # xmm11 = (xmm15 * xmm11) + xmm14 incl %edi cmpl %r9d, %edi jge 0x1eb3253 vsubss %xmm3, %xmm13, %xmm13 movl %edi, %ecx imulq $0x38, %rcx, %rcx addq %r11, %rcx addq $0x10, %rcx vxorps %xmm14, %xmm14, %xmm14 vcvtsi2ss %edi, %xmm0, %xmm15 vdivss %xmm2, %xmm15, %xmm15 vsubss %xmm3, %xmm15, %xmm15 vdivss %xmm13, %xmm15, %xmm15 vsubss %xmm15, %xmm17, %xmm16 vbroadcastss %xmm15, %xmm15 vmulps %xmm15, %xmm12, %xmm18 vbroadcastss %xmm16, %xmm16 vfmadd231ps %xmm6, %xmm16, %xmm18 # xmm18 = (xmm16 * xmm6) + xmm18 vmulps %xmm15, %xmm11, %xmm15 vfmadd231ps %xmm16, %xmm5, %xmm15 # xmm15 = (xmm5 * xmm16) + xmm15 movq -0x10(%rcx), %rdx movq (%rcx), %rsi movq %rsi, %r11 imulq %r8, %r11 vmovaps (%rdx,%r11), %xmm16 imulq %r10, %rsi vmovaps (%rdx,%rsi), %xmm19 vsubps %xmm4, %xmm16, %xmm16 vmulps %xmm16, %xmm0, %xmm16 vbroadcastss %xmm16, %xmm20 vshufps $0x55, %xmm16, %xmm16, %xmm21 # xmm21 = xmm16[1,1,1,1] vshufps $0xaa, %xmm16, %xmm16, %xmm16 # xmm16 = xmm16[2,2,2,2] vmulps %xmm16, %xmm9, %xmm16 vfmadd231ps %xmm21, %xmm8, %xmm16 # xmm16 = (xmm8 * xmm21) + xmm16 vfmadd231ps %xmm20, %xmm7, %xmm16 # xmm16 = (xmm7 * xmm20) + xmm16 vmulss 0xc(%rdx,%r11), %xmm10, %xmm20 vmulss %xmm20, %xmm1, %xmm20 vinsertps $0x30, %xmm20, %xmm16, %xmm16 # xmm16 = xmm16[0,1,2],xmm20[0] vsubps %xmm4, %xmm19, %xmm19 vmulps %xmm19, %xmm0, %xmm19 vbroadcastss %xmm19, %xmm21 vshufps $0x55, %xmm19, %xmm19, %xmm22 # xmm22 = xmm19[1,1,1,1] vshufps $0xaa, %xmm19, %xmm19, %xmm19 # xmm19 = xmm19[2,2,2,2] vmulps %xmm19, %xmm9, %xmm19 vfmadd231ps %xmm22, %xmm8, %xmm19 # xmm19 = (xmm8 * xmm22) + xmm19 vfmadd231ps %xmm21, %xmm7, %xmm19 # xmm19 = (xmm7 * xmm21) + xmm19 vmulss 0xc(%rdx,%rsi), %xmm10, %xmm21 vmulss %xmm21, %xmm1, %xmm21 vinsertps $0x30, %xmm21, %xmm19, %xmm19 # xmm19 = xmm19[0,1,2],xmm21[0] vminps %xmm19, %xmm16, %xmm22 vmaxps %xmm19, %xmm16, %xmm16 vmaxss %xmm20, %xmm21, %xmm19 vmulss %xmm19, %xmm10, %xmm19 vbroadcastss %xmm19, %xmm19 vsubps %xmm19, %xmm22, %xmm20 vsubps %xmm18, %xmm20, %xmm18 vaddps %xmm19, %xmm16, %xmm16 vsubps %xmm15, %xmm16, %xmm15 vminps %xmm14, %xmm18, %xmm16 vmaxps %xmm14, %xmm15, %xmm15 vaddps %xmm16, %xmm6, %xmm6 vaddps %xmm16, %xmm12, %xmm12 vaddps %xmm5, %xmm15, %xmm5 vaddps %xmm15, %xmm11, %xmm11 incl %edi addq $0x38, %rcx cmpl %edi, %r9d jne 0x1eb3107 vmovaps %xmm6, (%rax) vmovaps %xmm5, 0x10(%rax) vmovaps %xmm12, 0x20(%rax) vmovaps %xmm11, 0x30(%rax) popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq nop
/embree[P]embree/kernels/common/scene_line_segments.h
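One detail common to these vlinearBounds variants is the index clamp at the top: the testl/cmovnsl pair maps a negative floored lower step to -1, and the incl/cmpl/cmovll sequence caps the upper step. A hedged sketch of that clamp, reconstructed from the listing:

    #include <algorithm>

    static void clampSteps(int ilowerFloor, int iupperCeil, int numSegments,
                           int& ilo, int& ihi) {
      ilo = (ilowerFloor >= 0) ? ilowerFloor : -1;   // testl + cmovnsl
      ihi = std::min(iupperCeil, numSegments + 1);   // incl + cmpl + cmovll
    }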
embree::avx512::GridMeshISA::vlinearBounds(unsigned long, embree::BBox<float> const&, embree::SubGridBuildData const*) const
LBBox3fa vlinearBounds(size_t buildID, const BBox1f& time_range, const SubGridBuildData * const sgrids) const override {
  const SubGridBuildData &subgrid = sgrids[buildID];
  const unsigned int primID = subgrid.primID;
  const size_t x = subgrid.x();
  const size_t y = subgrid.y();
  return linearBounds(grid(primID),x,y,time_range);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, %r10 movl 0x4(%r8,%rdx,8), %edi movzwl (%r8,%rdx,8), %r9d movl $0x7fff, %eax # imm = 0x7FFF andq %rax, %r9 movzwl 0x2(%r8,%rdx,8), %edx andq %rax, %rdx imulq 0x68(%rsi), %rdi vmovss 0x28(%rsi), %xmm0 vmovss 0x2c(%rsi), %xmm1 vmovss (%rcx), %xmm2 movq 0x58(%rsi), %r8 vmovss 0x4(%rcx), %xmm4 vsubss %xmm1, %xmm2, %xmm2 vmovss 0x30(%rsi), %xmm3 vsubss %xmm1, %xmm3, %xmm5 vdivss %xmm5, %xmm2, %xmm3 vsubss %xmm1, %xmm4, %xmm1 vdivss %xmm5, %xmm1, %xmm6 vmulss %xmm3, %xmm0, %xmm9 vmulss %xmm6, %xmm0, %xmm7 vroundss $0x9, %xmm9, %xmm9, %xmm1 vroundss $0xa, %xmm7, %xmm7, %xmm2 vxorps %xmm4, %xmm4, %xmm4 vmaxss %xmm4, %xmm1, %xmm10 vminss %xmm0, %xmm2, %xmm8 vcvttss2si %xmm10, %eax movl %eax, -0x18(%rsp) vcvttss2si %xmm1, %eax testl %eax, %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmovnsl %eax, %ecx movl %ecx, -0x10(%rsp) vcvttss2si %xmm2, %eax vcvttss2si %xmm0, %ecx incl %ecx cmpl %ecx, %eax cmovll %eax, %ecx movl %ecx, -0x24(%rsp) leaq 0x3(%rdx), %rax movzwl 0xa(%r8,%rdi), %ecx cmpq %rcx, %rax movq %rcx, %r11 cmovbq %rax, %r11 vbroadcastss 0x387c2(%rip), %xmm1 # 0x1eecb84 vbroadcastss 0x37655(%rip), %xmm2 # 0x1eeba20 vmovaps %xmm2, %xmm5 vmovaps %xmm1, %xmm4 movq %rdx, -0x30(%rsp) movq %rcx, -0x8(%rsp) cmpw %dx, %cx jbe 0x1eb44a1 movslq -0x18(%rsp), %rax leaq 0x3(%r9), %rcx movzwl 0x8(%r8,%rdi), %r14d cmpq %r14, %rcx movq %r14, %r15 cmovbq %rcx, %r15 imulq $0x38, %rax, %r12 addq 0xe0(%rsi), %r12 vbroadcastss 0x3760d(%rip), %xmm5 # 0x1eeba20 vbroadcastss 0x38768(%rip), %xmm4 # 0x1eecb84 subq %r9, %r15 movb $0x1, %dl vbroadcastss 0x6cfd2(%rip), %xmm11 # 0x1f213fc vbroadcastss 0x3cbad(%rip), %xmm12 # 0x1ef0fe0 movq -0x30(%rsp), %r13 cmpw %r9w, %r14w jbe 0x1eb4496 movl (%r8,%rdi), %eax movl 0x4(%r8,%rdi), %ebp imulq %r13, %rbp movq 0x10(%r12), %rcx addq %r9, %rax addq %rbp, %rax imulq %rcx, %rax addq (%r12), %rax movq %r15, %rbp vmovups (%rax), %xmm13 vcmpnleps %xmm11, %xmm13, %k1 vcmpltps %xmm12, %xmm13, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %ebx testb $0x7, %bl jne 0x1eb49de vminps %xmm13, %xmm5, %xmm5 vmaxps %xmm13, %xmm4, %xmm4 addq %rcx, %rax decq %rbp jne 0x1eb4461 incq %r13 cmpq %r11, %r13 setb %dl jne 0x1eb4438 vcvttss2si %xmm8, %eax movl %eax, -0x20(%rsp) movq -0x8(%rsp), %rax cmpw -0x30(%rsp), %ax jbe 0x1eb4576 movslq -0x20(%rsp), %rax leaq 0x3(%r9), %rcx movzwl 0x8(%r8,%rdi), %r15d cmpq %r15, %rcx movq %r15, %r14 cmovbq %rcx, %r14 imulq $0x38, %rax, %r13 addq 0xe0(%rsi), %r13 vbroadcastss 0x37539(%rip), %xmm2 # 0x1eeba20 vbroadcastss 0x38694(%rip), %xmm1 # 0x1eecb84 subq %r9, %r14 movb $0x1, %bpl vbroadcastss 0x6cefd(%rip), %xmm11 # 0x1f213fc vbroadcastss 0x3cad8(%rip), %xmm12 # 0x1ef0fe0 movq -0x30(%rsp), %rdx cmpw %r9w, %r15w jbe 0x1eb456a movl (%r8,%rdi), %eax movl 0x4(%r8,%rdi), %ebx imulq %rdx, %rbx movq 0x10(%r13), %rcx addq %r9, %rax addq %rbx, %rax imulq %rcx, %rax addq (%r13), %rax movq %r14, %r12 vmovups (%rax), %xmm13 vcmpnleps %xmm11, %xmm13, %k1 vcmpltps %xmm12, %xmm13, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %ebx testb $0x7, %bl jne 0x1eb4a00 vminps %xmm13, %xmm2, %xmm2 vmaxps %xmm13, %xmm1, %xmm1 addq %rcx, %rax decq %r12 jne 0x1eb4535 incq %rdx cmpq %r11, %rdx setb %bpl jne 0x1eb450d movl -0x24(%rsp), %eax subl -0x10(%rsp), %eax cmpl $0x1, %eax jne 0x1eb45f0 vsubss %xmm10, %xmm9, %xmm0 vxorps %xmm3, %xmm3, %xmm3 vmaxss %xmm3, %xmm0, %xmm0 vmovss 0x3817c(%rip), %xmm6 # 0x1eec714 vsubss %xmm0, %xmm6, %xmm9 vbroadcastss %xmm0, %xmm0 vmulps %xmm2, %xmm0, %xmm10 vbroadcastss %xmm9, %xmm9 vfmadd231ps %xmm5, 
%xmm9, %xmm10 # xmm10 = (xmm9 * xmm5) + xmm10 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm10, (%r10) vfmadd231ps %xmm9, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm9) + xmm0 vmovaps %xmm0, 0x10(%r10) vsubss %xmm7, %xmm8, %xmm0 vmaxss %xmm3, %xmm0, %xmm0 vsubss %xmm0, %xmm6, %xmm3 vbroadcastss %xmm0, %xmm0 vmulps %xmm5, %xmm0, %xmm5 vbroadcastss %xmm3, %xmm3 vfmadd213ps %xmm5, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + xmm5 vmulps %xmm4, %xmm0, %xmm0 vfmadd213ps %xmm0, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm0 jmp 0x1eb49c4 vbroadcastss 0x3858b(%rip), %xmm11 # 0x1eecb84 vbroadcastss 0x3741e(%rip), %xmm12 # 0x1eeba20 vmovaps %xmm12, %xmm14 vmovaps %xmm11, %xmm13 movq -0x8(%rsp), %rax cmpw -0x30(%rsp), %ax jbe 0x1eb46dd leaq 0x3(%r9), %rax movzwl 0x8(%r8,%rdi), %r15d cmpq %r15, %rax movq %r15, %r12 cmovbq %rax, %r12 movslq -0x18(%rsp), %rax imulq $0x38, %rax, %r13 addq 0xe0(%rsi), %r13 vbroadcastss 0x373d7(%rip), %xmm14 # 0x1eeba20 vbroadcastss 0x38532(%rip), %xmm13 # 0x1eecb84 subq %r9, %r12 movb $0x1, %bpl vbroadcastss 0x6cd9b(%rip), %xmm15 # 0x1f213fc vbroadcastss 0x3c975(%rip), %xmm16 # 0x1ef0fe0 movq -0x30(%rsp), %rdx cmpw %r9w, %r15w jbe 0x1eb46d1 movl (%r8,%rdi), %eax movl 0x4(%r8,%rdi), %ebx imulq %rdx, %rbx movq 0x48(%r13), %rcx addq %r9, %rax addq %rbx, %rax imulq %rcx, %rax addq 0x38(%r13), %rax movq %r12, %r14 vmovups (%rax), %xmm17 vcmpnleps %xmm15, %xmm17, %k1 vcmpltps %xmm16, %xmm17, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %ebx testb $0x7, %bl jne 0x1eb4a23 vminps %xmm17, %xmm14, %xmm14 vmaxps %xmm17, %xmm13, %xmm13 addq %rcx, %rax decq %r14 jne 0x1eb4698 incq %rdx cmpq %r11, %rdx setb %bpl jne 0x1eb4670 movq -0x8(%rsp), %rax cmpw -0x30(%rsp), %ax jbe 0x1eb47ae leaq 0x3(%r9), %rax movzwl 0x8(%r8,%rdi), %r15d cmpq %r15, %rax movq %r15, %r12 cmovbq %rax, %r12 movslq -0x20(%rsp), %rax imulq $0x38, %rax, %r14 addq 0xe0(%rsi), %r14 vbroadcastss 0x37306(%rip), %xmm12 # 0x1eeba20 vbroadcastss 0x38461(%rip), %xmm11 # 0x1eecb84 subq %r9, %r12 movb $0x1, %bpl vbroadcastss 0x6ccca(%rip), %xmm15 # 0x1f213fc vbroadcastss 0x3c8a4(%rip), %xmm16 # 0x1ef0fe0 movq -0x30(%rsp), %rdx cmpw %r9w, %r15w jbe 0x1eb47a2 movl (%r8,%rdi), %eax movl 0x4(%r8,%rdi), %ebx imulq %rdx, %rbx movq -0x28(%r14), %rcx addq %r9, %rax addq %rbx, %rax imulq %rcx, %rax addq -0x38(%r14), %rax movq %r12, %r13 vmovups (%rax), %xmm17 vcmpnleps %xmm15, %xmm17, %k1 vcmpltps %xmm16, %xmm17, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %ebx testb $0x7, %bl jne 0x1eb4a46 vminps %xmm17, %xmm12, %xmm12 vmaxps %xmm17, %xmm11, %xmm11 addq %rcx, %rax decq %r13 jne 0x1eb4769 incq %rdx cmpq %r11, %rdx setb %bpl jne 0x1eb4741 movq %r10, -0x20(%rsp) vsubss %xmm10, %xmm9, %xmm9 vxorps %xmm10, %xmm10, %xmm10 vmaxss %xmm10, %xmm9, %xmm15 vmovss 0x37f4a(%rip), %xmm9 # 0x1eec714 vsubss %xmm15, %xmm9, %xmm16 vbroadcastss %xmm15, %xmm15 vmulps %xmm14, %xmm15, %xmm14 vbroadcastss %xmm16, %xmm16 vfmadd213ps %xmm14, %xmm16, %xmm5 # xmm5 = (xmm16 * xmm5) + xmm14 vmulps %xmm13, %xmm15, %xmm13 vfmadd213ps %xmm13, %xmm16, %xmm4 # xmm4 = (xmm16 * xmm4) + xmm13 vsubss %xmm7, %xmm8, %xmm7 vmaxss %xmm10, %xmm7, %xmm7 vsubss %xmm7, %xmm9, %xmm8 vbroadcastss %xmm7, %xmm7 vmulps %xmm7, %xmm12, %xmm10 vbroadcastss %xmm8, %xmm8 vfmadd213ps %xmm10, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm10 vmulps %xmm7, %xmm11, %xmm7 vfmadd213ps %xmm7, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm7 movl -0x10(%rsp), %eax incl %eax cmpl -0x24(%rsp), %eax jge 0x1eb49b4 vsubss %xmm3, %xmm6, %xmm6 leaq 0x3(%r9), %r14 movq 0xe0(%rsi), %rcx movq %rcx, -0x10(%rsp) movl %eax, %ebx movq %r9, %rax vbroadcastss 
0x371d4(%rip), %xmm7 # 0x1eeba20 vbroadcastss 0x3832f(%rip), %xmm8 # 0x1eecb84 negq %rax movq %rax, -0x18(%rsp) vbroadcastss 0x6cb96(%rip), %xmm10 # 0x1f213fc vbroadcastss 0x3c771(%rip), %xmm11 # 0x1ef0fe0 vxorps %xmm12, %xmm12, %xmm12 vmovaps %xmm8, %xmm13 vmovaps %xmm7, %xmm14 movq -0x8(%rsp), %rax cmpw -0x30(%rsp), %ax jbe 0x1eb4949 movzwl 0x8(%r8,%rdi), %r12d cmpq %r12, %r14 movq %r12, %r15 cmovbq %r14, %r15 imulq $0x38, %rbx, %rbp addq -0x10(%rsp), %rbp addq -0x18(%rsp), %r15 movb $0x1, %al movq -0x30(%rsp), %rdx vmovaps %xmm8, %xmm13 vmovaps %xmm7, %xmm14 cmpw %r9w, %r12w jbe 0x1eb4915 movl (%r8,%rdi), %ecx movl 0x4(%r8,%rdi), %r13d imulq %rdx, %r13 movq 0x10(%rbp), %rsi addq %r9, %rcx addq %r13, %rcx imulq %rsi, %rcx addq (%rbp), %rcx movq %r15, %r13 vmovups (%rcx), %xmm15 vcmpnleps %xmm10, %xmm15, %k1 vcmpltps %xmm11, %xmm15, %k0 {%k1} knotw %k0, %k0 kmovd %k0, %r10d testb $0x7, %r10b jne 0x1eb4922 vminps %xmm15, %xmm14, %xmm14 vmaxps %xmm15, %xmm13, %xmm13 addq %rsi, %rcx decq %r13 jne 0x1eb48e3 incq %rdx cmpq %r11, %rdx setb %al jne 0x1eb48bb jmp 0x1eb4949 vmovaps %xmm8, %xmm15 testb $0x1, %al jne 0x1eb4930 vmovaps %xmm13, %xmm15 vmovaps %xmm7, %xmm16 jne 0x1eb493e vmovaps %xmm14, %xmm16 vmovaps %xmm15, %xmm13 vmovaps %xmm16, %xmm14 vcvtsi2ss %ebx, %xmm18, %xmm15 vdivss %xmm0, %xmm15, %xmm15 vsubss %xmm3, %xmm15, %xmm15 vdivss %xmm6, %xmm15, %xmm15 vsubss %xmm15, %xmm9, %xmm16 vbroadcastss %xmm15, %xmm15 vmulps %xmm15, %xmm2, %xmm17 vbroadcastss %xmm16, %xmm16 vfmadd231ps %xmm5, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm5) + xmm17 vsubps %xmm17, %xmm14, %xmm14 vmulps %xmm1, %xmm15, %xmm15 vfmadd231ps %xmm16, %xmm4, %xmm15 # xmm15 = (xmm4 * xmm16) + xmm15 vsubps %xmm15, %xmm13, %xmm13 vminps %xmm12, %xmm14, %xmm14 vmaxps %xmm12, %xmm13, %xmm13 vaddps %xmm5, %xmm14, %xmm5 vaddps %xmm2, %xmm14, %xmm2 vaddps %xmm4, %xmm13, %xmm4 vaddps %xmm1, %xmm13, %xmm1 incq %rbx cmpl %ebx, -0x24(%rsp) jne 0x1eb4874 movq -0x20(%rsp), %r10 vmovaps %xmm5, (%r10) vmovaps %xmm4, 0x10(%r10) vmovaps %xmm2, 0x20(%r10) vmovaps %xmm1, 0x30(%r10) movq %r10, %rax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq testb $0x1, %dl je 0x1eb49ec vbroadcastss 0x37034(%rip), %xmm5 # 0x1eeba20 je 0x1eb44a1 vbroadcastss 0x38189(%rip), %xmm4 # 0x1eecb84 jmp 0x1eb44a1 testb $0x1, %bpl je 0x1eb4a0f vbroadcastss 0x37011(%rip), %xmm2 # 0x1eeba20 je 0x1eb4576 vbroadcastss 0x38166(%rip), %xmm1 # 0x1eecb84 jmp 0x1eb4576 testb $0x1, %bpl je 0x1eb4a32 vbroadcastss 0x36fee(%rip), %xmm14 # 0x1eeba20 je 0x1eb46dd vbroadcastss 0x38143(%rip), %xmm13 # 0x1eecb84 jmp 0x1eb46dd testb $0x1, %bpl je 0x1eb4a55 vbroadcastss 0x36fcb(%rip), %xmm12 # 0x1eeba20 je 0x1eb47ae vbroadcastss 0x38120(%rip), %xmm11 # 0x1eecb84 jmp 0x1eb47ae nopl (%rax)
/embree[P]embree/kernels/common/scene_grid_mesh.h
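The first instructions of this grid kernel decode SubGridBuildData: two movzwl loads with 0x7fff masks recover x and y from packed 16-bit fields, and the 32-bit primID follows at offset 4 within the 8-byte record. A sketch of that layout, reconstructed from the address arithmetic rather than quoted from the header:

    #include <cstdint>

    struct SubGridBuildDataSketch {   // 8 bytes, indexed by (%r8,%rdx,8)
      uint16_t sx, sy;                // low 15 bits hold x and y
      uint32_t primID;
    };

    static void decode(const SubGridBuildDataSketch& s,
                       unsigned& x, unsigned& y, unsigned& prim) {
      x = s.sx & 0x7fffu;             // the andq $0x7fff masks
      y = s.sy & 0x7fffu;
      prim = s.primID;
    }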
embree::avx512::PointsISA::createPrimRefArray(embree::PrimRef*, embree::range<unsigned long> const&, unsigned long, unsigned int) const
PrimInfo createPrimRefArray(PrimRef* prims, const range<size_t>& r, size_t k, unsigned int geomID) const {
  PrimInfo pinfo(empty);
  for (size_t j = r.begin(); j < r.end(); j++) {
    BBox3fa bounds = empty;
    if (!buildBounds(j, &bounds)) continue;
    const PrimRef prim(bounds, geomID, unsigned(j));
    pinfo.add_center2(prim);
    prims[k++] = prim;
  }
  return pinfo;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, %rax vbroadcastss 0x36f44(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x3809b(%rip), %xmm1 # 0x1eecb84 vmovaps %xmm1, 0x10(%rdi) vmovaps %xmm0, 0x20(%rdi) vmovaps %xmm1, 0x30(%rdi) vxorps %xmm2, %xmm2, %xmm2 vmovaps %xmm2, 0x40(%rdi) movq (%rcx), %r10 movq 0x48(%rdi), %rdi cmpq 0x8(%rcx), %r10 jae 0x1eb4c46 vbroadcastss 0x6c8e1(%rip), %xmm4 # 0x1f213fc vbroadcastss 0x3c4bc(%rip), %xmm5 # 0x1ef0fe0 vmovd %r9d, %xmm6 vxorps %xmm7, %xmm7, %xmm7 vbroadcastss 0x36eea(%rip), %xmm8 # 0x1eeba20 vbroadcastss 0x38045(%rip), %xmm9 # 0x1eecb84 vmovaps %xmm0, %xmm3 vmovaps %xmm1, %xmm2 movl %r10d, %r11d movq 0xe0(%rsi), %rbx cmpq 0x18(%rbx), %r11 jae 0x1eb4ba9 xorl %r13d, %r13d xorl %r15d, %r15d movl %r13d, %r9d testb $0x1, %r13b jne 0x1eb4bac imulq $0x38, %r15, %r15 movq (%rbx,%r15), %r14 movq 0x10(%rbx,%r15), %r12 imulq %r11, %r12 vmovaps (%r14,%r12), %xmm10 vcmpnleps %xmm4, %xmm10, %k1 vcmpltps %xmm5, %xmm10, %k0 {%k1} kmovd %k0, %ebp cmpb $0xf, %bpl jne 0x1eb4bac movl $0x1, %r15d movb $0x1, %r13b vucomiss 0xc(%r14,%r12), %xmm7 jbe 0x1eb4b5d jmp 0x1eb4bac xorl %r9d, %r9d vmovaps %xmm9, %xmm10 vmovaps %xmm8, %xmm11 testb $0x1, %r9b je 0x1eb4bec movq 0x58(%rsi), %r11 movq 0x68(%rsi), %rbx imulq %r10, %rbx vmovaps (%r11,%rbx), %xmm10 vmovss 0xc(%r11,%rbx), %xmm11 vmulss 0x128(%rsi), %xmm11, %xmm11 vbroadcastss %xmm11, %xmm12 vsubps %xmm12, %xmm10, %xmm11 vaddps %xmm12, %xmm10, %xmm10 testb $0x1, %r9b je 0x1eb4c37 vinsertps $0x30, %xmm6, %xmm11, %xmm11 # xmm11 = xmm11[0,1,2],xmm6[0] vmovd %r10d, %xmm12 vinsertps $0x30, %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0,1,2],xmm12[0] vminps %xmm11, %xmm0, %xmm0 vmaxps %xmm10, %xmm1, %xmm1 vaddps %xmm11, %xmm10, %xmm12 vminps %xmm12, %xmm3, %xmm3 vmaxps %xmm12, %xmm2, %xmm2 incq %rdi leaq 0x1(%r8), %r9 shlq $0x5, %r8 vmovaps %xmm11, (%rdx,%r8) vmovaps %xmm10, 0x10(%rdx,%r8) movq %r9, %r8 incq %r10 cmpq 0x8(%rcx), %r10 jb 0x1eb4b47 jmp 0x1eb4c4e vmovaps %xmm1, %xmm2 vmovaps %xmm0, %xmm3 vmovaps %xmm0, (%rax) vmovaps %xmm1, 0x10(%rax) vmovaps %xmm3, 0x20(%rax) vmovaps %xmm2, 0x30(%rax) movq %rdi, 0x48(%rax) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/embree[P]embree/kernels/common/scene_points.h
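The store sequence at the bottom of the loop shows the PrimRef layout: geomID and primID are inserted into the otherwise unused w lanes of the box (the vinsertps $0x30 pair), and add_center2 folds lower+upper into the centroid bounds (the vaddps feeding vminps/vmaxps). A sketch of that packing and accumulation, with illustrative names:

    #include <algorithm>
    #include <cstdint>

    struct PrimRefSketch {            // 32 bytes, two aligned 16-byte stores
      float lowerX, lowerY, lowerZ; uint32_t geomID;   // geomID in lower.w
      float upperX, upperY, upperZ; uint32_t primID;   // primID in upper.w
    };

    // add_center2 tracks bounds of lower+upper (twice the centroid), which
    // avoids a multiply per primitive.
    static void addCenter2(const PrimRefSketch& p, float cmin[3], float cmax[3]) {
      const float c[3] = { p.lowerX + p.upperX, p.lowerY + p.upperY, p.lowerZ + p.upperZ };
      for (int k = 0; k < 3; k++) {
        cmin[k] = std::min(cmin[k], c[k]);
        cmax[k] = std::max(cmax[k], c[k]);
      }
    }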
embree::avx512::PointsISA::createPrimRefArrayMB(embree::vector_t<embree::PrimRef, embree::aligned_monitored_allocator<embree::PrimRef, 32ul>>&, unsigned long, embree::range<unsigned long> const&, unsigned long, unsigned int) const
__forceinline Vec3ff vertex_safe(size_t i, float time) const { if (hasMotionBlur()) return vertex(i,time); else return vertex(i); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx vbroadcastss 0x36d9d(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x37ef4(%rip), %xmm1 # 0x1eecb84 vmovaps %xmm1, 0x10(%rdi) movq %rdx, -0x8(%rsp) vmovaps %xmm0, 0x20(%rdi) vmovaps %xmm1, 0x30(%rdi) vxorps %xmm2, %xmm2, %xmm2 vmovaps %xmm2, 0x40(%rdi) movq (%r8), %r10 movq %rdi, -0x28(%rsp) movq 0x48(%rdi), %rdi cmpq 0x8(%r8), %r10 jae 0x1eb4f25 leaq 0x1(%rcx), %rbx vmovss 0x38(%rsp), %xmm2 imulq $0x38, %rcx, %rax leaq 0x48(%rax), %rdx movq %rdx, -0x10(%rsp) vbroadcastss 0x36d3d(%rip), %xmm5 # 0x1eeba20 vbroadcastss 0x37e98(%rip), %xmm6 # 0x1eecb84 movq %rax, -0x30(%rsp) addq $0x80, %rax movq %rax, -0x20(%rsp) vbroadcastss 0x6c6f7(%rip), %xmm7 # 0x1f213fc vbroadcastss 0x3c2d2(%rip), %xmm8 # 0x1ef0fe0 vxorps %xmm9, %xmm9, %xmm9 vmovaps %xmm0, %xmm4 vmovaps %xmm1, %xmm3 movq %rbx, -0x18(%rsp) movl %r10d, %r13d movq 0xe0(%rsi), %r12 movq 0x18(%r12), %rbp cmpq %rbp, %r13 jae 0x1eb4eb9 movq -0x30(%rsp), %rax movq (%r12,%rax), %r11 movq 0x10(%r12,%rax), %rdx imulq %r13, %rdx vmovaps (%r11,%rdx), %xmm10 vcmpnleps %xmm7, %xmm10, %k1 vcmpltps %xmm8, %xmm10, %k0 {%k1} kmovd %k0, %eax xorl %r15d, %r15d cmpb $0xf, %al jne 0x1eb4dcd vucomiss 0xc(%r11,%rdx), %xmm9 ja 0x1eb4dcd movq -0x10(%rsp), %rax leaq (%r12,%rax), %r14 movq %rcx, %rbx incq %rbx cmpq %rcx, %rbx seta %r15b ja 0x1eb4eb0 movq %rdi, %r11 movq -0x10(%r14), %rdx movq (%r14), %rax imulq %r13, %rax vmovaps (%rdx,%rax), %xmm10 vcmpnleps %xmm7, %xmm10, %k1 vcmpltps %xmm8, %xmm10, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil jne 0x1eb4dca addq $0x38, %r14 vucomiss 0xc(%rdx,%rax), %xmm9 movq %r11, %rdi ja 0x1eb4dcd jmp 0x1eb4d7e movq %r11, %rdi cmpq %rbp, %r13 setb %al testb %r15b, %al je 0x1eb4eb9 movq -0x18(%rsp), %rbx imulq $0x38, %rbx, %rax movq (%r12,%rax), %r11 movq 0x10(%r12,%rax), %rax imulq %r13, %rax vmovaps (%r11,%rax), %xmm10 vcmpnleps %xmm7, %xmm10, %k1 vcmpltps %xmm8, %xmm10, %k0 {%k1} kmovd %k0, %edx xorl %ebp, %ebp cmpb $0xf, %dl jne 0x1eb4ebb vucomiss 0xc(%r11,%rax), %xmm9 vmovaps %xmm6, %xmm10 vmovaps %xmm5, %xmm11 ja 0x1eb4ec3 movq -0x20(%rsp), %rax leaq (%r12,%rax), %r14 movq %rbx, %r15 incq %r15 cmpq %rbx, %r15 ja 0x1eb4e77 movq -0x10(%r14), %r11 movq (%r14), %rdx imulq %r13, %rdx vmovaps (%r11,%rdx), %xmm10 vcmpnleps %xmm7, %xmm10, %k1 vcmpltps %xmm8, %xmm10, %k0 {%k1} kmovd %k0, %eax cmpb $0xf, %al jne 0x1eb4e72 addq $0x38, %r14 vucomiss 0xc(%r11,%rdx), %xmm9 jbe 0x1eb4e36 cmpq %rbx, %r15 jbe 0x1eb4eb9 movq -0x30(%rsp), %rdx movq (%r12,%rdx), %rax movq 0x10(%r12,%rdx), %rdx imulq %r10, %rdx vmovaps (%rax,%rdx), %xmm10 vmovss 0xc(%rax,%rdx), %xmm11 vmulss 0x128(%rsi), %xmm11, %xmm11 vbroadcastss %xmm11, %xmm12 vsubps %xmm12, %xmm10, %xmm11 vaddps %xmm12, %xmm10, %xmm10 movb $0x1, %bpl jmp 0x1eb4ec3 cmpq %rbp, %r13 jb 0x1eb4ddc xorl %ebp, %ebp vmovaps %xmm6, %xmm10 vmovaps %xmm5, %xmm11 testb %bpl, %bpl je 0x1eb4f16 vinsertps $0x30, %xmm2, %xmm11, %xmm11 # xmm11 = xmm11[0,1,2],xmm2[0] vmovd %r10d, %xmm12 vinsertps $0x30, %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0,1,2],xmm12[0] vminps %xmm11, %xmm0, %xmm0 vmaxps %xmm10, %xmm1, %xmm1 vaddps %xmm11, %xmm10, %xmm12 vminps %xmm12, %xmm4, %xmm4 vmaxps %xmm12, %xmm3, %xmm3 incq %rdi leaq 0x1(%r9), %rax movq -0x8(%rsp), %rdx movq 0x20(%rdx), %rdx shlq $0x5, %r9 vmovaps %xmm11, (%rdx,%r9) vmovaps %xmm10, 0x10(%rdx,%r9) movq %rax, %r9 incq %r10 cmpq 0x8(%r8), %r10 jb 0x1eb4d20 jmp 0x1eb4f2d vmovaps %xmm1, %xmm3 vmovaps %xmm0, %xmm4 movq -0x28(%rsp), %rax vmovaps %xmm0, (%rax) vmovaps %xmm1, 0x10(%rax) vmovaps 
%xmm4, 0x20(%rax) vmovaps %xmm3, 0x30(%rax) movq %rdi, 0x48(%rax) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/embree[P]embree/kernels/common/scene_points.h
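The vertex_safe accessor above only touches the time dimension when the geometry actually carries motion blur. A minimal sketch of the same dispatch pattern in plain C++, with a hypothetical Vec3 and per-time-step storage standing in for Embree's Vec3ff vertex buffers:

#include <cstddef>
#include <vector>

struct Vec3 { float x, y, z; };

// Minimal stand-in for a geometry with optional motion blur:
// one vertex array per time step; a single step means no motion blur.
struct Points {
  std::vector<std::vector<Vec3>> steps; // steps[t][i]

  bool hasMotionBlur() const { return steps.size() > 1; }

  Vec3 vertex(size_t i) const { return steps[0][i]; }

  // Linearly interpolate between the two time steps bracketing 'time' in [0,1].
  Vec3 vertex(size_t i, float time) const {
    const float f = time * float(steps.size() - 1);
    const size_t t0 = size_t(f);
    const size_t t1 = (t0 + 1 < steps.size()) ? t0 + 1 : t0;
    const float w = f - float(t0);
    const Vec3 a = steps[t0][i], b = steps[t1][i];
    return { a.x + w * (b.x - a.x), a.y + w * (b.y - a.y), a.z + w * (b.z - a.z) };
  }

  // Same shape as vertex_safe above: only take the interpolating path when needed.
  Vec3 vertex_safe(size_t i, float time) const {
    return hasMotionBlur() ? vertex(i, time) : vertex(i);
  }
};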
embree::avx512::PointsISA::createPrimRefArrayMB(embree::PrimRef*, embree::BBox<float> const&, embree::range<unsigned long> const&, unsigned long, unsigned int) const
PrimInfo createPrimRefArrayMB(PrimRef* prims, const BBox1f& time_range, const range<size_t>& r, size_t k, unsigned int geomID) const
{
  PrimInfo pinfo(empty);
  const BBox1f t0t1 = BBox1f::intersect(getTimeRange(), time_range);
  if (t0t1.empty()) return pinfo;

  for (size_t j = r.begin(); j < r.end(); j++) {
    LBBox3fa lbounds = empty;
    if (!linearBounds(j, t0t1, lbounds))
      continue;
    const PrimRef prim(lbounds.bounds(), geomID, unsigned(j));
    pinfo.add_center2(prim);
    prims[k++] = prim;
  }
  return pinfo;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdi, %rbx vbroadcastss 0x36ab5(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x37c0c(%rip), %xmm1 # 0x1eecb84 vmovaps %xmm1, 0x10(%rdi) vmovaps %xmm0, 0x20(%rdi) movq %r9, %r13 movq %r8, %r14 movq %rcx, %rbp movq %rdx, (%rsp) movq %rsi, %r12 vmovaps %xmm1, 0x30(%rdi) vxorps %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0x40(%rdi) movq %rsi, %rdi callq 0x91b664 vmovsd (%rbp), %xmm1 vcmpltps %xmm1, %xmm0, %k1 vblendps $0x2, %xmm0, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm0[1],xmm1[2,3] vinsertps $0x50, %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[1],xmm0[2,3] vmovaps %xmm2, %xmm4 {%k1} vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vucomiss %xmm5, %xmm4 ja 0x1eb54fd vmovaps (%rbx), %xmm0 vmovaps 0x10(%rbx), %xmm1 vmovaps 0x20(%rbx), %xmm2 movq (%r14), %rcx vmovaps 0x30(%rbx), %xmm3 movq 0x48(%rbx), %rax cmpq 0x8(%r14), %rcx jae 0x1eb54e6 vmovss 0x40(%rsp), %xmm6 vmovss 0x3b93e(%rip), %xmm7 # 0x1ef0944 vmovss 0x3b932(%rip), %xmm8 # 0x1ef0940 vxorps %xmm9, %xmm9, %xmm9 vbroadcastss 0x6c3e0(%rip), %xmm10 # 0x1f213fc vbroadcastss 0x3bfbb(%rip), %xmm11 # 0x1ef0fe0 vmovss 0x376e7(%rip), %xmm12 # 0x1eec714 vxorps %xmm13, %xmm13, %xmm13 vbroadcastss 0x369e4(%rip), %xmm30 # 0x1eeba20 vbroadcastss 0x37b3e(%rip), %xmm31 # 0x1eecb84 vmovsd 0x2c(%r12), %xmm15 vmovss 0x28(%r12), %xmm14 vmovshdup %xmm15, %xmm16 # xmm16 = xmm15[1,1,3,3] vsubps %xmm15, %xmm16, %xmm16 vsubss %xmm15, %xmm5, %xmm17 vdivss %xmm16, %xmm17, %xmm17 vmulss %xmm7, %xmm17, %xmm17 movl %ecx, %edi movq 0xe0(%r12), %r9 cmpq 0x18(%r9), %rdi jae 0x1eb5158 vmulss %xmm17, %xmm14, %xmm17 vrndscaless $0xa, %xmm17, %xmm17, %xmm17 vminss %xmm14, %xmm17, %xmm17 vcvttss2si %xmm17, %r10d vsubss %xmm15, %xmm4, %xmm15 vdivss %xmm16, %xmm15, %xmm15 vmulss %xmm8, %xmm15, %xmm15 vmulss %xmm15, %xmm14, %xmm15 vroundss $0x9, %xmm15, %xmm15, %xmm15 vmaxss %xmm15, %xmm9, %xmm15 vcvttss2si %xmm15, %edx cmpl %r10d, %edx seta %sil ja 0x1eb515a movslq %edx, %r8 imulq $0x38, %r8, %r11 movq (%r9,%r11), %rbp movq 0x10(%r9,%r11), %rdx imulq %rdi, %rdx vmovaps (%rbp,%rdx), %xmm15 vcmpnleps %xmm10, %xmm15, %k1 vcmpltps %xmm11, %xmm15, %k0 {%k1} kmovd %k0, %r15d cmpb $0xf, %r15b jne 0x1eb515a vucomiss 0xc(%rbp,%rdx), %xmm9 ja 0x1eb515a movslq %r10d, %r10 addq %r11, %r9 addq $0x48, %r9 incq %r8 cmpq %r10, %r8 seta %sil ja 0x1eb515a movq -0x10(%r9), %r11 movq (%r9), %rbp imulq %rdi, %rbp vmovaps (%r11,%rbp), %xmm15 vcmpnleps %xmm10, %xmm15, %k1 vcmpltps %xmm11, %xmm15, %k0 {%k1} kmovd %k0, %edx cmpb $0xf, %dl jne 0x1eb515a addq $0x38, %r9 vucomiss 0xc(%r11,%rbp), %xmm9 ja 0x1eb515a jmp 0x1eb5115 xorl %esi, %esi vmovaps %xmm31, %xmm16 vmovaps %xmm30, %xmm17 vmovaps %xmm31, %xmm15 vmovaps %xmm30, %xmm19 testb %sil, %sil je 0x1eb547a vmovss 0x2c(%r12), %xmm15 vmovss 0x30(%r12), %xmm16 vsubss %xmm15, %xmm4, %xmm17 vsubss %xmm15, %xmm16, %xmm16 vdivss %xmm16, %xmm17, %xmm18 vsubss %xmm15, %xmm5, %xmm15 vdivss %xmm16, %xmm15, %xmm21 vmulss %xmm18, %xmm14, %xmm24 vmulss %xmm21, %xmm14, %xmm22 vrndscaless $0x9, %xmm24, %xmm24, %xmm15 vrndscaless $0xa, %xmm22, %xmm22, %xmm16 vmaxss %xmm9, %xmm15, %xmm25 vminss %xmm14, %xmm16, %xmm23 vcvttss2si %xmm25, %edx vcvttss2si %xmm23, %r11d vcvttss2si %xmm15, %edi testl %edi, %edi vcvttss2si %xmm16, %r9d movl $0xffffffff, %r8d # imm = 0xFFFFFFFF cmovsl %r8d, %edi vcvttss2si %xmm14, %r8d incl %r8d cmpl %r8d, %r9d cmovll %r9d, %r8d movslq %edx, %rdx movq 0xe0(%r12), %r9 imulq $0x38, %rdx, %r10 movq (%r9,%r10), %rdx movq 0x10(%r9,%r10), %r15 imulq %rcx, %r15 vmovaps 
(%rdx,%r15), %xmm15 vbroadcastss 0x128(%r12), %xmm20 vmulss 0xc(%rdx,%r15), %xmm20, %xmm16 vbroadcastss %xmm16, %xmm16 vsubps %xmm16, %xmm15, %xmm19 vaddps %xmm16, %xmm15, %xmm15 movslq %r11d, %rdx imulq $0x38, %rdx, %r11 movq (%r9,%r11), %rdx movq 0x10(%r9,%r11), %r15 imulq %rcx, %r15 vmovaps (%rdx,%r15), %xmm16 vmulss 0xc(%rdx,%r15), %xmm20, %xmm17 vbroadcastss %xmm17, %xmm26 vsubps %xmm26, %xmm16, %xmm17 vaddps %xmm26, %xmm16, %xmm16 movl %r8d, %edx subl %edi, %edx vsubss %xmm25, %xmm24, %xmm24 cmpl $0x1, %edx jne 0x1eb52fd vmaxss %xmm9, %xmm24, %xmm14 vsubss %xmm14, %xmm12, %xmm18 vbroadcastss %xmm14, %xmm14 vmulps %xmm17, %xmm14, %xmm20 vbroadcastss %xmm18, %xmm18 vfmadd231ps %xmm19, %xmm18, %xmm20 # xmm20 = (xmm18 * xmm19) + xmm20 vmulps %xmm16, %xmm14, %xmm14 vfmadd231ps %xmm18, %xmm15, %xmm14 # xmm14 = (xmm15 * xmm18) + xmm14 vsubss %xmm22, %xmm23, %xmm18 vmaxss %xmm9, %xmm18, %xmm18 vsubss %xmm18, %xmm12, %xmm21 vbroadcastss %xmm18, %xmm18 vmulps %xmm18, %xmm19, %xmm19 vbroadcastss %xmm21, %xmm21 vfmadd213ps %xmm19, %xmm21, %xmm17 # xmm17 = (xmm21 * xmm17) + xmm19 vmulps %xmm18, %xmm15, %xmm15 vfmadd213ps %xmm15, %xmm21, %xmm16 # xmm16 = (xmm21 * xmm16) + xmm15 vmovaps %xmm14, %xmm15 vmovaps %xmm20, %xmm19 jmp 0x1eb547a movq 0x38(%r9,%r10), %rdx movq 0x48(%r9,%r10), %r10 imulq %rcx, %r10 vmovaps (%rdx,%r10), %xmm25 vmulps 0xc(%rdx,%r10){1to4}, %xmm20, %xmm26 vsubps %xmm26, %xmm25, %xmm27 movq -0x38(%r9,%r11), %rdx movq -0x28(%r9,%r11), %r10 imulq %rcx, %r10 vmovaps (%rdx,%r10), %xmm28 vaddps %xmm26, %xmm25, %xmm25 vmulps 0xc(%rdx,%r10){1to4}, %xmm20, %xmm26 vsubps %xmm26, %xmm28, %xmm29 vaddps %xmm26, %xmm28, %xmm26 vmaxss %xmm9, %xmm24, %xmm24 vsubss %xmm24, %xmm12, %xmm28 vbroadcastss %xmm24, %xmm24 vmulps %xmm27, %xmm24, %xmm27 vbroadcastss %xmm28, %xmm28 vfmadd213ps %xmm27, %xmm28, %xmm19 # xmm19 = (xmm28 * xmm19) + xmm27 vmulps %xmm25, %xmm24, %xmm24 vfmadd213ps %xmm24, %xmm28, %xmm15 # xmm15 = (xmm28 * xmm15) + xmm24 vsubss %xmm22, %xmm23, %xmm22 vmaxss %xmm9, %xmm22, %xmm22 vsubss %xmm22, %xmm12, %xmm23 vbroadcastss %xmm22, %xmm22 vmulps %xmm29, %xmm22, %xmm24 vbroadcastss %xmm23, %xmm23 vfmadd213ps %xmm24, %xmm23, %xmm17 # xmm17 = (xmm23 * xmm17) + xmm24 vmulps %xmm26, %xmm22, %xmm22 vfmadd213ps %xmm22, %xmm23, %xmm16 # xmm16 = (xmm23 * xmm16) + xmm22 incl %edi cmpl %r8d, %edi jge 0x1eb547a vsubss %xmm18, %xmm21, %xmm21 movl %edi, %edx imulq $0x38, %rdx, %rdx addq %rdx, %r9 addq $0x10, %r9 vcvtsi2ss %edi, %xmm4, %xmm22 vdivss %xmm14, %xmm22, %xmm22 vsubss %xmm18, %xmm22, %xmm22 vdivss %xmm21, %xmm22, %xmm22 vsubss %xmm22, %xmm12, %xmm23 vbroadcastss %xmm22, %xmm22 vmulps %xmm22, %xmm17, %xmm24 vbroadcastss %xmm23, %xmm23 vfmadd231ps %xmm19, %xmm23, %xmm24 # xmm24 = (xmm23 * xmm19) + xmm24 vmulps %xmm22, %xmm16, %xmm22 vfmadd231ps %xmm23, %xmm15, %xmm22 # xmm22 = (xmm15 * xmm23) + xmm22 movq -0x10(%r9), %rdx movq (%r9), %r10 imulq %rcx, %r10 vmovaps (%rdx,%r10), %xmm23 vmulps 0xc(%rdx,%r10){1to4}, %xmm20, %xmm25 vsubps %xmm25, %xmm23, %xmm26 vsubps %xmm24, %xmm26, %xmm24 vaddps %xmm25, %xmm23, %xmm23 vsubps %xmm22, %xmm23, %xmm22 vminps %xmm13, %xmm24, %xmm23 vmaxps %xmm13, %xmm22, %xmm22 vaddps %xmm23, %xmm19, %xmm19 vaddps %xmm23, %xmm17, %xmm17 vaddps %xmm22, %xmm15, %xmm15 vaddps %xmm22, %xmm16, %xmm16 incl %edi addq $0x38, %r9 cmpl %edi, %r8d jne 0x1eb53d3 testb %sil, %sil je 0x1eb54d9 vminps %xmm17, %xmm19, %xmm14 vinsertps $0x30, %xmm6, %xmm14, %xmm14 # xmm14 = xmm14[0,1,2],xmm6[0] vmaxps %xmm16, %xmm15, %xmm15 vmovd %ecx, %xmm16 vinsertps $0x30, %xmm16, %xmm15, 
%xmm15 # xmm15 = xmm15[0,1,2],xmm16[0] vminps %xmm14, %xmm0, %xmm0 vmaxps %xmm15, %xmm1, %xmm1 vaddps %xmm15, %xmm14, %xmm16 vminps %xmm16, %xmm2, %xmm2 vmaxps %xmm16, %xmm3, %xmm3 incq %rax leaq 0x1(%r13), %rdx shlq $0x5, %r13 movq (%rsp), %rsi vmovaps %xmm14, (%rsi,%r13) vmovaps %xmm15, 0x10(%rsi,%r13) movq %rdx, %r13 incq %rcx cmpq 0x8(%r14), %rcx jb 0x1eb5046 vmovaps %xmm0, (%rbx) vmovaps %xmm1, 0x10(%rbx) vmovaps %xmm2, 0x20(%rbx) vmovaps %xmm3, 0x30(%rbx) movq %rax, 0x48(%rbx) movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq nop
/embree[P]embree/kernels/common/scene_points.h
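The loop above (and the createPrimRefMBArray / vlinearBounds records that follow) relies on linearBounds to produce a start/end box pair that conservatively encloses the moving primitive over the clipped time range t0t1: the generated code interpolates the per-time-step boxes at the range endpoints and then corrects for the steps in between (the vminps/vmaxps accumulation in the inner loop). A deliberately crude sketch of the idea, assuming at least two uniformly spaced time steps and a hypothetical BBox type; unlike Embree, which distributes the interior corrections linearly along the segment, this version simply merges interior boxes into both endpoints, which is still conservative:

#include <algorithm>
#include <cstddef>
#include <vector>

struct BBox { float lo[3], hi[3]; };

static BBox merge(const BBox& a, const BBox& b) {
  BBox r;
  for (int k = 0; k < 3; ++k) {
    r.lo[k] = std::min(a.lo[k], b.lo[k]);
    r.hi[k] = std::max(a.hi[k], b.hi[k]);
  }
  return r;
}

static BBox lerp(const BBox& a, const BBox& b, float w) {
  BBox r;
  for (int k = 0; k < 3; ++k) {
    r.lo[k] = (1.0f - w) * a.lo[k] + w * b.lo[k];
    r.hi[k] = (1.0f - w) * a.hi[k] + w * b.hi[k];
  }
  return r;
}

// Conservative linear bounds over [t0,t1] (both in [0,1]) from per-time-step
// boxes (step.size() >= 2 assumed): interpolate the endpoints, then fold in
// every step strictly inside the interval so the pair still encloses the motion.
static void linearBounds(const std::vector<BBox>& step, float t0, float t1,
                         BBox& b0, BBox& b1) {
  const float n = float(step.size() - 1);
  auto at = [&](float t) {
    const float f = t * n;
    const size_t i = std::min(size_t(f), step.size() - 2);
    return lerp(step[i], step[i + 1], f - float(i));
  };
  b0 = at(t0);
  b1 = at(t1);
  for (size_t i = size_t(t0 * n) + 1; float(i) < t1 * n; ++i) {
    b0 = merge(b0, step[i]); // crude: widen both ends by interior boxes
    b1 = merge(b1, step[i]);
  }
}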
embree::avx512::PointsISA::createPrimRefMBArray(embree::vector_t<embree::PrimRefMB, embree::aligned_monitored_allocator<embree::PrimRefMB, 16ul>>&, embree::BBox<float> const&, embree::range<unsigned long> const&, unsigned long, unsigned int) const
PrimInfoMB createPrimRefMBArray(mvector<PrimRefMB>& prims, const BBox1f& t0t1, const range<size_t>& r, size_t k, unsigned int geomID) const
{
  PrimInfoMB pinfo(empty);
  for (size_t j = r.begin(); j < r.end(); j++) {
    if (!valid(j, timeSegmentRange(t0t1)))
      continue;
    const PrimRefMB prim(linearBounds(j, t0t1), this->numTimeSegments(), this->time_range, this->numTimeSegments(), geomID, unsigned(j));
    pinfo.add_primref(prim);
    prims[k++] = prim;
  }
  return pinfo;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, %rax vbroadcastss 0x364fa(%rip), %xmm0 # 0x1eeba20 vmovaps %xmm0, (%rdi) vbroadcastss 0x37651(%rip), %xmm5 # 0x1eecb84 vmovaps %xmm5, 0x10(%rdi) vmovaps %xmm0, 0x20(%rdi) vmovaps %xmm5, 0x30(%rdi) vmovaps %xmm0, 0x40(%rdi) movq %rcx, -0x20(%rsp) movq %r8, %rcx movq %rdx, -0x8(%rsp) vmovaps %xmm5, 0x50(%rdi) vxorps %xmm2, %xmm2, %xmm2 vmovups %ymm2, 0x60(%rdi) xorl %edx, %edx movl %edx, 0x80(%rdi) vbroadcastss 0x371a1(%rip), %xmm2 # 0x1eec714 vmovlps %xmm2, 0x84(%rdi) movl %edx, 0x8c(%rdi) movq (%r8), %r11 vmovsd 0x88(%rdi), %xmm4 movq 0x70(%rdi), %r8 movq 0x78(%rdi), %rbx vmovss 0x80(%rdi), %xmm3 vmovss 0x84(%rdi), %xmm2 cmpq 0x8(%rcx), %r11 jae 0x1eb5c0e vmovss 0x3b38e(%rip), %xmm10 # 0x1ef0944 movq %rax, -0x10(%rsp) movq 0x68(%rax), %rax movq %rax, -0x18(%rsp) vxorps %xmm12, %xmm12, %xmm12 vmovss 0x37143(%rip), %xmm13 # 0x1eec714 vbroadcastss 0x375a6(%rip), %xmm14 # 0x1eecb80 vxorps %xmm15, %xmm15, %xmm15 vbroadcastss 0x3b9f7(%rip), %xmm17 # 0x1ef0fe0 vmovaps %xmm0, %xmm8 vmovaps %xmm5, %xmm7 vmovaps %xmm0, %xmm6 vmovaps %xmm5, %xmm31 movq -0x20(%rsp), %rax vmovsd 0x2c(%rsi), %xmm9 vmovss 0x4(%rax), %xmm19 vmovshdup %xmm9, %xmm18 # xmm18 = xmm9[1,1,3,3] vsubss %xmm9, %xmm18, %xmm21 vsubss %xmm9, %xmm19, %xmm18 vdivss %xmm21, %xmm18, %xmm18 vmulss %xmm10, %xmm18, %xmm22 movl %r11d, %r14d movq 0xe0(%rsi), %r13 cmpq 0x18(%r13), %r14 jae 0x1eb5be9 vmovss 0x28(%rsi), %xmm18 vmovss (%rax), %xmm20 vmulss %xmm22, %xmm18, %xmm22 vrndscaless $0xa, %xmm22, %xmm22, %xmm22 vminss %xmm18, %xmm22, %xmm22 vcvttss2si %xmm22, %ebp vsubss %xmm9, %xmm20, %xmm9 vdivss %xmm21, %xmm9, %xmm9 vmulss 0x3b2c8(%rip), %xmm9, %xmm9 # 0x1ef0940 vmulss %xmm9, %xmm18, %xmm9 vroundss $0x9, %xmm9, %xmm9, %xmm9 vmaxss %xmm9, %xmm12, %xmm9 vcvttss2si %xmm9, %edx cmpl %ebp, %edx seta %r15b ja 0x1eb5753 movq %rcx, %rax movq %r9, %rcx movq %r8, %r9 movq %rbx, %r8 movslq %edx, %r12 imulq $0x38, %r12, %r10 movq (%r13,%r10), %rdx movq 0x10(%r13,%r10), %rbx imulq %r14, %rbx vmovaps (%rdx,%rbx), %xmm9 vcmpnleps 0x6bd31(%rip){1to4}, %xmm9, %k1 # 0x1f213fc vcmpltps %xmm17, %xmm9, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil jne 0x1eb5bf8 vucomiss 0xc(%rdx,%rbx), %xmm12 movq %r8, %rbx movq %r9, %r8 movq %rcx, %r9 movq %rax, %rcx movq -0x20(%rsp), %rax ja 0x1eb574a movslq %ebp, %rbp addq %r10, %r13 addq $0x48, %r13 incq %r12 cmpq %rbp, %r12 seta %r15b ja 0x1eb5753 movq -0x10(%r13), %r10 movq (%r13), %rdx imulq %r14, %rdx vmovaps (%r10,%rdx), %xmm9 vcmpnleps 0x6bcd0(%rip){1to4}, %xmm9, %k1 # 0x1f213fc vcmpltps %xmm17, %xmm9, %k0 {%k1} kmovd %k0, %edi cmpb $0xf, %dil jne 0x1eb574a addq $0x38, %r13 vucomiss 0xc(%r10,%rdx), %xmm12 jbe 0x1eb5703 testb %r15b, %r15b je 0x1eb5be9 vmovaps %xmm4, %xmm1 vmovss 0x2c(%rsi), %xmm9 vmovss 0x30(%rsi), %xmm21 vsubss %xmm9, %xmm20, %xmm20 vsubss %xmm9, %xmm21, %xmm21 vdivss %xmm21, %xmm20, %xmm22 vsubss %xmm9, %xmm19, %xmm9 vdivss %xmm21, %xmm9, %xmm25 vmulss %xmm22, %xmm18, %xmm9 vmulss %xmm25, %xmm18, %xmm26 vrndscaless $0x9, %xmm9, %xmm9, %xmm19 vrndscaless $0xa, %xmm26, %xmm26, %xmm20 vmaxss %xmm12, %xmm19, %xmm29 vminss %xmm18, %xmm20, %xmm4 vcvttss2si %xmm29, %edx vcvttss2si %xmm4, %edi vcvttss2si %xmm19, %r14d testl %r14d, %r14d vcvttss2si %xmm20, %r10d movl $0xffffffff, %ebp # imm = 0xFFFFFFFF cmovsl %ebp, %r14d vcvttss2si %xmm18, %r15d incl %r15d cmpl %r15d, %r10d cmovll %r10d, %r15d movslq %edx, %rdx movq 0xe0(%rsi), %r12 imulq $0x38, %rdx, %r13 movq (%r12,%r13), %rdx movq 0x10(%r12,%r13), %r10 imulq %r11, %r10 vmovaps 
(%rdx,%r10), %xmm19 vbroadcastss 0x128(%rsi), %xmm24 vmulss 0xc(%rdx,%r10), %xmm24, %xmm20 vbroadcastss %xmm20, %xmm20 vsubps %xmm20, %xmm19, %xmm23 vaddps %xmm20, %xmm19, %xmm19 movslq %edi, %rdx imulq $0x38, %rdx, %rbp movq (%r12,%rbp), %rdx movq 0x10(%r12,%rbp), %rdi imulq %r11, %rdi vmovaps (%rdx,%rdi), %xmm20 vmulss 0xc(%rdx,%rdi), %xmm24, %xmm21 vbroadcastss %xmm21, %xmm30 vsubps %xmm30, %xmm20, %xmm21 vaddps %xmm30, %xmm20, %xmm20 movl %r15d, %edx subl %r14d, %edx vsubss %xmm29, %xmm9, %xmm9 cmpl $0x1, %edx jne 0x1eb58d7 vmaxss %xmm12, %xmm9, %xmm9 vsubss %xmm9, %xmm13, %xmm18 vbroadcastss %xmm9, %xmm9 vmulps %xmm21, %xmm9, %xmm22 vbroadcastss %xmm18, %xmm18 vfmadd231ps %xmm23, %xmm18, %xmm22 # xmm22 = (xmm18 * xmm23) + xmm22 vmulps %xmm20, %xmm9, %xmm9 vfmadd231ps %xmm18, %xmm19, %xmm9 # xmm9 = (xmm19 * xmm18) + xmm9 vsubss %xmm26, %xmm4, %xmm18 vmaxss %xmm12, %xmm18, %xmm18 vsubss %xmm18, %xmm13, %xmm24 vbroadcastss %xmm18, %xmm18 vmulps %xmm18, %xmm23, %xmm23 vbroadcastss %xmm24, %xmm24 vfmadd213ps %xmm23, %xmm24, %xmm21 # xmm21 = (xmm24 * xmm21) + xmm23 vmulps %xmm18, %xmm19, %xmm18 vfmadd213ps %xmm18, %xmm24, %xmm20 # xmm20 = (xmm24 * xmm20) + xmm18 vmovaps %xmm9, %xmm19 vmovaps %xmm22, %xmm23 jmp 0x1eb5af1 vmovaps %xmm13, %xmm27 vmovaps %xmm5, %xmm13 vmovaps %xmm0, %xmm16 vxorps %xmm0, %xmm0, %xmm0 vmovaps %xmm8, %xmm12 vmovaps %xmm7, %xmm8 vmovaps %xmm6, %xmm7 vmovaps %xmm31, %xmm6 vmovaps %xmm3, %xmm5 vmovaps %xmm2, %xmm3 vmovaps %xmm10, %xmm2 movq 0x38(%r12,%r13), %rdx movq 0x48(%r12,%r13), %rdi imulq %r11, %rdi vmovaps (%rdx,%rdi), %xmm29 vmulps 0xc(%rdx,%rdi){1to4}, %xmm24, %xmm30 vsubps %xmm30, %xmm29, %xmm31 movq -0x38(%r12,%rbp), %rdx movq -0x28(%r12,%rbp), %rdi imulq %r11, %rdi vmovaps (%rdx,%rdi), %xmm28 vaddps %xmm30, %xmm29, %xmm29 vmulps 0xc(%rdx,%rdi){1to4}, %xmm24, %xmm30 vsubps %xmm30, %xmm28, %xmm11 vaddps %xmm30, %xmm28, %xmm28 vmaxss %xmm0, %xmm9, %xmm9 vsubss %xmm9, %xmm27, %xmm30 vbroadcastss %xmm9, %xmm9 vmulps %xmm31, %xmm9, %xmm31 vbroadcastss %xmm30, %xmm30 vfmadd213ps %xmm31, %xmm30, %xmm23 # xmm23 = (xmm30 * xmm23) + xmm31 vmulps %xmm29, %xmm9, %xmm9 vfmadd213ps %xmm9, %xmm30, %xmm19 # xmm19 = (xmm30 * xmm19) + xmm9 vsubss %xmm26, %xmm4, %xmm9 vmaxss %xmm0, %xmm9, %xmm9 vmovaps %xmm27, %xmm29 vsubss %xmm9, %xmm27, %xmm26 vbroadcastss %xmm9, %xmm9 vmulps %xmm11, %xmm9, %xmm11 vbroadcastss %xmm26, %xmm26 vfmadd213ps %xmm11, %xmm26, %xmm21 # xmm21 = (xmm26 * xmm21) + xmm11 vmulps %xmm28, %xmm9, %xmm9 vfmadd213ps %xmm9, %xmm26, %xmm20 # xmm20 = (xmm26 * xmm20) + xmm9 incl %r14d cmpl %r15d, %r14d jge 0x1eb5abd vsubss %xmm22, %xmm25, %xmm25 movl %r14d, %edx imulq $0x38, %rdx, %rdx addq %rdx, %r12 addq $0x10, %r12 vmovaps %xmm2, %xmm10 vmovaps %xmm3, %xmm2 vmovaps %xmm5, %xmm3 vmovaps %xmm6, %xmm31 vmovaps %xmm7, %xmm6 vmovaps %xmm8, %xmm7 vmovaps %xmm12, %xmm8 vxorps %xmm12, %xmm12, %xmm12 vmovaps %xmm16, %xmm0 vmovaps %xmm13, %xmm5 vmovaps %xmm29, %xmm13 vmovaps %xmm1, %xmm4 vcvtsi2ss %r14d, %xmm14, %xmm9 vdivss %xmm18, %xmm9, %xmm9 vsubss %xmm22, %xmm9, %xmm9 vdivss %xmm25, %xmm9, %xmm9 vsubss %xmm9, %xmm13, %xmm11 vbroadcastss %xmm9, %xmm9 vmulps %xmm9, %xmm21, %xmm26 vbroadcastss %xmm11, %xmm11 vfmadd231ps %xmm23, %xmm11, %xmm26 # xmm26 = (xmm11 * xmm23) + xmm26 vmulps %xmm9, %xmm20, %xmm9 vfmadd231ps %xmm11, %xmm19, %xmm9 # xmm9 = (xmm19 * xmm11) + xmm9 movq -0x10(%r12), %rdx movq (%r12), %rdi imulq %r11, %rdi vmovaps (%rdx,%rdi), %xmm11 vmulps 0xc(%rdx,%rdi){1to4}, %xmm24, %xmm27 vsubps %xmm27, %xmm11, %xmm28 vsubps %xmm26, %xmm28, %xmm26 vaddps 
%xmm27, %xmm11, %xmm11 vsubps %xmm9, %xmm11, %xmm9 vminps %xmm15, %xmm26, %xmm11 vmaxps %xmm15, %xmm9, %xmm9 vaddps %xmm11, %xmm23, %xmm23 vaddps %xmm11, %xmm21, %xmm21 vaddps %xmm9, %xmm19, %xmm19 vaddps %xmm9, %xmm20, %xmm20 incl %r14d addq $0x38, %r12 cmpl %r14d, %r15d jne 0x1eb5a19 jmp 0x1eb5af5 vmovaps %xmm2, %xmm10 vmovaps %xmm3, %xmm2 vmovaps %xmm5, %xmm3 vmovaps %xmm6, %xmm31 vmovaps %xmm7, %xmm6 vmovaps %xmm8, %xmm7 vmovaps %xmm12, %xmm8 vxorps %xmm12, %xmm12, %xmm12 vmovaps %xmm16, %xmm0 vmovaps %xmm13, %xmm5 vmovaps %xmm29, %xmm13 vmovaps %xmm1, %xmm4 vmovss 0x38(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm23, %xmm11 # xmm11 = xmm23[0,1,2],xmm1[0] movl 0x24(%rsi), %r14d decl %r14d vmovsd 0x2c(%rsi), %xmm9 vmovd %r11d, %xmm18 vinsertps $0x30, %xmm18, %xmm19, %xmm18 # xmm18 = xmm19[0,1,2],xmm18[0] vmovd %r14d, %xmm19 vinsertps $0x30, %xmm19, %xmm21, %xmm21 # xmm21 = xmm21[0,1,2],xmm19[0] vinsertps $0x30, %xmm19, %xmm20, %xmm19 # xmm19 = xmm20[0,1,2],xmm19[0] vmulps %xmm14, %xmm21, %xmm20 vfmadd231ps %xmm14, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm14) + xmm20 vmulps %xmm14, %xmm19, %xmm22 vfmadd231ps %xmm14, %xmm18, %xmm22 # xmm22 = (xmm18 * xmm14) + xmm22 vaddps %xmm22, %xmm20, %xmm20 vminps %xmm11, %xmm0, %xmm0 vmaxps %xmm18, %xmm5, %xmm5 vminps %xmm21, %xmm8, %xmm8 vmaxps %xmm19, %xmm7, %xmm7 vminps %xmm20, %xmm6, %xmm6 vmaxps %xmm20, %xmm31, %xmm31 vcmpltps %xmm9, %xmm4, %k1 vinsertps $0x50, %xmm9, %xmm4, %xmm20 # xmm20 = xmm4[0],xmm9[1],xmm4[2,3] vblendps $0x2, %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[0],xmm4[1],xmm9[2,3] vmovaps %xmm20, %xmm4 {%k1} incq -0x18(%rsp) addq %r14, %r8 cmpq %r14, %rbx setb %dl vmovshdup %xmm9, %xmm20 # xmm20 = xmm9[1,1,3,3] kmovd %edx, %k1 vmovss %xmm20, %xmm2, %xmm2 {%k1} vmovss %xmm9, %xmm3, %xmm3 {%k1} cmovbeq %r14, %rbx movq -0x8(%rsp), %rdx movq 0x20(%rdx), %rdx leaq (%r9,%r9,4), %rdi incq %r9 shlq $0x4, %rdi vmovaps %xmm11, (%rdx,%rdi) vmovaps %xmm18, 0x10(%rdx,%rdi) vmovaps %xmm21, 0x20(%rdx,%rdi) vmovaps %xmm19, 0x30(%rdx,%rdi) vmovsd %xmm9, 0x40(%rdx,%rdi) incq %r11 cmpq 0x8(%rcx), %r11 jb 0x1eb5600 jmp 0x1eb5c22 movq %r8, %rbx movq %r9, %r8 movq %rcx, %r9 movq %rax, %rcx movq -0x20(%rsp), %rax jmp 0x1eb574a vmovaps %xmm5, %xmm31 vmovaps %xmm0, %xmm6 vmovaps %xmm5, %xmm7 vmovaps %xmm0, %xmm8 jmp 0x1eb5c30 movq -0x10(%rsp), %rax movq -0x18(%rsp), %rcx movq %rcx, 0x68(%rax) vmovaps %xmm0, (%rax) vmovaps %xmm5, 0x10(%rax) vmovaps %xmm8, 0x20(%rax) vmovaps %xmm7, 0x30(%rax) vmovaps %xmm6, 0x40(%rax) vmovaps %xmm31, 0x50(%rax) vmovlps %xmm4, 0x88(%rax) movq %r8, 0x70(%rax) movq %rbx, 0x78(%rax) vmovss %xmm3, 0x80(%rax) vmovss %xmm2, 0x84(%rax) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq nop
/embree[P]embree/kernels/common/scene_points.h
embree::avx512::PointsISA::vbounds(unsigned long) const
__forceinline const T& operator [](size_t i) const { assert(i<num); return *(T*)(ptr_ofs + i*stride); }
movq 0x58(%rsi), %rcx imulq 0x68(%rsi), %rdx vmovaps (%rcx,%rdx), %xmm0 movq %rdi, %rax vmovss 0xc(%rcx,%rdx), %xmm1 vmulss 0x128(%rsi), %xmm1, %xmm1 vbroadcastss %xmm1, %xmm1 vsubps %xmm1, %xmm0, %xmm2 vaddps %xmm1, %xmm0, %xmm0 vmovaps %xmm2, (%rdi) vmovaps %xmm0, 0x10(%rdi) retq
/embree[P]embree/kernels/common/buffer.h
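The operator[] attributed to this record is Embree's strided buffer view: element i lives i*stride bytes past a base pointer, so vertex positions can be interleaved with other attributes in the same buffer. The generated code above does exactly that (base loaded from the geometry, index multiplied by the stride, radius taken from the trailing float of the Vec3ff). A minimal sketch of the idiom, with a hypothetical StridedView name:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Minimal strided view: element i lives at ptr_ofs + i*stride bytes, where
// stride may be larger than sizeof(T) when attributes are interleaved.
template <typename T>
struct StridedView {
  const uint8_t* ptr_ofs = nullptr;
  size_t stride = 0;
  size_t num = 0;

  const T& operator[](size_t i) const {
    assert(i < num);
    return *reinterpret_cast<const T*>(ptr_ofs + i * stride);
  }
};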
embree::avx512::PointsISA::vbounds(embree::LinearSpace3<embree::Vec3fa> const&, unsigned long) const
BBox3fa vbounds(const LinearSpace3fa& space, size_t i) const { return bounds(space, i); }
movq %rdi, %rax movq 0x58(%rsi), %rdi imulq 0x68(%rsi), %rcx vbroadcastss (%rdi,%rcx), %xmm0 vbroadcastss 0x4(%rdi,%rcx), %xmm1 vbroadcastss 0x8(%rdi,%rcx), %xmm2 vmulps 0x20(%rdx), %xmm2, %xmm2 vfmadd231ps 0x10(%rdx), %xmm1, %xmm2 # xmm2 = (xmm1 * mem) + xmm2 vfmadd231ps (%rdx), %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2 vbroadcastss 0xc(%rdi,%rcx), %xmm0 vblendps $0x8, %xmm0, %xmm2, %xmm1 # xmm1 = xmm2[0,1,2],xmm0[3] vmulss 0x128(%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 vsubps %xmm0, %xmm1, %xmm2 vaddps %xmm0, %xmm1, %xmm0 vmovaps %xmm2, (%rax) vmovaps %xmm0, 0x10(%rax) retq
/embree[P]embree/kernels/common/scene_points.h
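vbounds(space, i) transforms the point's center by the column vectors of the linear space (the three fused multiply-adds above) and then widens the result by the radius times a per-geometry scale factor (the scalar loaded from offset 0x128). A sketch of that bound computation under those assumptions, using plain structs in place of Embree's Vec3fa and LinearSpace3fa:

struct Vec3 { float x, y, z; };
struct LinearSpace3 { Vec3 vx, vy, vz; }; // columns of a 3x3 transform

static Vec3 xfmPoint(const LinearSpace3& s, const Vec3& p) {
  // p.x * column x + p.y * column y + p.z * column z
  return { s.vx.x * p.x + s.vy.x * p.y + s.vz.x * p.z,
           s.vx.y * p.x + s.vy.y * p.y + s.vz.y * p.z,
           s.vx.z * p.x + s.vy.z * p.y + s.vz.z * p.z };
}

struct BBox { Vec3 lower, upper; };

// Bounds of a sphere (center c, radius r) after transforming the center;
// maxRadiusScale stands in for the per-geometry factor Embree loads.
static BBox sphereBounds(const LinearSpace3& space, const Vec3& c, float r,
                         float maxRadiusScale) {
  const Vec3 p = xfmPoint(space, c);
  const float e = r * maxRadiusScale;
  return { { p.x - e, p.y - e, p.z - e }, { p.x + e, p.y + e, p.z + e } };
}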
embree::avx512::PointsISA::vlinearBounds(unsigned long, embree::BBox<float> const&) const
__forceinline LBBox3fa linearBounds(size_t primID, const BBox1f& dt) const { return LBBox3fa([&](size_t itime) { return bounds(primID, itime); }, dt, time_range, fnumTimeSegments); }
pushq %rbx vmovss 0x28(%rsi), %xmm0 vmovss 0x2c(%rsi), %xmm2 vmovss (%rcx), %xmm1 vmovss 0x4(%rcx), %xmm3 vmovss 0x30(%rsi), %xmm4 vsubss %xmm2, %xmm1, %xmm1 vsubss %xmm2, %xmm4, %xmm4 vdivss %xmm4, %xmm1, %xmm1 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm4, %xmm2, %xmm7 vmulss %xmm1, %xmm0, %xmm11 vmulss %xmm7, %xmm0, %xmm8 vroundss $0x9, %xmm11, %xmm11, %xmm2 vroundss $0xa, %xmm8, %xmm8, %xmm3 vxorps %xmm9, %xmm9, %xmm9 vmaxss %xmm9, %xmm2, %xmm12 vminss %xmm0, %xmm3, %xmm10 vcvttss2si %xmm12, %eax vcvttss2si %xmm10, %r11d vcvttss2si %xmm2, %r8d testl %r8d, %r8d movl $0xffffffff, %ecx # imm = 0xFFFFFFFF vcvttss2si %xmm3, %r9d cmovnsl %r8d, %ecx vcvttss2si %xmm0, %r8d incl %r8d cmpl %r8d, %r9d cmovll %r9d, %r8d cltq movq 0xe0(%rsi), %r9 imulq $0x38, %rax, %r10 movq (%r9,%r10), %rax movq 0x10(%r9,%r10), %rbx imulq %rdx, %rbx vmovaps (%rax,%rbx), %xmm2 vbroadcastss 0x128(%rsi), %xmm6 vmulss 0xc(%rax,%rbx), %xmm6, %xmm3 movq %rdi, %rax vbroadcastss %xmm3, %xmm3 vsubps %xmm3, %xmm2, %xmm4 movslq %r11d, %rsi imulq $0x38, %rsi, %rsi movq (%r9,%rsi), %rdi movq 0x10(%r9,%rsi), %r11 imulq %rdx, %r11 vmovaps (%rdi,%r11), %xmm13 vmulss 0xc(%rdi,%r11), %xmm6, %xmm14 vaddps %xmm3, %xmm2, %xmm5 vbroadcastss %xmm14, %xmm2 vsubps %xmm2, %xmm13, %xmm3 vaddps %xmm2, %xmm13, %xmm2 movl %r8d, %edi subl %ecx, %edi cmpl $0x1, %edi jne 0x1eb5e8b vsubss %xmm12, %xmm11, %xmm0 vmaxss %xmm9, %xmm0, %xmm0 vmovss 0x368e1(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm6 vbroadcastss %xmm0, %xmm0 vmulps %xmm3, %xmm0, %xmm7 vbroadcastss %xmm6, %xmm6 vfmadd231ps %xmm4, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm4) + xmm7 vmulps %xmm2, %xmm0, %xmm0 vmovaps %xmm7, (%rax) vfmadd231ps %xmm6, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm6) + xmm0 vmovaps %xmm0, 0x10(%rax) vsubss %xmm8, %xmm10, %xmm0 vmaxss %xmm9, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm0, %xmm4, %xmm4 vbroadcastss %xmm1, %xmm1 vfmadd213ps %xmm4, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm4 vmulps %xmm0, %xmm5, %xmm0 vfmadd213ps %xmm0, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm0 jmp 0x1eb5ff2 movq 0x38(%r9,%r10), %rdi movq 0x48(%r9,%r10), %r10 imulq %rdx, %r10 vmovaps (%rdi,%r10), %xmm13 vmulps 0xc(%rdi,%r10){1to4}, %xmm6, %xmm14 vsubps %xmm14, %xmm13, %xmm15 vaddps %xmm14, %xmm13, %xmm13 movq -0x38(%r9,%rsi), %rdi movq -0x28(%r9,%rsi), %rsi imulq %rdx, %rsi vmovaps (%rdi,%rsi), %xmm14 vmulps 0xc(%rdi,%rsi){1to4}, %xmm6, %xmm16 vsubps %xmm16, %xmm14, %xmm17 vaddps %xmm16, %xmm14, %xmm14 vsubss %xmm12, %xmm11, %xmm11 vmaxss %xmm9, %xmm11, %xmm12 vmovss 0x3682a(%rip), %xmm11 # 0x1eec714 vsubss %xmm12, %xmm11, %xmm16 vbroadcastss %xmm12, %xmm12 vmulps %xmm15, %xmm12, %xmm15 vbroadcastss %xmm16, %xmm16 vfmadd213ps %xmm15, %xmm16, %xmm4 # xmm4 = (xmm16 * xmm4) + xmm15 vmulps %xmm13, %xmm12, %xmm12 vfmadd213ps %xmm12, %xmm16, %xmm5 # xmm5 = (xmm16 * xmm5) + xmm12 vsubss %xmm8, %xmm10, %xmm8 vmaxss %xmm9, %xmm8, %xmm8 vsubss %xmm8, %xmm11, %xmm9 vbroadcastss %xmm8, %xmm8 vmulps %xmm17, %xmm8, %xmm10 vbroadcastss %xmm9, %xmm9 vfmadd213ps %xmm10, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm3) + xmm10 vmulps %xmm14, %xmm8, %xmm8 vfmadd213ps %xmm8, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm2) + xmm8 incl %ecx cmpl %r8d, %ecx jge 0x1eb5fe9 vsubss %xmm1, %xmm7, %xmm7 movl %ecx, %esi imulq $0x38, %rsi, %rsi addq %r9, %rsi addq $0x10, %rsi vxorps %xmm8, %xmm8, %xmm8 vcvtsi2ss %ecx, %xmm18, %xmm9 vdivss %xmm0, %xmm9, %xmm9 vsubss %xmm1, %xmm9, %xmm9 vdivss %xmm7, %xmm9, %xmm9 vsubss %xmm9, %xmm11, %xmm10 vbroadcastss %xmm9, %xmm9 vmulps %xmm3, %xmm9, %xmm12 vbroadcastss %xmm10, %xmm10 
vfmadd231ps %xmm4, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm4) + xmm12 vmulps %xmm2, %xmm9, %xmm9 vfmadd231ps %xmm10, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm10) + xmm9 movq -0x10(%rsi), %rdi movq (%rsi), %r9 imulq %rdx, %r9 vmovaps (%rdi,%r9), %xmm10 vmulps 0xc(%rdi,%r9){1to4}, %xmm6, %xmm13 vsubps %xmm13, %xmm10, %xmm14 vsubps %xmm12, %xmm14, %xmm12 vaddps %xmm13, %xmm10, %xmm10 vsubps %xmm9, %xmm10, %xmm9 vminps %xmm8, %xmm12, %xmm10 vmaxps %xmm8, %xmm9, %xmm9 vaddps %xmm4, %xmm10, %xmm4 vaddps %xmm3, %xmm10, %xmm3 vaddps %xmm5, %xmm9, %xmm5 vaddps %xmm2, %xmm9, %xmm2 incl %ecx addq $0x38, %rsi cmpl %ecx, %r8d jne 0x1eb5f60 vmovaps %xmm4, (%rax) vmovaps %xmm5, 0x10(%rax) vmovaps %xmm3, 0x20(%rax) vmovaps %xmm2, 0x30(%rax) popq %rbx retq
/embree[P]embree/kernels/common/scene_points.h
embree::avx512::PointsISA::vlinearBounds(embree::LinearSpace3<embree::Vec3fa> const&, unsigned long, embree::BBox<float> const&) const
__forceinline LBBox3fa linearBounds(const LinearSpace3fa& space, size_t primID, const BBox1f& dt) const { return LBBox3fa([&](size_t itime) { return bounds(space, primID, itime); }, dt, time_range, fnumTimeSegments); }
pushq %r15 pushq %r14 pushq %rbx vmovss 0x28(%rsi), %xmm0 vmovss 0x2c(%rsi), %xmm2 vmovss (%r8), %xmm1 vmovss 0x4(%r8), %xmm3 vmovss 0x30(%rsi), %xmm4 vsubss %xmm2, %xmm1, %xmm1 vsubss %xmm2, %xmm4, %xmm4 vdivss %xmm4, %xmm1, %xmm1 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm4, %xmm2, %xmm17 vmulss %xmm1, %xmm0, %xmm14 vmulss %xmm17, %xmm0, %xmm19 vroundss $0x9, %xmm14, %xmm14, %xmm2 vrndscaless $0xa, %xmm19, %xmm19, %xmm3 vxorps %xmm16, %xmm16, %xmm16 vmaxss %xmm16, %xmm2, %xmm15 vminss %xmm0, %xmm3, %xmm13 vcvttss2si %xmm15, %eax vcvttss2si %xmm13, %ebx vcvttss2si %xmm2, %r9d testl %r9d, %r9d movl $0xffffffff, %r8d # imm = 0xFFFFFFFF vcvttss2si %xmm3, %r10d cmovnsl %r9d, %r8d vcvttss2si %xmm0, %r9d incl %r9d cmpl %r9d, %r10d cmovll %r10d, %r9d cltq movq 0xe0(%rsi), %r10 imulq $0x38, %rax, %r11 movq (%r10,%r11), %r14 movq 0x10(%r10,%r11), %r15 imulq %rcx, %r15 vmovaps (%rdx), %xmm6 vmovaps 0x10(%rdx), %xmm7 vmovaps 0x20(%rdx), %xmm8 vmulps 0x8(%r14,%r15){1to4}, %xmm8, %xmm2 vfmadd231ps 0x4(%r14,%r15){1to4}, %xmm7, %xmm2 # xmm2 = (xmm7 * mem) + xmm2 vfmadd231ps (%r14,%r15){1to4}, %xmm6, %xmm2 # xmm2 = (xmm6 * mem) + xmm2 movq %rdi, %rax vbroadcastss 0xc(%r14,%r15), %xmm3 vblendps $0x8, %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0,1,2],xmm3[3] vbroadcastss 0x128(%rsi), %xmm9 vmulss %xmm3, %xmm9, %xmm2 vbroadcastss %xmm2, %xmm3 vsubps %xmm3, %xmm4, %xmm2 movslq %ebx, %rdx imulq $0x38, %rdx, %rdx movq (%r10,%rdx), %rsi movq 0x10(%r10,%rdx), %rdi imulq %rcx, %rdi vmulps 0x8(%rsi,%rdi){1to4}, %xmm8, %xmm12 vfmadd231ps 0x4(%rsi,%rdi){1to4}, %xmm7, %xmm12 # xmm12 = (xmm7 * mem) + xmm12 vaddps %xmm3, %xmm4, %xmm5 vfmadd231ps (%rsi,%rdi){1to4}, %xmm6, %xmm12 # xmm12 = (xmm6 * mem) + xmm12 vbroadcastss 0xc(%rsi,%rdi), %xmm3 vblendps $0x8, %xmm3, %xmm12, %xmm12 # xmm12 = xmm12[0,1,2],xmm3[3] vmulss %xmm3, %xmm9, %xmm3 vbroadcastss %xmm3, %xmm3 vsubps %xmm3, %xmm12, %xmm4 vaddps %xmm3, %xmm12, %xmm3 movl %r9d, %esi subl %r8d, %esi cmpl $0x1, %esi jne 0x1eb61b1 vsubss %xmm15, %xmm14, %xmm0 vmaxss %xmm16, %xmm0, %xmm0 vmovss 0x365bd(%rip), %xmm1 # 0x1eec714 vsubss %xmm0, %xmm1, %xmm6 vbroadcastss %xmm0, %xmm0 vmulps %xmm4, %xmm0, %xmm7 vbroadcastss %xmm6, %xmm6 vfmadd231ps %xmm2, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm2) + xmm7 vmulps %xmm3, %xmm0, %xmm0 vmovaps %xmm7, (%rax) vfmadd231ps %xmm6, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm6) + xmm0 vmovaps %xmm0, 0x10(%rax) vsubss %xmm19, %xmm13, %xmm0 vmaxss %xmm16, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm1 vbroadcastss %xmm0, %xmm0 vmulps %xmm2, %xmm0, %xmm2 vbroadcastss %xmm1, %xmm1 vfmadd213ps %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm4) + xmm2 vmulps %xmm5, %xmm0, %xmm0 vfmadd213ps %xmm0, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm0 jmp 0x1eb6378 movq 0x38(%r10,%r11), %rsi movq 0x48(%r10,%r11), %rdi imulq %rcx, %rdi vmulps 0x8(%rsi,%rdi){1to4}, %xmm8, %xmm12 vfmadd231ps 0x4(%rsi,%rdi){1to4}, %xmm7, %xmm12 # xmm12 = (xmm7 * mem) + xmm12 vfmadd231ps (%rsi,%rdi){1to4}, %xmm6, %xmm12 # xmm12 = (xmm6 * mem) + xmm12 vbroadcastss 0xc(%rsi,%rdi), %xmm10 vblendps $0x8, %xmm10, %xmm12, %xmm12 # xmm12 = xmm12[0,1,2],xmm10[3] vmulps %xmm10, %xmm9, %xmm10 vsubps %xmm10, %xmm12, %xmm18 vaddps %xmm12, %xmm10, %xmm10 movq -0x38(%r10,%rdx), %rsi movq -0x28(%r10,%rdx), %rdx imulq %rcx, %rdx vmulps 0x8(%rsi,%rdx){1to4}, %xmm8, %xmm12 vfmadd231ps 0x4(%rsi,%rdx){1to4}, %xmm7, %xmm12 # xmm12 = (xmm7 * mem) + xmm12 vfmadd231ps (%rsi,%rdx){1to4}, %xmm6, %xmm12 # xmm12 = (xmm6 * mem) + xmm12 vbroadcastss 0xc(%rsi,%rdx), %xmm11 vblendps $0x8, %xmm11, %xmm12, %xmm12 # xmm12 = xmm12[0,1,2],xmm11[3] vmulps %xmm11, 
%xmm9, %xmm11 vsubps %xmm11, %xmm12, %xmm20 vaddps %xmm12, %xmm11, %xmm11 vsubss %xmm15, %xmm14, %xmm12 vmaxss %xmm16, %xmm12, %xmm12 vmovss 0x364ca(%rip), %xmm21 # 0x1eec714 vsubss %xmm12, %xmm21, %xmm15 vbroadcastss %xmm12, %xmm12 vmulps %xmm18, %xmm12, %xmm18 vbroadcastss %xmm15, %xmm15 vfmadd213ps %xmm18, %xmm15, %xmm2 # xmm2 = (xmm15 * xmm2) + xmm18 vmulps %xmm10, %xmm12, %xmm10 vfmadd213ps %xmm10, %xmm15, %xmm5 # xmm5 = (xmm15 * xmm5) + xmm10 vsubss %xmm19, %xmm13, %xmm10 vmaxss %xmm16, %xmm10, %xmm10 vsubss %xmm10, %xmm21, %xmm12 vbroadcastss %xmm10, %xmm10 vmulps %xmm20, %xmm10, %xmm13 vbroadcastss %xmm12, %xmm12 vfmadd213ps %xmm13, %xmm12, %xmm4 # xmm4 = (xmm12 * xmm4) + xmm13 vmulps %xmm11, %xmm10, %xmm10 vfmadd213ps %xmm10, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm3) + xmm10 incl %r8d cmpl %r9d, %r8d jge 0x1eb636f vsubss %xmm1, %xmm17, %xmm10 movl %r8d, %edx imulq $0x38, %rdx, %rdx addq %r10, %rdx addq $0x10, %rdx vxorps %xmm11, %xmm11, %xmm11 vcvtsi2ss %r8d, %xmm22, %xmm12 vdivss %xmm0, %xmm12, %xmm12 vsubss %xmm1, %xmm12, %xmm12 vdivss %xmm10, %xmm12, %xmm12 vsubss %xmm12, %xmm21, %xmm13 vbroadcastss %xmm12, %xmm12 vmulps %xmm4, %xmm12, %xmm15 vbroadcastss %xmm13, %xmm13 vfmadd231ps %xmm2, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm2) + xmm15 vmulps %xmm3, %xmm12, %xmm12 movq -0x10(%rdx), %rsi movq (%rdx), %rdi imulq %rcx, %rdi vmulps 0x8(%rsi,%rdi){1to4}, %xmm8, %xmm14 vfmadd231ps 0x4(%rsi,%rdi){1to4}, %xmm7, %xmm14 # xmm14 = (xmm7 * mem) + xmm14 vfmadd231ps (%rsi,%rdi){1to4}, %xmm6, %xmm14 # xmm14 = (xmm6 * mem) + xmm14 vfmadd231ps %xmm13, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm13) + xmm12 vbroadcastss 0xc(%rsi,%rdi), %xmm13 vblendps $0x8, %xmm13, %xmm14, %xmm14 # xmm14 = xmm14[0,1,2],xmm13[3] vmulps %xmm13, %xmm9, %xmm13 vsubps %xmm13, %xmm14, %xmm16 vsubps %xmm15, %xmm16, %xmm15 vaddps %xmm14, %xmm13, %xmm13 vsubps %xmm12, %xmm13, %xmm12 vminps %xmm11, %xmm15, %xmm13 vmaxps %xmm11, %xmm12, %xmm12 vaddps %xmm2, %xmm13, %xmm2 vaddps %xmm4, %xmm13, %xmm4 vaddps %xmm5, %xmm12, %xmm5 vaddps %xmm3, %xmm12, %xmm3 incl %r8d addq $0x38, %rdx cmpl %r8d, %r9d jne 0x1eb62c6 vmovaps %xmm2, (%rax) vmovaps %xmm5, 0x10(%rax) vmovaps %xmm4, 0x20(%rax) vmovaps %xmm3, 0x30(%rax) popq %rbx popq %r14 popq %r15 retq
/embree[P]embree/kernels/common/scene_points.h
embree::avx512::BVHNIntersector1<8, 257, false, embree::avx512::VirtualCurveIntersector1>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2578, %rsp # imm = 0x2578 movq %rdx, 0x20(%rsp) movq %rdi, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eb6ae7 vmovaps 0x10(%rsi), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmovss 0x35d94(%rip), %xmm4 # 0x1eec718 vmulss %xmm4, %xmm3, %xmm5 vmovss 0x361f0(%rip), %xmm6 # 0x1eecb80 vmulss %xmm6, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm5, %xmm2 vbroadcastss %xmm2, %xmm3 vmulps %xmm3, %xmm0, %xmm5 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vmovshdup %xmm5, %xmm8 # xmm8 = xmm5[1,1,3,3] vbroadcastss 0x6a501(%rip), %xmm9 # 0x1f20ec0 vxorps %xmm9, %xmm8, %xmm8 vunpckhps %xmm1, %xmm5, %xmm10 # xmm10 = xmm5[2],xmm1[2],xmm5[3],xmm1[3] vmovss %xmm8, %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[1,2,3] vshufps $0x41, %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[1,0],xmm8[0,1] vxorpd %xmm7, %xmm9, %xmm7 vinsertps $0x2a, %xmm5, %xmm7, %xmm7 # xmm7 = xmm7[0],zero,xmm5[0],zero vdpps $0x7f, %xmm8, %xmm8, %xmm9 vdpps $0x7f, %xmm7, %xmm7, %xmm10 vcmpltps %xmm9, %xmm10, %k0 vpmovm2d %k0, %xmm9 vpbroadcastd %xmm9, %xmm9 vpmovd2m %xmm9, %k1 vpcmpeqd %xmm9, %xmm9, %xmm9 vmovaps %xmm9, %xmm9 {%k1} {z} vblendvps %xmm9, %xmm8, %xmm7, %xmm7 vdpps $0x7f, %xmm7, %xmm7, %xmm8 vmovss %xmm8, %xmm1, %xmm9 # xmm9 = xmm8[0],xmm1[1,2,3] vrsqrt14ss %xmm9, %xmm1, %xmm9 vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm9, %xmm8, %xmm8 vmulss %xmm9, %xmm9, %xmm10 vmulss %xmm10, %xmm8, %xmm8 vmulss %xmm4, %xmm9, %xmm9 vsubss %xmm8, %xmm9, %xmm8 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3] vmulps %xmm7, %xmm9, %xmm9 vfmsub231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,2,0,3] vdpps $0x7f, %xmm8, %xmm8, %xmm9 vmovss %xmm2, 0x30(%rsp) vmovss %xmm9, %xmm1, %xmm2 vrsqrt14ss %xmm2, %xmm1, %xmm2 vmulss %xmm4, %xmm2, %xmm4 vmulss %xmm6, %xmm9, %xmm6 vmulss %xmm2, %xmm6, %xmm6 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm6, %xmm2 vsubss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm8, %xmm2 vmulps %xmm5, %xmm3, %xmm3 vunpcklps %xmm3, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] vunpckhps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[2],xmm3[2],xmm7[3],xmm3[3] vunpcklps %xmm1, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vunpckhps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] vunpcklps %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] vunpcklps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vunpckhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vmovaps %xmm2, 0x40(%rsp) vmovaps %xmm3, 0x50(%rsp) vmovaps %xmm1, 0x60(%rsp) movq 0x70(%rax), %rax movq %rax, 0x230(%rsp) movl $0x0, 0x238(%rsp) cmpq $0x8, %rax jne 0x1eb6afc addq $0x2578, %rsp # imm = 0x2578 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vxorps %xmm3, %xmm3, %xmm3 vmaxss 0xc(%rsi), %xmm3, %xmm2 vmaxss 0x20(%rsi), %xmm3, %xmm1 leaq 0x240(%rsp), %r8 vandps 0x6a3a8(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x3a4c3(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vblendmps %xmm5, %xmm0, %xmm4 {%k1} vrcp14ps %xmm4, %xmm5 vfnmadd213ps 0x35bd2(%rip){1to4}, %xmm5, %xmm4 # xmm4 = -(xmm5 * xmm4) + mem vbroadcastss (%rsi), %ymm6 vmovups %ymm6, 
0x110(%rsp) vfmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm5 vbroadcastss 0x4(%rsi), %ymm5 vmovups %ymm5, 0xf0(%rsp) vbroadcastss 0x8(%rsi), %ymm5 vmovups %ymm5, 0xd0(%rsp) vbroadcastss %xmm0, %ymm5 vmovups %ymm5, 0xb0(%rsp) vbroadcastss 0x5bb7a(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm5 vmovups %ymm5, 0x90(%rsp) vbroadcastss 0x6a33b(%rip), %ymm5 # 0x1f20edc vpermps %ymm0, %ymm5, %ymm0 vmovups %ymm0, 0x70(%rsp) xorl %edi, %edi vucomiss %xmm3, %xmm4 setb %dil vbroadcastss %xmm4, %ymm17 vmovshdup %xmm4, %xmm0 # xmm0 = xmm4[1,1,3,3] vbroadcastsd %xmm0, %ymm18 vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0] vpermps %ymm4, %ymm5, %ymm19 vmulps (%rsi), %xmm4, %xmm4 vbroadcastss %xmm4, %ymm7 vpermps %ymm4, %ymm8, %ymm8 vpermps %ymm4, %ymm5, %ymm4 shll $0x5, %edi xorl %ebp, %ebp vucomiss %xmm3, %xmm0 setb %bpl shll $0x5, %ebp orq $0x40, %rbp xorl %r10d, %r10d vucomiss %xmm3, %xmm6 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %rbp, %rbx xorq $0x20, %rbx movq %r10, %r14 xorq $0x20, %r14 vbroadcastss %xmm2, %ymm20 vbroadcastss 0x6a28e(%rip), %ymm2 # 0x1f20ec0 vbroadcastss %xmm1, %ymm0 vxorps %ymm2, %ymm7, %ymm21 vxorps %ymm2, %ymm8, %ymm22 vxorps %ymm2, %ymm4, %ymm23 vpmovsxbd 0xa6e1d(%rip), %ymm24 # 0x1f5da70 vpbroadcastd 0xaa95f(%rip), %ymm25 # 0x1f615bc leaq 0x230(%rsp), %r13 vmovups %ymm17, 0x1f0(%rsp) vmovups %ymm18, 0x1d0(%rsp) vmovups %ymm19, 0x1b0(%rsp) movq %rdi, 0x10(%rsp) movq %r14, 0x8(%rsp) vmovups %ymm20, 0x190(%rsp) vmovups %ymm21, 0x170(%rsp) vmovups %ymm22, 0x150(%rsp) vmovups %ymm23, 0x130(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r13, %r8 je 0x1eb6ae7 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1eb6cc1 movq (%r8), %rcx movq %rcx, %rax andq $0xf, %rax jne 0x1eb6dc9 vmovaps 0x40(%rcx,%rdi), %ymm1 vfmadd132ps %ymm17, %ymm21, %ymm1 # ymm1 = (ymm1 * ymm17) + ymm21 vmovaps 0x40(%rcx,%rbp), %ymm2 vfmadd132ps %ymm18, %ymm22, %ymm2 # ymm2 = (ymm2 * ymm18) + ymm22 vpmaxsd %ymm2, %ymm1, %ymm1 vmovaps 0x40(%rcx,%r10), %ymm2 vfmadd132ps %ymm19, %ymm23, %ymm2 # ymm2 = (ymm2 * ymm19) + ymm23 vmovaps 0x40(%rcx,%r11), %ymm3 vfmadd132ps %ymm17, %ymm21, %ymm3 # ymm3 = (ymm3 * ymm17) + ymm21 vmovaps 0x40(%rcx,%rbx), %ymm4 vfmadd132ps %ymm18, %ymm22, %ymm4 # ymm4 = (ymm4 * ymm18) + ymm22 vpminsd %ymm4, %ymm3, %ymm3 vmovaps 0x40(%rcx,%r14), %ymm4 vfmadd132ps %ymm19, %ymm23, %ymm4 # ymm4 = (ymm4 * ymm19) + ymm23 vpmaxsd %ymm20, %ymm2, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm8 vpminsd %ymm0, %ymm4, %ymm1 vpminsd %ymm1, %ymm3, %ymm1 vpcmpled %ymm1, %ymm8, %k0 kmovb %k0, %r15d movb $0x1, %al testb %al, %al je 0x1eb6dd2 testq %r15, %r15 je 0x1eb6dd9 andq $-0x10, %rcx vmovdqu (%rcx), %ymm1 vmovdqu 0x20(%rcx), %ymm2 vmovdqa64 %ymm24, %ymm3 vpternlogd $0xf8, %ymm25, %ymm8, %ymm3 kmovd %r15d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) xorl %eax, %eax blsrq %r15, %rdx jne 0x1eb6de0 testl %eax, %eax je 0x1eb6cdd jmp 0x1eb7276 cmpl $0x2, %eax je 0x1eb6e48 xorl %eax, %eax jmp 0x1eb6d61 movl $0x6, %eax jmp 0x1eb6dbc movl $0x4, %eax jmp 0x1eb6dbc vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rdx, %rcx jne 0x1eb7019 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rcx vpermt2q %ymm2, 
%ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1eb6dbc movq %rcx, %rax andq $-0x10, %rax vmovaps 0x100(%rax), %ymm6 vmovups 0x70(%rsp), %ymm3 vmulps %ymm6, %ymm3, %ymm1 vmovaps 0x120(%rax), %ymm5 vmulps %ymm5, %ymm3, %ymm2 vmovaps 0x140(%rax), %ymm4 vmulps %ymm4, %ymm3, %ymm3 vmovaps 0x40(%rax), %ymm8 vmovaps 0x60(%rax), %ymm9 vmovaps 0x80(%rax), %ymm7 vmovaps 0xa0(%rax), %ymm10 vmovups 0x90(%rsp), %ymm13 vfmadd231ps %ymm10, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm10) + ymm1 vmovaps 0xc0(%rax), %ymm11 vfmadd231ps %ymm11, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm11) + ymm2 vmovaps 0xe0(%rax), %ymm12 vfmadd231ps %ymm12, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm12) + ymm3 vmovups 0xb0(%rsp), %ymm13 vfmadd231ps %ymm8, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm8) + ymm1 vfmadd231ps %ymm9, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm9) + ymm2 vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3 vbroadcastss 0x69fe8(%rip), %ymm14 # 0x1f20ec4 vandps %ymm1, %ymm14, %ymm13 vbroadcastss 0x3a0ff(%rip), %ymm15 # 0x1ef0fe8 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm1 {%k1} vandps %ymm2, %ymm14, %ymm13 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm2 {%k1} vandps %ymm3, %ymm14, %ymm13 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm3 {%k1} vrcp14ps %ymm1, %ymm13 vbroadcastss 0x357ed(%rip), %ymm14 # 0x1eec714 vfnmadd213ps %ymm14, %ymm13, %ymm1 # ymm1 = -(ymm13 * ymm1) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm1 # ymm1 = (ymm1 * ymm13) + ymm13 vrcp14ps %ymm2, %ymm13 vfnmadd213ps %ymm14, %ymm13, %ymm2 # ymm2 = -(ymm13 * ymm2) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm2 # ymm2 = (ymm2 * ymm13) + ymm13 vrcp14ps %ymm3, %ymm13 vfnmadd213ps %ymm14, %ymm13, %ymm3 # ymm3 = -(ymm13 * ymm3) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm13) + ymm13 vbroadcastss 0x69f66(%rip), %ymm15 # 0x1f20ec0 vxorps %ymm1, %ymm15, %ymm13 vxorps %ymm2, %ymm15, %ymm14 vmovups 0xd0(%rsp), %ymm16 vfmadd213ps 0x160(%rax), %ymm16, %ymm6 # ymm6 = (ymm16 * ymm6) + mem vfmadd213ps 0x180(%rax), %ymm16, %ymm5 # ymm5 = (ymm16 * ymm5) + mem vxorps %ymm3, %ymm15, %ymm15 vfmadd213ps 0x1a0(%rax), %ymm16, %ymm4 # ymm4 = (ymm16 * ymm4) + mem vmovups 0xf0(%rsp), %ymm16 vfmadd231ps %ymm10, %ymm16, %ymm6 # ymm6 = (ymm16 * ymm10) + ymm6 vfmadd231ps %ymm11, %ymm16, %ymm5 # ymm5 = (ymm16 * ymm11) + ymm5 vfmadd231ps %ymm12, %ymm16, %ymm4 # ymm4 = (ymm16 * ymm12) + ymm4 vmovups 0x110(%rsp), %ymm10 vfmadd231ps %ymm8, %ymm10, %ymm6 # ymm6 = (ymm10 * ymm8) + ymm6 vmulps %ymm6, %ymm13, %ymm6 vfmadd231ps %ymm9, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm9) + ymm5 vmulps %ymm5, %ymm14, %ymm5 vfmadd231ps %ymm7, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm7) + ymm4 vmulps %ymm4, %ymm15, %ymm4 vaddps %ymm6, %ymm1, %ymm1 vaddps %ymm5, %ymm2, %ymm2 vaddps %ymm4, %ymm3, %ymm3 vpminsd %ymm1, %ymm6, %ymm7 vpminsd %ymm2, %ymm5, %ymm8 vpminsd %ymm3, %ymm4, %ymm9 vmaxps %ymm9, %ymm8, %ymm8 vpmaxsd %ymm1, %ymm6, %ymm1 vpmaxsd %ymm2, %ymm5, %ymm2 vpmaxsd %ymm3, %ymm4, %ymm3 vminps %ymm3, %ymm2, %ymm2 vmaxps %ymm7, %ymm20, %ymm3 vmaxps %ymm8, %ymm3, %ymm8 vminps %ymm1, %ymm0, %ymm1 vminps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm8, %k0 jmp 0x1eb6d5b vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eb70a2 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, 
%rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1eb6dbc vmovdqa %ymm8, %ymm10 movq %r14, %r12 movq %r11, %r14 movq %r10, %r11 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eb7178 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm10, %ymm8 vpermd %ymm10, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm10, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm10, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi movq %r11, %r10 movq %r14, %r11 movq %r12, %r14 jmp 0x1eb6dbc valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x69d38(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0xaa48f(%rip), %ymm9 # 0x1f61620 vpermt2d %ymm8, %ymm9, %ymm3 vpmovsxbd 0xaa488(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0xaa47b(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x5b539(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eb71be popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm10, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eb7228 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rcx movq %rdi, %rsi movq %r9, %rdi movq %r11, %r10 movq %r14, %r11 movq %r12, %r14 vmovdqa %ymm10, %ymm8 jmp 0x1eb6dbc cmpl $0x6, %eax jne 0x1eb6cbc andq $-0x10, %rcx movzbl (%rcx), %eax movq 0x18(%rsp), %rdx movq %r8, 0x28(%rsp) movq 0x8(%rdx), %r8 shll $0x6, %eax leaq 0x30(%rsp), %rdi movq %rsi, %r14 movq 0x20(%rsp), %rdx movq %r10, %r12 movq %rbx, %r13 movq %r11, %rbx vmovdqu %ymm8, 0x210(%rsp) vzeroupper callq *(%r8,%rax) vmovdqu 0x210(%rsp), %ymm8 vpbroadcastd 0xaa2ec(%rip), %ymm25 # 0x1f615bc vpmovsxbd 0xa6796(%rip), %ymm24 # 0x1f5da70 vmovups 0x130(%rsp), %ymm23 vmovups 0x150(%rsp), %ymm22 vmovups 0x170(%rsp), %ymm21 vmovups 0x190(%rsp), %ymm20 movq %rbx, %r11 movq %r13, %rbx movq %r12, %r10 leaq 0x230(%rsp), %r13 vmovups 0x1b0(%rsp), %ymm19 vmovups 0x1d0(%rsp), %ymm18 vmovups 0x1f0(%rsp), %ymm17 movq 0x10(%rsp), %rdi movq 0x28(%rsp), %r8 movq %r14, %rsi movq 0x8(%rsp), %r14 vbroadcastss 0x20(%rsi), %ymm0 jmp 0x1eb6cbc nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
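One detail worth pulling out of the traversal code above is the distance-culled stack pop: every pushed node carries its entry distance, and a popped node whose recorded distance already exceeds ray.tfar is discarded without being visited, since any hit inside it would be farther than the hit already found. A minimal sketch of that pop loop, assuming a hypothetical dynamic stack in place of Embree's fixed-size StackItemT array:

#include <cstdint>
#include <vector>

struct StackItem { uint64_t ptr; float dist; };

// Skeleton of the pop loop: skip stack entries whose entry distance is
// already beyond the closest hit found so far (tfar).
inline bool popNext(std::vector<StackItem>& stack, float tfar, uint64_t& cur) {
  while (!stack.empty()) {
    const StackItem top = stack.back();
    stack.pop_back();
    if (top.dist > tfar) continue; // culled: existing hit is closer
    cur = top.ptr;
    return true;
  }
  return false; // traversal done
}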
embree::avx512::BVHNIntersector1<8, 16781328, false, embree::avx512::VirtualCurveIntersector1>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2578, %rsp # imm = 0x2578 movq %rdx, 0x20(%rsp) movq %rdi, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eb7507 vmovaps 0x10(%rsi), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmovss 0x35374(%rip), %xmm4 # 0x1eec718 vmulss %xmm4, %xmm3, %xmm5 vmovss 0x357d0(%rip), %xmm6 # 0x1eecb80 vmulss %xmm6, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm5, %xmm2 vbroadcastss %xmm2, %xmm3 vmulps %xmm3, %xmm0, %xmm5 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vmovshdup %xmm5, %xmm8 # xmm8 = xmm5[1,1,3,3] vbroadcastss 0x69ae1(%rip), %xmm9 # 0x1f20ec0 vxorps %xmm9, %xmm8, %xmm8 vunpckhps %xmm1, %xmm5, %xmm10 # xmm10 = xmm5[2],xmm1[2],xmm5[3],xmm1[3] vmovss %xmm8, %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[1,2,3] vshufps $0x41, %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[1,0],xmm8[0,1] vxorpd %xmm7, %xmm9, %xmm7 vinsertps $0x2a, %xmm5, %xmm7, %xmm7 # xmm7 = xmm7[0],zero,xmm5[0],zero vdpps $0x7f, %xmm8, %xmm8, %xmm9 vdpps $0x7f, %xmm7, %xmm7, %xmm10 vcmpltps %xmm9, %xmm10, %k0 vpmovm2d %k0, %xmm9 vpbroadcastd %xmm9, %xmm9 vpmovd2m %xmm9, %k1 vpcmpeqd %xmm9, %xmm9, %xmm9 vmovaps %xmm9, %xmm9 {%k1} {z} vblendvps %xmm9, %xmm8, %xmm7, %xmm7 vdpps $0x7f, %xmm7, %xmm7, %xmm8 vmovss %xmm8, %xmm1, %xmm9 # xmm9 = xmm8[0],xmm1[1,2,3] vrsqrt14ss %xmm9, %xmm1, %xmm9 vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm9, %xmm8, %xmm8 vmulss %xmm9, %xmm9, %xmm10 vmulss %xmm10, %xmm8, %xmm8 vmulss %xmm4, %xmm9, %xmm9 vsubss %xmm8, %xmm9, %xmm8 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3] vmulps %xmm7, %xmm9, %xmm9 vfmsub231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,2,0,3] vdpps $0x7f, %xmm8, %xmm8, %xmm9 vmovss %xmm2, 0x30(%rsp) vmovss %xmm9, %xmm1, %xmm2 vrsqrt14ss %xmm2, %xmm1, %xmm2 vmulss %xmm4, %xmm2, %xmm4 vmulss %xmm6, %xmm9, %xmm6 vmulss %xmm2, %xmm6, %xmm6 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm6, %xmm2 vsubss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm8, %xmm2 vmulps %xmm5, %xmm3, %xmm3 vunpcklps %xmm3, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] vunpckhps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[2],xmm3[2],xmm7[3],xmm3[3] vunpcklps %xmm1, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vunpckhps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] vunpcklps %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] vunpcklps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vunpckhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vmovaps %xmm2, 0x40(%rsp) vmovaps %xmm3, 0x50(%rsp) vmovaps %xmm1, 0x60(%rsp) movq 0x70(%rax), %rax movq %rax, 0x230(%rsp) movl $0x0, 0x238(%rsp) cmpq $0x8, %rax jne 0x1eb751c addq $0x2578, %rsp # imm = 0x2578 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vxorps %xmm3, %xmm3, %xmm3 vmaxss 0xc(%rsi), %xmm3, %xmm1 vmaxss 0x20(%rsi), %xmm3, %xmm2 vandps 0x69990(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x39aab(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vblendmps %xmm5, %xmm0, %xmm4 {%k1} vrcp14ps %xmm4, %xmm5 vfnmadd213ps 0x351ba(%rip){1to4}, %xmm5, %xmm4 # xmm4 = -(xmm5 * xmm4) + mem leaq 0x240(%rsp), %r8 vfmadd132ps %xmm5, %xmm5, %xmm4 # xmm4 = (xmm4 * 
xmm5) + xmm5 vbroadcastss (%rsi), %ymm5 vmovups %ymm5, 0x110(%rsp) vbroadcastss 0x4(%rsi), %ymm5 vmovups %ymm5, 0xf0(%rsp) vbroadcastss 0x8(%rsi), %ymm5 vmovups %ymm5, 0xd0(%rsp) vbroadcastss %xmm0, %ymm5 vmovups %ymm5, 0xb0(%rsp) vbroadcastss 0x5b15a(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm5 vmovups %ymm5, 0x90(%rsp) vbroadcastss 0x6991b(%rip), %ymm5 # 0x1f20edc vpermps %ymm0, %ymm5, %ymm0 vmovups %ymm0, 0x70(%rsp) xorl %edi, %edi vucomiss %xmm3, %xmm4 setb %dil vbroadcastss %xmm4, %ymm20 vmovshdup %xmm4, %xmm0 # xmm0 = xmm4[1,1,3,3] vbroadcastsd %xmm0, %ymm21 vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0] vmulps (%rsi), %xmm4, %xmm7 vpermps %ymm4, %ymm5, %ymm22 vbroadcastss %xmm7, %ymm4 vpermps %ymm7, %ymm8, %ymm8 vpermps %ymm7, %ymm5, %ymm5 shll $0x5, %edi xorl %r13d, %r13d vucomiss %xmm3, %xmm0 setb %r13b shll $0x5, %r13d orq $0x40, %r13 xorl %r14d, %r14d vucomiss %xmm3, %xmm6 setb %r14b shll $0x5, %r14d orq $0x80, %r14 movq %rdi, %rbp xorq $0x20, %rbp movq %r13, %r12 xorq $0x20, %r12 movq %r14, %rbx xorq $0x20, %rbx vbroadcastss %xmm1, %ymm23 vbroadcastss %xmm2, %ymm0 vbroadcastss 0x69867(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm4, %ymm24 vxorps %ymm1, %ymm8, %ymm25 vxorps %ymm1, %ymm5, %ymm26 leaq 0x230(%rsp), %r10 vpmovsxbd 0xa63f3(%rip), %ymm29 # 0x1f5da70 vpbroadcastd 0xa9f35(%rip), %ymm30 # 0x1f615bc movq %rsi, 0x8(%rsp) movq %rdi, 0x10(%rsp) vmovups %ymm20, 0x1f0(%rsp) vmovups %ymm21, 0x1d0(%rsp) vmovups %ymm22, 0x1b0(%rsp) vmovups %ymm23, 0x190(%rsp) vmovups %ymm24, 0x170(%rsp) vmovups %ymm25, 0x150(%rsp) vmovups %ymm26, 0x130(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r10, %r8 je 0x1eb7507 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1eb76e3 movq (%r8), %rcx testb $0x8, %cl jne 0x1eb77dd vmovss 0x1c(%rsi), %xmm8 movl %ecx, %edx andl $0x7, %edx movq %rcx, %rax andq $-0x10, %rax cmpq $0x3, %rdx je 0x1eb7855 vmovaps 0x100(%rax,%rdi), %ymm2 vbroadcastss %xmm8, %ymm1 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm24, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm24 vmovaps 0x100(%rax,%r13), %ymm3 vfmadd213ps 0x40(%rax,%r13), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r14), %ymm4 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm25, %ymm21, %ymm3 # ymm3 = (ymm21 * ymm3) + ymm25 vfmadd213ps %ymm26, %ymm22, %ymm4 # ymm4 = (ymm22 * ymm4) + ymm26 vmovaps 0x100(%rax,%rbp), %ymm5 vfmadd213ps 0x40(%rax,%rbp), %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + mem vmaxps %ymm4, %ymm3, %ymm3 vfmadd213ps %ymm24, %ymm20, %ymm5 # ymm5 = (ymm20 * ymm5) + ymm24 vmovaps 0x100(%rax,%r12), %ymm4 vfmadd213ps 0x40(%rax,%r12), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm25, %ymm21, %ymm4 # ymm4 = (ymm21 * ymm4) + ymm25 vmovaps 0x100(%rax,%rbx), %ymm6 vfmadd213ps 0x40(%rax,%rbx), %ymm1, %ymm6 # ymm6 = (ymm1 * ymm6) + mem vfmadd213ps %ymm26, %ymm22, %ymm6 # ymm6 = (ymm22 * ymm6) + ymm26 vminps %ymm6, %ymm4, %ymm4 vmaxps %ymm2, %ymm23, %ymm2 vmaxps %ymm3, %ymm2, %ymm8 vminps %ymm5, %ymm0, %ymm2 vminps %ymm4, %ymm2, %ymm2 cmpl $0x6, %edx je 0x1eb7ac6 vcmpleps %ymm2, %ymm8, %k0 kmovb %k0, %r15d testb $0x8, %cl jne 0x1eb784e testq %r15, %r15 je 0x1eb7abc andq $-0x10, %rcx vmovdqu (%rcx), %ymm1 vmovdqu 0x20(%rcx), %ymm2 vmovdqa64 %ymm29, %ymm3 vpternlogd $0xf8, %ymm30, %ymm8, %ymm3 kmovd %r15d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) 
xorl %eax, %eax blsrq %r15, %rdx jne 0x1eb7ae2 testl %eax, %eax je 0x1eb76ff jmp 0x1eb7d88 movl $0x6, %eax jmp 0x1eb7841 vmovaps 0x40(%rax), %ymm6 vmovaps 0x60(%rax), %ymm5 vmovaps 0x80(%rax), %ymm4 vmovaps 0xa0(%rax), %ymm12 vmovaps 0xc0(%rax), %ymm11 vmovaps 0xe0(%rax), %ymm7 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vbroadcastss %xmm8, %ymm13 vmovss 0x34e70(%rip), %xmm9 # 0x1eec714 vsubss %xmm8, %xmm9, %xmm14 vmulps 0x1c0(%rax), %ymm13, %ymm8 vmulps 0x1e0(%rax), %ymm13, %ymm9 vmulps 0x200(%rax), %ymm13, %ymm10 vbroadcastss %xmm14, %ymm15 vxorps %xmm14, %xmm14, %xmm14 vfmadd231ps %ymm14, %ymm15, %ymm8 # ymm8 = (ymm15 * ymm14) + ymm8 vfmadd231ps %ymm14, %ymm15, %ymm9 # ymm9 = (ymm15 * ymm14) + ymm9 vfmadd231ps %ymm14, %ymm15, %ymm10 # ymm10 = (ymm15 * ymm14) + ymm10 vmulps 0x220(%rax), %ymm13, %ymm14 vmulps 0x240(%rax), %ymm13, %ymm16 vmulps 0x260(%rax), %ymm13, %ymm17 vaddps %ymm14, %ymm15, %ymm13 vaddps %ymm16, %ymm15, %ymm14 vaddps %ymm17, %ymm15, %ymm15 vmovups 0x70(%rsp), %ymm18 vmulps %ymm1, %ymm18, %ymm16 vmulps %ymm2, %ymm18, %ymm17 vmulps %ymm3, %ymm18, %ymm18 vmovups 0x90(%rsp), %ymm19 vfmadd231ps %ymm12, %ymm19, %ymm16 # ymm16 = (ymm19 * ymm12) + ymm16 vfmadd231ps %ymm11, %ymm19, %ymm17 # ymm17 = (ymm19 * ymm11) + ymm17 vfmadd231ps %ymm7, %ymm19, %ymm18 # ymm18 = (ymm19 * ymm7) + ymm18 vmovups 0xb0(%rsp), %ymm19 vfmadd231ps %ymm6, %ymm19, %ymm16 # ymm16 = (ymm19 * ymm6) + ymm16 vfmadd231ps %ymm5, %ymm19, %ymm17 # ymm17 = (ymm19 * ymm5) + ymm17 vfmadd231ps %ymm4, %ymm19, %ymm18 # ymm18 = (ymm19 * ymm4) + ymm18 vbroadcastss 0x69562(%rip), %ymm27 # 0x1f20ec4 vandps %ymm27, %ymm16, %ymm19 vbroadcastss 0x39676(%rip), %ymm28 # 0x1ef0fe8 vcmpltps %ymm28, %ymm19, %k1 vmovaps %ymm28, %ymm16 {%k1} vandps %ymm27, %ymm17, %ymm19 vcmpltps %ymm28, %ymm19, %k1 vmovaps %ymm28, %ymm17 {%k1} vandps %ymm27, %ymm18, %ymm19 vcmpltps %ymm28, %ymm19, %k1 vmovaps %ymm28, %ymm18 {%k1} vrcp14ps %ymm16, %ymm19 vbroadcastss 0x34d5f(%rip), %ymm27 # 0x1eec714 vfnmadd213ps %ymm27, %ymm19, %ymm16 # ymm16 = -(ymm19 * ymm16) + ymm27 vfmadd132ps %ymm19, %ymm19, %ymm16 # ymm16 = (ymm16 * ymm19) + ymm19 vrcp14ps %ymm17, %ymm19 vfnmadd213ps %ymm27, %ymm19, %ymm17 # ymm17 = -(ymm19 * ymm17) + ymm27 vfmadd132ps %ymm19, %ymm19, %ymm17 # ymm17 = (ymm17 * ymm19) + ymm19 vrcp14ps %ymm18, %ymm19 vfnmadd213ps %ymm27, %ymm19, %ymm18 # ymm18 = -(ymm19 * ymm18) + ymm27 vfmadd132ps %ymm19, %ymm19, %ymm18 # ymm18 = (ymm18 * ymm19) + ymm19 vmovups 0xd0(%rsp), %ymm19 vfmadd213ps 0x160(%rax), %ymm19, %ymm1 # ymm1 = (ymm19 * ymm1) + mem vfmadd213ps 0x180(%rax), %ymm19, %ymm2 # ymm2 = (ymm19 * ymm2) + mem vfmadd213ps 0x1a0(%rax), %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + mem vmovups 0xf0(%rsp), %ymm19 vfmadd231ps %ymm12, %ymm19, %ymm1 # ymm1 = (ymm19 * ymm12) + ymm1 vfmadd231ps %ymm11, %ymm19, %ymm2 # ymm2 = (ymm19 * ymm11) + ymm2 vfmadd231ps %ymm7, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm7) + ymm3 vmovups 0x110(%rsp), %ymm7 vfmadd231ps %ymm6, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm6) + ymm1 vfmadd231ps %ymm5, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm5) + ymm2 vfmadd231ps %ymm4, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm4) + ymm3 vsubps %ymm1, %ymm8, %ymm4 vsubps %ymm2, %ymm9, %ymm5 vsubps %ymm3, %ymm10, %ymm6 vmulps %ymm16, %ymm4, %ymm4 vmulps %ymm17, %ymm5, %ymm5 vmulps %ymm18, %ymm6, %ymm6 vsubps %ymm1, %ymm13, %ymm1 vsubps %ymm2, %ymm14, %ymm2 vsubps %ymm3, %ymm15, %ymm3 vmulps %ymm16, %ymm1, %ymm1 vmulps %ymm17, %ymm2, %ymm2 vmulps %ymm18, %ymm3, %ymm3 vpminsd %ymm1, %ymm4, %ymm7 vpminsd %ymm2, %ymm5, %ymm8 vpminsd 
%ymm3, %ymm6, %ymm9 vmaxps %ymm9, %ymm8, %ymm8 vpmaxsd %ymm1, %ymm4, %ymm1 vpmaxsd %ymm2, %ymm5, %ymm2 vpmaxsd %ymm3, %ymm6, %ymm3 vminps %ymm3, %ymm2, %ymm2 vmaxps %ymm7, %ymm23, %ymm3 vmaxps %ymm8, %ymm3, %ymm8 vminps %ymm1, %ymm0, %ymm1 vminps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm8, %k0 jmp 0x1eb77d9 movl $0x4, %eax jmp 0x1eb7841 vcmpleps %ymm2, %ymm8, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1eb77d9 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rdx, %rcx jne 0x1eb7b46 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rcx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1eb7841 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eb7bcf vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1eb7841 vmovdqa %ymm8, %ymm10 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eb7c93 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm10, %ymm8 vpermd %ymm10, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm10, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm10, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi jmp 0x1eb7841 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x6921d(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0xa9974(%rip), %ymm9 # 0x1f61620 vpermt2d %ymm8, %ymm9, %ymm3 vpmovsxbd 0xa996d(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0xa9960(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x5aa1e(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eb7cd9 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm10, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd 
$0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eb7d43 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rcx movq %rdi, %rsi movq %r9, %rdi vmovdqa %ymm10, %ymm8 jmp 0x1eb7841 cmpl $0x6, %eax jne 0x1eb76de andq $-0x10, %rcx movzbl (%rcx), %eax movq 0x18(%rsp), %rdx movq %r8, 0x28(%rsp) movq 0x8(%rdx), %r8 shll $0x6, %eax leaq 0x30(%rsp), %rdi movq 0x8(%rsp), %rsi movq 0x20(%rsp), %rdx vmovdqu %ymm8, 0x210(%rsp) vzeroupper callq *(%r8,%rax) vmovdqu 0x210(%rsp), %ymm8 vpbroadcastd 0xa97e1(%rip), %ymm30 # 0x1f615bc vpmovsxbd 0xa5c8b(%rip), %ymm29 # 0x1f5da70 leaq 0x230(%rsp), %r10 vmovups 0x130(%rsp), %ymm26 vmovups 0x150(%rsp), %ymm25 vmovups 0x170(%rsp), %ymm24 vmovups 0x190(%rsp), %ymm23 vmovups 0x1b0(%rsp), %ymm22 vmovups 0x1d0(%rsp), %ymm21 vmovups 0x1f0(%rsp), %ymm20 movq 0x10(%rsp), %rdi movq 0x28(%rsp), %r8 movq 0x8(%rsp), %rsi vbroadcastss 0x20(%rsi), %ymm0 jmp 0x1eb76de
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
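A recurring pattern in these intersect() listings is the reciprocal-direction setup: vrcp14ps produces a ~14-bit estimate of 1/x, and a single Newton-Raphson step (vfnmadd213ps then vfmadd132ps in the listing above) refines it. The helper below is a minimal sketch of that refinement; rcp_nr is a hypothetical name, and the real code additionally clamps near-zero direction components first (the vandps/vcmpltps/blend sequence) so the reciprocal stays finite.

#include <immintrin.h>

// One Newton-Raphson refinement of the AVX-512VL reciprocal estimate:
//   e  = 1 - a*x0
//   x1 = x0 + e*x0
static inline __m128 rcp_nr(__m128 a)
{
  const __m128 x0  = _mm_rcp14_ps(a);            // ~14-bit initial estimate
  const __m128 one = _mm_set1_ps(1.0f);
  const __m128 e   = _mm_fnmadd_ps(a, x0, one);  // e = -(a*x0) + 1
  return _mm_fmadd_ps(e, x0, x0);                // x1 = e*x0 + x0
}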
embree::avx512::BVHNIntersector1<8, 257, true, embree::avx512::VirtualCurveIntersector1>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode)
    return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2578, %rsp # imm = 0x2578 movq %rdx, 0x20(%rsp) movq %rdi, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eb8005 vmovaps 0x10(%rsi), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmovss 0x34876(%rip), %xmm4 # 0x1eec718 vmulss %xmm4, %xmm3, %xmm5 vmovss 0x34cd2(%rip), %xmm6 # 0x1eecb80 vmulss %xmm6, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm5, %xmm2 vbroadcastss %xmm2, %xmm3 vmulps %xmm3, %xmm0, %xmm5 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vmovshdup %xmm5, %xmm8 # xmm8 = xmm5[1,1,3,3] vbroadcastss 0x68fe3(%rip), %xmm9 # 0x1f20ec0 vxorps %xmm9, %xmm8, %xmm8 vunpckhps %xmm1, %xmm5, %xmm10 # xmm10 = xmm5[2],xmm1[2],xmm5[3],xmm1[3] vmovss %xmm8, %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[1,2,3] vshufps $0x41, %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[1,0],xmm8[0,1] vxorpd %xmm7, %xmm9, %xmm7 vinsertps $0x2a, %xmm5, %xmm7, %xmm7 # xmm7 = xmm7[0],zero,xmm5[0],zero vdpps $0x7f, %xmm8, %xmm8, %xmm9 vdpps $0x7f, %xmm7, %xmm7, %xmm10 vcmpltps %xmm9, %xmm10, %k0 vpmovm2d %k0, %xmm9 vpbroadcastd %xmm9, %xmm9 vpmovd2m %xmm9, %k1 vpcmpeqd %xmm9, %xmm9, %xmm9 vmovaps %xmm9, %xmm9 {%k1} {z} vblendvps %xmm9, %xmm8, %xmm7, %xmm7 vdpps $0x7f, %xmm7, %xmm7, %xmm8 vmovss %xmm8, %xmm1, %xmm9 # xmm9 = xmm8[0],xmm1[1,2,3] vrsqrt14ss %xmm9, %xmm1, %xmm9 vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm9, %xmm8, %xmm8 vmulss %xmm9, %xmm9, %xmm10 vmulss %xmm10, %xmm8, %xmm8 vmulss %xmm4, %xmm9, %xmm9 vsubss %xmm8, %xmm9, %xmm8 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3] vmulps %xmm7, %xmm9, %xmm9 vfmsub231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,2,0,3] vdpps $0x7f, %xmm8, %xmm8, %xmm9 vmovss %xmm2, 0x30(%rsp) vmovss %xmm9, %xmm1, %xmm2 vrsqrt14ss %xmm2, %xmm1, %xmm2 vmulss %xmm4, %xmm2, %xmm4 vmulss %xmm6, %xmm9, %xmm6 vmulss %xmm2, %xmm6, %xmm6 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm6, %xmm2 vsubss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm8, %xmm2 vmulps %xmm5, %xmm3, %xmm3 vunpcklps %xmm3, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] vunpckhps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[2],xmm3[2],xmm7[3],xmm3[3] vunpcklps %xmm1, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vunpckhps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] vunpcklps %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] vunpcklps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vunpckhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vmovaps %xmm2, 0x40(%rsp) vmovaps %xmm3, 0x50(%rsp) vmovaps %xmm1, 0x60(%rsp) movq 0x70(%rax), %rax movq %rax, 0x230(%rsp) movl $0x0, 0x238(%rsp) cmpq $0x8, %rax jne 0x1eb801a addq $0x2578, %rsp # imm = 0x2578 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x68e92(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 leaq 0x240(%rsp), %r8 vcmpltps 0x38fa3(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 vbroadcastss 0x346c6(%rip), %xmm4 # 0x1eec714 vdivps %xmm0, %xmm4, %xmm4 vbroadcastss 0x68f04(%rip), %xmm4 {%k1} # 0x1f20f60 vmulps 0x67eaa(%rip){1to4}, %xmm4, %xmm5 # 0x1f1ff10 vmulps 0x67ea4(%rip){1to4}, 
%xmm4, %xmm4 # 0x1f1ff14 vbroadcastss (%rsi), %ymm16 vbroadcastss 0x4(%rsi), %ymm17 vbroadcastss 0x8(%rsi), %ymm18 vbroadcastss %xmm0, %ymm6 vmovups %ymm6, 0xb0(%rsp) vbroadcastss 0x5a669(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm6 vmovups %ymm6, 0x90(%rsp) vbroadcastss 0x68e2a(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm0 vmovups %ymm0, 0x70(%rsp) xorl %edi, %edi vucomiss %xmm1, %xmm5 setb %dil vbroadcastss %xmm5, %ymm19 vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3] vbroadcastsd %xmm0, %ymm20 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vpermps %ymm5, %ymm6, %ymm21 vbroadcastss %xmm4, %ymm22 vpermps %ymm4, %ymm8, %ymm23 vpermps %ymm4, %ymm6, %ymm24 shll $0x5, %edi xorl %ebp, %ebp vucomiss %xmm1, %xmm0 setb %bpl shll $0x5, %ebp orq $0x40, %rbp xorl %r10d, %r10d vucomiss %xmm1, %xmm7 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %rbp, %rbx xorq $0x20, %rbx movq %r10, %r14 xorq $0x20, %r14 vbroadcastss %xmm2, %ymm25 vbroadcastss %xmm3, %ymm0 vpmovsxbd 0xa5928(%rip), %ymm26 # 0x1f5da70 vpbroadcastd 0xa946a(%rip), %ymm27 # 0x1f615bc leaq 0x230(%rsp), %r13 vmovups %ymm16, 0x1f0(%rsp) vmovups %ymm17, 0x1d0(%rsp) vmovups %ymm18, 0x1b0(%rsp) vmovups %ymm19, 0x190(%rsp) vmovups %ymm20, 0x170(%rsp) vmovups %ymm21, 0x150(%rsp) vmovups %ymm22, 0x130(%rsp) vmovups %ymm23, 0x110(%rsp) vmovups %ymm24, 0xf0(%rsp) movq %rdi, 0x10(%rsp) movq %r14, 0x8(%rsp) vmovups %ymm25, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r13, %r8 je 0x1eb8005 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1eb81d7 movq (%r8), %rcx movq %rcx, %rax andq $0xf, %rax jne 0x1eb82fe vmovaps 0x40(%rcx,%rdi), %ymm1 vsubps %ymm16, %ymm1, %ymm1 vmulps %ymm1, %ymm19, %ymm1 vmovaps 0x40(%rcx,%rbp), %ymm2 vsubps %ymm17, %ymm2, %ymm2 vmulps %ymm2, %ymm20, %ymm2 vmaxps %ymm2, %ymm1, %ymm1 vmovaps 0x40(%rcx,%r10), %ymm2 vsubps %ymm18, %ymm2, %ymm2 vmulps %ymm2, %ymm21, %ymm2 vmovaps 0x40(%rcx,%r11), %ymm3 vsubps %ymm16, %ymm3, %ymm3 vmulps %ymm3, %ymm22, %ymm3 vmovaps 0x40(%rcx,%rbx), %ymm4 vsubps %ymm17, %ymm4, %ymm4 vmulps %ymm4, %ymm23, %ymm4 vminps %ymm4, %ymm3, %ymm3 vmovaps 0x40(%rcx,%r14), %ymm4 vsubps %ymm18, %ymm4, %ymm4 vmulps %ymm4, %ymm24, %ymm4 vmaxps %ymm25, %ymm2, %ymm2 vmaxps %ymm2, %ymm1, %ymm8 vminps %ymm0, %ymm4, %ymm1 vminps %ymm1, %ymm3, %ymm1 vcmpleps %ymm1, %ymm8, %k0 kmovb %k0, %r15d movb $0x1, %al testb %al, %al je 0x1eb8307 testq %r15, %r15 je 0x1eb830e andq $-0x10, %rcx vmovdqu (%rcx), %ymm1 vmovdqu 0x20(%rcx), %ymm2 vmovdqa64 %ymm26, %ymm3 vpternlogd $0xf8, %ymm27, %ymm8, %ymm3 kmovd %r15d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) xorl %eax, %eax blsrq %r15, %rdx jne 0x1eb8315 testl %eax, %eax je 0x1eb81f3 jmp 0x1eb879c cmpl $0x2, %eax je 0x1eb837d xorl %eax, %eax jmp 0x1eb8296 movl $0x6, %eax jmp 0x1eb82f1 movl $0x4, %eax jmp 0x1eb82f1 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rdx, %rcx jne 0x1eb853f vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rcx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1eb82f1 movq %rcx, %rax andq $-0x10, %rax vmovaps 0x100(%rax), %ymm6 vmovups 0x70(%rsp), %ymm3 vmulps %ymm6, 
%ymm3, %ymm1 vmovaps 0x120(%rax), %ymm5 vmulps %ymm5, %ymm3, %ymm2 vmovaps 0x140(%rax), %ymm4 vmulps %ymm4, %ymm3, %ymm3 vmovaps 0x40(%rax), %ymm8 vmovaps 0x60(%rax), %ymm9 vmovaps 0x80(%rax), %ymm7 vmovaps 0xa0(%rax), %ymm10 vmovups 0x90(%rsp), %ymm13 vfmadd231ps %ymm10, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm10) + ymm1 vmovaps 0xc0(%rax), %ymm11 vfmadd231ps %ymm11, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm11) + ymm2 vmovaps 0xe0(%rax), %ymm12 vfmadd231ps %ymm12, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm12) + ymm3 vmovups 0xb0(%rsp), %ymm13 vfmadd231ps %ymm8, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm8) + ymm1 vfmadd231ps %ymm9, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm9) + ymm2 vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3 vbroadcastss 0x68ab3(%rip), %ymm14 # 0x1f20ec4 vandps %ymm1, %ymm14, %ymm13 vbroadcastss 0x38bca(%rip), %ymm15 # 0x1ef0fe8 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm1 {%k1} vandps %ymm2, %ymm14, %ymm13 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm2 {%k1} vandps %ymm3, %ymm14, %ymm13 vcmpltps %ymm15, %ymm13, %k1 vmovaps %ymm15, %ymm3 {%k1} vrcp14ps %ymm1, %ymm13 vbroadcastss 0x342b8(%rip), %ymm14 # 0x1eec714 vfnmadd213ps %ymm14, %ymm13, %ymm1 # ymm1 = -(ymm13 * ymm1) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm1 # ymm1 = (ymm1 * ymm13) + ymm13 vrcp14ps %ymm2, %ymm13 vfnmadd213ps %ymm14, %ymm13, %ymm2 # ymm2 = -(ymm13 * ymm2) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm2 # ymm2 = (ymm2 * ymm13) + ymm13 vrcp14ps %ymm3, %ymm13 vfnmadd213ps %ymm14, %ymm13, %ymm3 # ymm3 = -(ymm13 * ymm3) + ymm14 vfmadd132ps %ymm13, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm13) + ymm13 vbroadcastss 0x68a31(%rip), %ymm15 # 0x1f20ec0 vxorps %ymm1, %ymm15, %ymm13 vxorps %ymm2, %ymm15, %ymm14 vfmadd213ps 0x160(%rax), %ymm18, %ymm6 # ymm6 = (ymm18 * ymm6) + mem vfmadd213ps 0x180(%rax), %ymm18, %ymm5 # ymm5 = (ymm18 * ymm5) + mem vfmadd213ps 0x1a0(%rax), %ymm18, %ymm4 # ymm4 = (ymm18 * ymm4) + mem vxorps %ymm3, %ymm15, %ymm15 vfmadd231ps %ymm10, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm10) + ymm6 vfmadd231ps %ymm11, %ymm17, %ymm5 # ymm5 = (ymm17 * ymm11) + ymm5 vfmadd231ps %ymm12, %ymm17, %ymm4 # ymm4 = (ymm17 * ymm12) + ymm4 vfmadd231ps %ymm8, %ymm16, %ymm6 # ymm6 = (ymm16 * ymm8) + ymm6 vmulps %ymm6, %ymm13, %ymm6 vfmadd231ps %ymm9, %ymm16, %ymm5 # ymm5 = (ymm16 * ymm9) + ymm5 vmulps %ymm5, %ymm14, %ymm5 vfmadd231ps %ymm7, %ymm16, %ymm4 # ymm4 = (ymm16 * ymm7) + ymm4 vmulps %ymm4, %ymm15, %ymm4 vaddps %ymm6, %ymm1, %ymm1 vaddps %ymm5, %ymm2, %ymm2 vaddps %ymm4, %ymm3, %ymm3 vpminsd %ymm1, %ymm6, %ymm7 vpminsd %ymm2, %ymm5, %ymm8 vpminsd %ymm3, %ymm4, %ymm9 vmaxps %ymm9, %ymm8, %ymm8 vpmaxsd %ymm1, %ymm6, %ymm1 vpmaxsd %ymm2, %ymm5, %ymm2 vpmaxsd %ymm3, %ymm4, %ymm3 vminps %ymm3, %ymm2, %ymm2 vmaxps %ymm7, %ymm25, %ymm3 vmaxps %ymm8, %ymm3, %ymm3 vminps %ymm1, %ymm0, %ymm1 vminps %ymm2, %ymm1, %ymm1 vmulps 0x679e0(%rip){1to8}, %ymm3, %ymm8 # 0x1f1ff10 vmulps 0x679da(%rip){1to8}, %ymm1, %ymm1 # 0x1f1ff14 jmp 0x1eb8289 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eb85c8 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 
0x18(%r8) addq $0x20, %r8 jmp 0x1eb82f1 vmovdqa %ymm8, %ymm10 movq %r14, %r12 movq %r11, %r14 movq %r10, %r11 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eb869e vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm10, %ymm8 vpermd %ymm10, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm10, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm10, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi movq %r11, %r10 movq %r14, %r11 movq %r12, %r14 jmp 0x1eb82f1 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x68812(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0xa8f69(%rip), %ymm9 # 0x1f61620 vpermt2d %ymm8, %ymm9, %ymm3 vpmovsxbd 0xa8f62(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0xa8f55(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x5a013(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eb86e4 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm10, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eb874e vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rcx movq %rdi, %rsi movq %r9, %rdi movq %r11, %r10 movq %r14, %r11 movq %r12, %r14 vmovdqa %ymm10, %ymm8 jmp 0x1eb82f1 cmpl $0x6, %eax jne 0x1eb81d2 andq $-0x10, %rcx movzbl (%rcx), %eax movq 0x18(%rsp), %rdx movq %r8, 0x28(%rsp) movq 0x8(%rdx), %r8 shll $0x6, %eax leaq 0x30(%rsp), %rdi movq %rsi, %r14 movq 0x20(%rsp), %rdx movq %r10, %r12 movq %rbx, %r13 movq %r11, %rbx vmovdqu %ymm8, 0x210(%rsp) vzeroupper callq *(%r8,%rax) vmovdqu 0x210(%rsp), %ymm8 vpbroadcastd 0xa8dc6(%rip), %ymm27 # 0x1f615bc vpmovsxbd 0xa5270(%rip), %ymm26 # 0x1f5da70 vmovups 0xd0(%rsp), %ymm25 movq %rbx, %r11 movq %r13, %rbx movq %r12, %r10 leaq 0x230(%rsp), %r13 vmovups 0xf0(%rsp), %ymm24 vmovups 0x110(%rsp), %ymm23 vmovups 0x130(%rsp), %ymm22 vmovups 0x150(%rsp), %ymm21 vmovups 0x170(%rsp), %ymm20 vmovups 0x190(%rsp), %ymm19 movq 0x10(%rsp), %rdi vmovups 0x1b0(%rsp), %ymm18 vmovups 0x1d0(%rsp), %ymm17 vmovups 0x1f0(%rsp), %ymm16 movq 0x28(%rsp), %r8 movq %r14, %rsi movq 0x8(%rsp), %r14 vbroadcastss 0x20(%rsi), %ymm0 jmp 0x1eb81d2
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
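The intersect() source above organizes the whole traversal around a small fixed-size stack whose items carry an entry distance, so a pop can be discarded outright once a closer primitive hit has shrunk ray.tfar. Below is an illustrative model of just that pop/cull mechanic; StackItem mirrors StackItemT<NodeRef>, while the three callbacks (isLeaf, intersectLeaf, pushChildren) are stand-ins for Embree's node intersector and traverser, not its actual API.

#include <cstdint>
#include <limits>

struct StackItem { uint64_t ptr; float dist; };

template<typename IsLeaf, typename IntersectLeaf, typename PushChildren>
void traverse(uint64_t root, float& tfar,
              IsLeaf isLeaf, IntersectLeaf intersectLeaf, PushChildren pushChildren)
{
  StackItem stack[64];
  StackItem* stackPtr = stack + 1;
  stack[0] = { root, -std::numeric_limits<float>::infinity() };

  while (stackPtr != stack) {
    const StackItem cur = *--stackPtr;
    // a node pushed before tfar shrank may now lie beyond the closest hit
    if (cur.dist > tfar) continue;
    if (isLeaf(cur.ptr))
      intersectLeaf(cur.ptr, tfar);               // primitive hits shrink tfar
    else
      stackPtr = pushChildren(cur.ptr, stackPtr); // push hit children, farthest first
  }
}

Pushing the farther children first is what makes the dist > tfar cull effective: the nearer subtree is explored before the deferred entries are revisited.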
embree::avx512::BVHNIntersector1<8, 16781328, true, embree::avx512::VirtualCurveIntersector1>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode)
    return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2578, %rsp # imm = 0x2578 movq %rdx, 0x20(%rsp) movq %rdi, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eb8a4d vmovaps 0x10(%rsi), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmovss %xmm2, %xmm1, %xmm3 # xmm3 = xmm2[0],xmm1[1,2,3] vrsqrt14ss %xmm3, %xmm1, %xmm3 vmovss 0x33e2e(%rip), %xmm4 # 0x1eec718 vmulss %xmm4, %xmm3, %xmm5 vmovss 0x3428a(%rip), %xmm6 # 0x1eecb80 vmulss %xmm6, %xmm2, %xmm2 vmulss %xmm3, %xmm2, %xmm2 vmulss %xmm3, %xmm3, %xmm3 vmulss %xmm3, %xmm2, %xmm2 vsubss %xmm2, %xmm5, %xmm2 vbroadcastss %xmm2, %xmm3 vmulps %xmm3, %xmm0, %xmm5 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vmovshdup %xmm5, %xmm8 # xmm8 = xmm5[1,1,3,3] vbroadcastss 0x6859b(%rip), %xmm9 # 0x1f20ec0 vxorps %xmm9, %xmm8, %xmm8 vunpckhps %xmm1, %xmm5, %xmm10 # xmm10 = xmm5[2],xmm1[2],xmm5[3],xmm1[3] vmovss %xmm8, %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[1,2,3] vshufps $0x41, %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[1,0],xmm8[0,1] vxorpd %xmm7, %xmm9, %xmm7 vinsertps $0x2a, %xmm5, %xmm7, %xmm7 # xmm7 = xmm7[0],zero,xmm5[0],zero vdpps $0x7f, %xmm8, %xmm8, %xmm9 vdpps $0x7f, %xmm7, %xmm7, %xmm10 vcmpltps %xmm9, %xmm10, %k0 vpmovm2d %k0, %xmm9 vpbroadcastd %xmm9, %xmm9 vpmovd2m %xmm9, %k1 vpcmpeqd %xmm9, %xmm9, %xmm9 vmovaps %xmm9, %xmm9 {%k1} {z} vblendvps %xmm9, %xmm8, %xmm7, %xmm7 vdpps $0x7f, %xmm7, %xmm7, %xmm8 vmovss %xmm8, %xmm1, %xmm9 # xmm9 = xmm8[0],xmm1[1,2,3] vrsqrt14ss %xmm9, %xmm1, %xmm9 vmulss %xmm6, %xmm8, %xmm8 vmulss %xmm9, %xmm8, %xmm8 vmulss %xmm9, %xmm9, %xmm10 vmulss %xmm10, %xmm8, %xmm8 vmulss %xmm4, %xmm9, %xmm9 vsubss %xmm8, %xmm9, %xmm8 vbroadcastss %xmm8, %xmm8 vmulps %xmm7, %xmm8, %xmm7 vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3] vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3] vmulps %xmm7, %xmm9, %xmm9 vfmsub231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) - xmm9 vshufps $0xc9, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,2,0,3] vdpps $0x7f, %xmm8, %xmm8, %xmm9 vmovss %xmm2, 0x30(%rsp) vmovss %xmm9, %xmm1, %xmm2 vrsqrt14ss %xmm2, %xmm1, %xmm2 vmulss %xmm4, %xmm2, %xmm4 vmulss %xmm6, %xmm9, %xmm6 vmulss %xmm2, %xmm6, %xmm6 vmulss %xmm2, %xmm2, %xmm2 vmulss %xmm2, %xmm6, %xmm2 vsubss %xmm2, %xmm4, %xmm2 vbroadcastss %xmm2, %xmm2 vmulps %xmm2, %xmm8, %xmm2 vmulps %xmm5, %xmm3, %xmm3 vunpcklps %xmm3, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] vunpckhps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[2],xmm3[2],xmm7[3],xmm3[3] vunpcklps %xmm1, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] vunpckhps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] vunpcklps %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] vunpcklps %xmm5, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] vunpckhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] vmovaps %xmm2, 0x40(%rsp) vmovaps %xmm3, 0x50(%rsp) vmovaps %xmm1, 0x60(%rsp) movq 0x70(%rax), %rax movq %rax, 0x230(%rsp) movl $0x0, 0x238(%rsp) cmpq $0x8, %rax jne 0x1eb8a62 addq $0x2578, %rsp # imm = 0x2578 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x6844a(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vcmpltps 0x38563(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 vbroadcastss 0x33c86(%rip), %xmm4 # 0x1eec714 vdivps %xmm0, %xmm4, %xmm4 vbroadcastss 0x684c4(%rip), %xmm4 {%k1} # 0x1f20f60 vmulps 0x6746a(%rip){1to4}, %xmm4, %xmm5 # 0x1f1ff10 vmulps 0x67464(%rip){1to4}, %xmm4, %xmm4 # 
0x1f1ff14 vbroadcastss (%rsi), %ymm20 leaq 0x240(%rsp), %r8 vbroadcastss 0x4(%rsi), %ymm21 vbroadcastss 0x8(%rsi), %ymm22 vbroadcastss %xmm0, %ymm6 vmovups %ymm6, 0xb0(%rsp) vbroadcastss 0x59c21(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm6 vmovups %ymm6, 0x90(%rsp) vbroadcastss 0x683e2(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm0 vmovups %ymm0, 0x70(%rsp) xorl %edi, %edi vucomiss %xmm1, %xmm5 setb %dil vbroadcastss %xmm5, %ymm23 vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3] vbroadcastsd %xmm0, %ymm24 vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0] vpermps %ymm5, %ymm6, %ymm25 vbroadcastss %xmm4, %ymm26 vpermps %ymm4, %ymm8, %ymm27 vpermps %ymm4, %ymm6, %ymm28 shll $0x5, %edi xorl %r13d, %r13d vucomiss %xmm1, %xmm0 setb %r13b shll $0x5, %r13d orq $0x40, %r13 xorl %r14d, %r14d vucomiss %xmm1, %xmm7 setb %r14b shll $0x5, %r14d orq $0x80, %r14 movq %rdi, %rbp xorq $0x20, %rbp movq %r13, %r12 xorq $0x20, %r12 movq %r14, %rbx xorq $0x20, %rbx vbroadcastss %xmm2, %ymm29 vbroadcastss %xmm3, %ymm0 leaq 0x230(%rsp), %r10 vpmovsxbd 0xa4ed7(%rip), %ymm8 # 0x1f5da70 vpbroadcastd 0xa8a1a(%rip), %ymm9 # 0x1f615bc movq %rsi, 0x8(%rsp) vmovups %ymm20, 0x1f0(%rsp) vmovups %ymm21, 0x1d0(%rsp) vmovups %ymm22, 0x1b0(%rsp) movq %rdi, 0x10(%rsp) vmovups %ymm23, 0x190(%rsp) vmovups %ymm24, 0x170(%rsp) vmovups %ymm25, 0x150(%rsp) vmovups %ymm26, 0x130(%rsp) vmovups %ymm27, 0x110(%rsp) vmovups %ymm28, 0xf0(%rsp) vmovups %ymm29, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r10, %r8 je 0x1eb8a4d vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1eb8c1f movq (%r8), %rcx testb $0x8, %cl jne 0x1eb8d3d vmovss 0x1c(%rsi), %xmm7 movl %ecx, %edx andl $0x7, %edx movq %rcx, %rax andq $-0x10, %rax cmpq $0x3, %rdx je 0x1eb8db3 vmovaps 0x100(%rax,%rdi), %ymm2 vbroadcastss %xmm7, %ymm1 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm20, %ymm2, %ymm2 vmovaps 0x100(%rax,%r13), %ymm3 vmulps %ymm2, %ymm23, %ymm2 vfmadd213ps 0x40(%rax,%r13), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm21, %ymm3, %ymm3 vmovaps 0x100(%rax,%r14), %ymm4 vmulps %ymm3, %ymm24, %ymm3 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm22, %ymm4, %ymm4 vmulps %ymm4, %ymm25, %ymm4 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm2, %ymm29, %ymm2 vmaxps %ymm3, %ymm2, %ymm10 vmovaps 0x100(%rax,%rbp), %ymm2 vfmadd213ps 0x40(%rax,%rbp), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm20, %ymm2, %ymm2 vmulps %ymm2, %ymm26, %ymm2 vmovaps 0x100(%rax,%r12), %ymm3 vfmadd213ps 0x40(%rax,%r12), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm21, %ymm3, %ymm3 vmulps %ymm3, %ymm27, %ymm3 vmovaps 0x100(%rax,%rbx), %ymm4 vfmadd213ps 0x40(%rax,%rbx), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm22, %ymm4, %ymm4 vmulps %ymm4, %ymm28, %ymm4 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 cmpl $0x6, %edx je 0x1eb901d vcmpleps %ymm2, %ymm10, %k0 kmovb %k0, %r15d testb $0x8, %cl jne 0x1eb8dac testq %r15, %r15 je 0x1eb9013 andq $-0x10, %rcx vmovdqu (%rcx), %ymm1 vmovdqu 0x20(%rcx), %ymm2 vmovdqa %ymm8, %ymm3 vpternlogd $0xf8, %ymm9, %ymm10, %ymm3 kmovd %r15d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) xorl %eax, %eax blsrq %r15, %rdx jne 0x1eb9039 testl %eax, %eax je 0x1eb8c3b jmp 0x1eb92dc movl $0x6, %eax jmp 0x1eb8d9f vmovaps 0x40(%rax), %ymm6 vmovaps 0x60(%rax), %ymm5 vmovaps 0x80(%rax), 
%ymm4 vmovaps 0xa0(%rax), %ymm14 vmovaps 0xc0(%rax), %ymm11 vmovaps 0xe0(%rax), %ymm9 vmovaps 0x100(%rax), %ymm3 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm1 vbroadcastss %xmm7, %ymm12 vmovss 0x33912(%rip), %xmm8 # 0x1eec714 vsubss %xmm7, %xmm8, %xmm7 vmulps 0x1c0(%rax), %ymm12, %ymm10 vbroadcastss %xmm7, %ymm13 vmulps 0x1e0(%rax), %ymm12, %ymm8 vmulps 0x200(%rax), %ymm12, %ymm7 vxorps %xmm15, %xmm15, %xmm15 vfmadd231ps %ymm15, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm15) + ymm10 vfmadd231ps %ymm15, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm15) + ymm8 vfmadd231ps %ymm15, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm15) + ymm7 vmulps 0x220(%rax), %ymm12, %ymm15 vmulps 0x240(%rax), %ymm12, %ymm16 vmulps 0x260(%rax), %ymm12, %ymm12 vaddps %ymm15, %ymm13, %ymm18 vaddps %ymm16, %ymm13, %ymm17 vaddps %ymm12, %ymm13, %ymm16 vmovups 0x70(%rsp), %ymm13 vmulps %ymm3, %ymm13, %ymm15 vmulps %ymm2, %ymm13, %ymm12 vmulps %ymm1, %ymm13, %ymm13 vmovups 0x90(%rsp), %ymm19 vfmadd231ps %ymm14, %ymm19, %ymm15 # ymm15 = (ymm19 * ymm14) + ymm15 vfmadd231ps %ymm11, %ymm19, %ymm12 # ymm12 = (ymm19 * ymm11) + ymm12 vfmadd231ps %ymm9, %ymm19, %ymm13 # ymm13 = (ymm19 * ymm9) + ymm13 vmovups 0xb0(%rsp), %ymm19 vfmadd231ps %ymm6, %ymm19, %ymm15 # ymm15 = (ymm19 * ymm6) + ymm15 vfmadd231ps %ymm5, %ymm19, %ymm12 # ymm12 = (ymm19 * ymm5) + ymm12 vfmadd231ps %ymm4, %ymm19, %ymm13 # ymm13 = (ymm19 * ymm4) + ymm13 vbroadcastss 0x6800e(%rip), %ymm30 # 0x1f20ec4 vandps %ymm30, %ymm15, %ymm19 vbroadcastss 0x38122(%rip), %ymm31 # 0x1ef0fe8 vcmpltps %ymm31, %ymm19, %k1 vmovaps %ymm31, %ymm15 {%k1} vandps %ymm30, %ymm12, %ymm19 vcmpltps %ymm31, %ymm19, %k1 vmovaps %ymm31, %ymm12 {%k1} vandps %ymm30, %ymm13, %ymm19 vcmpltps %ymm31, %ymm19, %k1 vmovaps %ymm31, %ymm13 {%k1} vrcp14ps %ymm15, %ymm19 vbroadcastss 0x3380b(%rip), %ymm30 # 0x1eec714 vfnmadd213ps %ymm30, %ymm19, %ymm15 # ymm15 = -(ymm19 * ymm15) + ymm30 vfmadd132ps %ymm19, %ymm19, %ymm15 # ymm15 = (ymm15 * ymm19) + ymm19 vrcp14ps %ymm12, %ymm19 vfnmadd213ps %ymm30, %ymm19, %ymm12 # ymm12 = -(ymm19 * ymm12) + ymm30 vfmadd132ps %ymm19, %ymm19, %ymm12 # ymm12 = (ymm12 * ymm19) + ymm19 vrcp14ps %ymm13, %ymm19 vfnmadd213ps %ymm30, %ymm19, %ymm13 # ymm13 = -(ymm19 * ymm13) + ymm30 vfmadd132ps %ymm19, %ymm19, %ymm13 # ymm13 = (ymm13 * ymm19) + ymm19 vfmadd213ps 0x160(%rax), %ymm22, %ymm3 # ymm3 = (ymm22 * ymm3) + mem vfmadd213ps 0x180(%rax), %ymm22, %ymm2 # ymm2 = (ymm22 * ymm2) + mem vfmadd231ps %ymm14, %ymm21, %ymm3 # ymm3 = (ymm21 * ymm14) + ymm3 vfmadd231ps %ymm11, %ymm21, %ymm2 # ymm2 = (ymm21 * ymm11) + ymm2 vfmadd213ps 0x1a0(%rax), %ymm22, %ymm1 # ymm1 = (ymm22 * ymm1) + mem vfmadd231ps %ymm9, %ymm21, %ymm1 # ymm1 = (ymm21 * ymm9) + ymm1 vpbroadcastd 0xa8653(%rip), %ymm9 # 0x1f615bc vfmadd231ps %ymm6, %ymm20, %ymm3 # ymm3 = (ymm20 * ymm6) + ymm3 vfmadd231ps %ymm5, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm5) + ymm2 vfmadd231ps %ymm4, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm4) + ymm1 vsubps %ymm3, %ymm10, %ymm4 vsubps %ymm3, %ymm18, %ymm3 vsubps %ymm2, %ymm8, %ymm5 vsubps %ymm2, %ymm17, %ymm2 vsubps %ymm1, %ymm7, %ymm6 vsubps %ymm1, %ymm16, %ymm1 vmulps %ymm4, %ymm15, %ymm4 vmulps %ymm3, %ymm15, %ymm3 vmulps %ymm5, %ymm12, %ymm5 vmulps %ymm6, %ymm13, %ymm6 vmulps %ymm2, %ymm12, %ymm2 vmulps %ymm1, %ymm13, %ymm1 vpminsd %ymm2, %ymm5, %ymm7 vpminsd %ymm1, %ymm6, %ymm8 vmaxps %ymm8, %ymm7, %ymm7 vpminsd %ymm3, %ymm4, %ymm8 vpmaxsd %ymm3, %ymm4, %ymm3 vpmaxsd %ymm2, %ymm5, %ymm2 vpmaxsd %ymm1, %ymm6, %ymm1 vminps %ymm1, %ymm2, %ymm1 vmaxps %ymm8, %ymm29, %ymm2 vpmovsxbd 0xa4a89(%rip), %ymm8 # 
0x1f5da70 vmaxps %ymm7, %ymm2, %ymm2 vminps %ymm3, %ymm0, %ymm3 vminps %ymm1, %ymm3, %ymm1 vmulps 0x66f13(%rip){1to8}, %ymm2, %ymm10 # 0x1f1ff10 vmulps 0x66f0d(%rip){1to8}, %ymm1, %ymm1 # 0x1f1ff14 vcmpleps %ymm1, %ymm10, %k0 jmp 0x1eb8d39 movl $0x4, %eax jmp 0x1eb8d9f vcmpleps %ymm2, %ymm10, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1eb8d39 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) prefetcht0 0xc0(%rcx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rdx, %rcx jne 0x1eb909d vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rcx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm10, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1eb8d9f vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eb9126 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1eb8d9f movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eb91f2 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rcx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm10, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm10, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi vpmovsxbd 0xa488c(%rip), %ymm8 # 0x1f5da70 vpbroadcastd 0xa83cf(%rip), %ymm9 # 0x1f615bc jmp 0x1eb8d9f valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x67cbe(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0xa8415(%rip), %ymm9 # 0x1f61620 vpermt2d %ymm8, %ymm9, %ymm3 vpmovsxbd 0xa840e(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0xa8401(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x594bf(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eb9238 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm10, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, 
%ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eb92a2 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rcx jmp 0x1eb91d5 cmpl $0x6, %eax jne 0x1eb8c1a andq $-0x10, %rcx movzbl (%rcx), %eax movq 0x18(%rsp), %rdx movq %r8, 0x28(%rsp) movq 0x8(%rdx), %r8 shll $0x6, %eax leaq 0x30(%rsp), %rdi movq 0x8(%rsp), %rsi movq 0x20(%rsp), %rdx vmovups %ymm10, 0x210(%rsp) vzeroupper callq *(%r8,%rax) vmovups 0x210(%rsp), %ymm10 vpbroadcastd 0xa828e(%rip), %ymm9 # 0x1f615bc vpmovsxbd 0xa4739(%rip), %ymm8 # 0x1f5da70 leaq 0x230(%rsp), %r10 vmovups 0xd0(%rsp), %ymm29 vmovups 0xf0(%rsp), %ymm28 vmovups 0x110(%rsp), %ymm27 vmovups 0x130(%rsp), %ymm26 vmovups 0x150(%rsp), %ymm25 vmovups 0x170(%rsp), %ymm24 vmovups 0x190(%rsp), %ymm23 movq 0x10(%rsp), %rdi vmovups 0x1b0(%rsp), %ymm22 vmovups 0x1d0(%rsp), %ymm21 movq 0x28(%rsp), %r8 vmovups 0x1f0(%rsp), %ymm20 movq 0x8(%rsp), %rsi vbroadcastss 0x20(%rsi), %ymm0 jmp 0x1eb8c1a nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
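The node test inside these traversals is the classic slab test: subtract the ray origin, scale by the precomputed reciprocal direction, and intersect the per-axis [near, far] intervals (the vsubps/vmulps then vmaxps/vminps chains in the listing). Which bound is the near plane depends on the sign of each direction component; the listing bakes that choice into per-axis byte offsets (the setb/shll $0x5/xorq $0x20 sequence) instead of branching. The scalar sketch below shows the same logic with an explicit swap; the robust variant additionally widens the final interval by a few ulps, which is consistent with the two constant multiplies at the end of the box test above.

#include <algorithm>
#include <utility>

struct AABB { float lower[3], upper[3]; };

// Scalar slab test: rdir = 1/dir is precomputed once per ray.
inline bool intersectBox(const AABB& b, const float org[3], const float rdir[3],
                         float tnear, float tfar)
{
  for (int a = 0; a < 3; ++a) {
    float t0 = (b.lower[a] - org[a]) * rdir[a];
    float t1 = (b.upper[a] - org[a]) * rdir[a];
    if (rdir[a] < 0.0f) std::swap(t0, t1); // direction sign selects the near plane
    tnear = std::max(tnear, t0);
    tfar  = std::min(tfar,  t1);
  }
  return tnear <= tfar;                    // non-empty interval => box is hit
}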
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode)
    return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eb9415 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x25e8, %rsp # imm = 0x25E8 movq 0x70(%rax), %rax movq %rax, 0x2a0(%rsp) movl $0x0, 0x2a8(%rsp) cmpq $0x8, %rax jne 0x1eb9419 addq $0x25e8, %rsp # imm = 0x25E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r14 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss 0xc(%rsi), %xmm2, %xmm1 vmaxss 0x20(%rsi), %xmm2, %xmm3 vandps 0x67a8b(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x37ba6(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x332b5(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x2b0(%rsp), %r8 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm2, %xmm0 setb %dil vbroadcastss %xmm0, %ymm19 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm20 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x67a48(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm21 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x59257(%rip), %ymm22 # 0x1f12704 vpermps %ymm0, %ymm22, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm2, %xmm4 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm2, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %rsi xorq $0x20, %rsi movq %r10, %r15 xorq $0x20, %r15 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x679b9(%rip), %ymm2 # 0x1f20ec0 vxorps %ymm2, %ymm7, %ymm23 vxorps %ymm2, %ymm8, %ymm24 vxorps %ymm2, %ymm6, %ymm25 vbroadcastss %xmm1, %ymm26 vpmovsxbd 0xa4547(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa8089(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x67983(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa80d9(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa80d7(%rip), %ymm31 # 0x1f61628 leaq 0x2a0(%rsp), %rbp movq %r15, 0x78(%rsp) vmovss 0x20(%r14), %xmm1 cmpq %rbp, %r8 je 0x1eb9404 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1eb9564 movq (%r8), %rbx testb $0x8, %bl jne 0x1eb95fb vmovaps 0x40(%rbx,%rdi), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%rbx,%r9), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%rbx,%r10), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpmaxsd %ymm26, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm8 vmovaps 0x40(%rbx,%r11), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%rbx,%rsi), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%rbx,%r15), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm8, %k0 kmovb %k0, %r12d testb $0x8, %bl jne 0x1eb9664 testq %r12, %r12 je 0x1eb966b andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm8, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r12, %rcx jne 0x1eb9672 testl %eax, %eax je 0x1eb9580 jmp 0x1eb993f movl $0x6, %eax jmp 0x1eb9657 movl $0x4, %eax jmp 0x1eb9657 movq %rsi, %r13 vpshufd $0x55, %ymm3, %ymm4 # ymm4 
= ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1eb96dc vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 movq %r13, %rsi jmp 0x1eb9657 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eb9765 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1eb96d4 vmovdqa %ymm8, %ymm9 movq %r15, %rbp movq %r11, %r15 movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eb9850 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm9, %ymm8 vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r15, %r11 movq %r13, %rsi movq %rbp, %r15 leaq 0x2a0(%rsp), %rbp jmp 0x1eb9657 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm6, %ymm31, %ymm3 vpmovsxbd 0xa7db8(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm22, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eb9881 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eb98e3 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r15, %r11 movq %r13, %rsi movq %rbp, %r15 leaq 0x2a0(%rsp), %rbp vmovdqa %ymm9, %ymm8 jmp 0x1eb9657 cmpl $0x6, %eax jne 0x1eb955e vmovdqu %ymm8, 0x280(%rsp) movl %ebx, %r13d andl $0xf, %r13d addq $-0x8, %r13 je 0x1eba027 andq $-0x10, 
%rbx xorl %ebp, %ebp imulq $0xb0, %rbp, %r15 vmovaps 0x80(%rbx,%r15), %xmm7 vmovaps 0x40(%rbx,%r15), %xmm8 vmovaps 0x70(%rbx,%r15), %xmm9 vmovaps 0x50(%rbx,%r15), %xmm10 vmulps %xmm7, %xmm8, %xmm0 vmovaps 0x60(%rbx,%r15), %xmm11 vmovaps (%rbx,%r15), %xmm3 vmovaps 0x10(%rbx,%r15), %xmm5 vmovaps 0x20(%rbx,%r15), %xmm6 vmovaps 0x30(%rbx,%r15), %xmm12 vfmsub231ps %xmm10, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm10) - xmm0 vmulps %xmm11, %xmm10, %xmm1 vfmsub231ps %xmm12, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm12) - xmm1 vmulps %xmm9, %xmm12, %xmm2 vbroadcastss 0x10(%r14), %xmm13 vbroadcastss 0x14(%r14), %xmm14 vbroadcastss 0x18(%r14), %xmm15 vsubps (%r14){1to4}, %xmm3, %xmm4 vfmsub231ps %xmm8, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm8) - xmm2 vsubps 0x4(%r14){1to4}, %xmm5, %xmm5 vsubps 0x8(%r14){1to4}, %xmm6, %xmm6 vmulps %xmm6, %xmm14, %xmm16 vfmsub231ps %xmm15, %xmm5, %xmm16 # xmm16 = (xmm5 * xmm15) - xmm16 vmulps %xmm4, %xmm15, %xmm17 vfmsub231ps %xmm13, %xmm6, %xmm17 # xmm17 = (xmm6 * xmm13) - xmm17 vmulps %xmm5, %xmm13, %xmm18 vfmsub231ps %xmm14, %xmm4, %xmm18 # xmm18 = (xmm4 * xmm14) - xmm18 vmulps %xmm2, %xmm15, %xmm15 vfmadd231ps %xmm14, %xmm1, %xmm15 # xmm15 = (xmm1 * xmm14) + xmm15 vfmadd231ps %xmm13, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm13) + xmm15 vandps 0x67495(%rip){1to4}, %xmm15, %xmm3 # 0x1f20ec4 vmulps %xmm18, %xmm7, %xmm7 vfmadd231ps %xmm9, %xmm17, %xmm7 # xmm7 = (xmm17 * xmm9) + xmm7 vfmadd231ps %xmm11, %xmm16, %xmm7 # xmm7 = (xmm16 * xmm11) + xmm7 vandpd 0x6751d(%rip){1to2}, %xmm15, %xmm9 # 0x1f20f68 vxorps %xmm7, %xmm9, %xmm7 vmulps %xmm18, %xmm10, %xmm10 vfmadd231ps %xmm17, %xmm8, %xmm10 # xmm10 = (xmm8 * xmm17) + xmm10 vfmadd231ps %xmm16, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm16) + xmm10 vxorps %xmm10, %xmm9, %xmm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %xmm10, %xmm7, %k1 vcmpnltps %xmm10, %xmm8, %k1 {%k1} vcmpneqps %xmm10, %xmm15, %k1 {%k1} vaddps %xmm7, %xmm8, %xmm10 vcmpleps %xmm3, %xmm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1eb9aa2 incq %rbp cmpq %r13, %rbp jne 0x1eb9968 jmp 0x1eba027 vmulps %xmm6, %xmm2, %xmm6 vfmadd213ps %xmm6, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm6 vfmadd213ps %xmm5, %xmm0, %xmm4 # xmm4 = (xmm0 * xmm4) + xmm5 vxorps %xmm4, %xmm9, %xmm4 vmulps 0xc(%r14){1to4}, %xmm3, %xmm5 vmulps 0x20(%r14){1to4}, %xmm3, %xmm6 vcmpleps %xmm6, %xmm4, %k1 vcmpltps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1eb9a91 vmovaps %xmm7, 0xe0(%rsp) vmovaps %xmm8, 0xf0(%rsp) vmovaps %xmm4, 0x100(%rsp) vmovaps %xmm3, 0x110(%rsp) kmovb %k1, 0x121(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovaps %xmm1, 0x170(%rsp) addq %rbx, %r15 vmovaps %xmm2, 0x180(%rsp) vrcp14ps %xmm3, %xmm0 vfnmadd213ps 0x32bdf(%rip){1to4}, %xmm0, %xmm3 # xmm3 = -(xmm0 * xmm3) + mem vfmadd132ps %xmm0, %xmm0, %xmm3 # xmm3 = (xmm3 * xmm0) + xmm0 vmulps 0x100(%rsp), %xmm3, %xmm4 vmovaps %xmm4, 0x150(%rsp) vmulps 0xe0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x130(%rsp) vmulps 0xf0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x140(%rsp) movq %rdx, 0x10(%rsp) movq (%rdx), %rax movq %rax, 0x70(%rsp) kmovd %k1, %ecx vbroadcastss 0x31e96(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax movl %ecx, 0xc(%rsp) cmovel %ecx, %eax movzbl %al, %eax tzcntq %rax, %rdx movl 0x90(%r15,%rdx,4), %ecx movq 0x70(%rsp), %rax movq 0x1e8(%rax), %rax movq %rcx, 0x30(%rsp) movq (%rax,%rcx,8), %rcx movl 
0x24(%r14), %eax testl %eax, 0x34(%rcx) movq %rdx, 0x18(%rsp) je 0x1eb9c1e movq %rcx, %rax movq 0x10(%rsp), %rdx movq 0x10(%rdx), %rcx movq %rcx, 0x28(%rsp) cmpq $0x0, 0x10(%rcx) movl 0xc(%rsp), %ecx jne 0x1eb9c9c cmpq $0x0, 0x40(%rax) jne 0x1eb9c9c xorl %eax, %eax jmp 0x1eb9c44 movl $0x1, %eax shlxl %edx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx testb %al, %al je 0x1eb9f9d testb %cl, %cl je 0x1eb9a91 kmovd %ecx, %k1 vbroadcastss 0x31dbf(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0xc(%rsp) movzbl %cl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %edx jmp 0x1eb9bc4 vmovaps %xmm4, 0x190(%rsp) vmovups %ymm26, 0x1a0(%rsp) vmovups %ymm25, 0x1c0(%rsp) vmovups %ymm24, 0x1e0(%rsp) vmovups %ymm23, 0x200(%rsp) movq %rsi, 0x40(%rsp) movq %r11, 0x48(%rsp) movq %r10, 0x50(%rsp) movq %r9, 0x58(%rsp) vmovups %ymm21, 0x220(%rsp) vmovups %ymm20, 0x240(%rsp) vmovups %ymm19, 0x260(%rsp) movq %rdi, 0x60(%rsp) movq %r8, 0x68(%rsp) movq 0x18(%rsp), %rsi vmovss 0x130(%rsp,%rsi,4), %xmm0 vmovss 0x140(%rsp,%rsi,4), %xmm1 movq %rax, %rdi movq 0x8(%rdx), %rax movl 0xa0(%r15,%rsi,4), %ecx vmovss 0x160(%rsp,%rsi,4), %xmm2 vmovss 0x170(%rsp,%rsi,4), %xmm3 vmovss 0x180(%rsp,%rsi,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movq 0x30(%rsp), %rcx movl %ecx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%r14), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x150(%rsp,%rsi,4), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rdi), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %r14, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq %rdi, 0x38(%rsp) movq 0x40(%rdi), %rax testq %rax, %rax je 0x1eb9e22 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1eb9eba movq 0x28(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1eb9e5f movq 0x28(%rsp), %rcx testb $0x2, (%rcx) jne 0x1eb9e45 movq 0x38(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1eb9e52 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1eb9eba movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1eb9ec6 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0x1, %eax movq 0x18(%rsp), %rcx shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vmovaps 0x190(%rsp), %xmm4 vcmpleps 0x20(%r14){1to4}, %xmm4, %k1 kandb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx movq 0x68(%rsp), %r8 movq 0x60(%rsp), %rdi vmovups 0x260(%rsp), %ymm19 vmovups 0x240(%rsp), %ymm20 vmovups 0x220(%rsp), %ymm21 
vbroadcastss 0x587d2(%rip), %ymm22 # 0x1f12704 movq 0x58(%rsp), %r9 movq 0x50(%rsp), %r10 movq 0x48(%rsp), %r11 movq 0x40(%rsp), %rsi vmovups 0x200(%rsp), %ymm23 vmovups 0x1e0(%rsp), %ymm24 vmovups 0x1c0(%rsp), %ymm25 vmovups 0x1a0(%rsp), %ymm26 vpmovsxbd 0xa3b00(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa7642(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x66f3c(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa7692(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa7690(%rip), %ymm31 # 0x1f61628 jmp 0x1eb9c44 movq 0x18(%rsp), %rax vmovss 0x130(%rsp,%rax,4), %xmm0 vmovss 0x140(%rsp,%rax,4), %xmm1 vmovss 0x150(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x20(%r14) vmovss 0x160(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x30(%r14) vmovss 0x170(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x34(%r14) vmovss 0x180(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x38(%r14) vmovss %xmm0, 0x3c(%r14) vmovss %xmm1, 0x40(%r14) movl 0xa0(%r15,%rax,4), %eax movl %eax, 0x44(%r14) movq 0x30(%rsp), %rax movl %eax, 0x48(%r14) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r14) movl 0x4(%rax), %eax movl %eax, 0x50(%r14) jmp 0x1eb9a91 vbroadcastss 0x20(%r14), %ymm0 movq 0x78(%rsp), %r15 leaq 0x2a0(%rsp), %rbp vmovdqu 0x280(%rsp), %ymm8 jmp 0x1eb955e
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
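The tail of the assembly above avoids a vdivps for the hit denominator: it takes a coarse reciprocal estimate with vrcp14ps and refines it with one vfnmadd213ps/vfmadd132ps pair before scaling the t/u/v numerators. A minimal scalar sketch of that Newton-Raphson step, assuming the broadcast constant the vfnmadd loads is 1.0f:

#include <cstdio>

// One Newton-Raphson step refining a coarse reciprocal estimate r0 of a:
//   e  = 1 - a*r0     (residual; the vfnmadd213ps in the asm)
//   r1 = r0 + r0*e    (refined estimate; the vfmadd132ps)
// Each step roughly doubles the number of correct bits.
static float refine_rcp(float a, float r0) {
    float e = 1.0f - a * r0;
    return r0 + r0 * e;
}

int main() {
    float a  = 3.0f;
    float r0 = 0.33f;   // stand-in for the vrcp14ps estimate
    std::printf("coarse %.9f refined %.9f exact %.9f\n",
                r0, refine_rcp(a, r0), 1.0f / a);
}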
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1eba059 xorl %eax, %eax jmp 0x1eba7c3 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %rbp movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1eba098 vmovss 0x10(%rbp), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1eba0a3 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 vbroadcastss (%rbp), %ymm6 vbroadcastss 0x4(%rbp), %ymm5 vbroadcastss 0x8(%rbp), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0xa3987(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa74ca(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1eba7ae vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1eba170 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1eba295 testb $0x8, %r12b jne 0x1eba216 vmovaps 0x40(%r12), %ymm0 vmovaps 0x60(%r12), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%r12), %ymm5, %ymm3 vminps 0xa0(%r12), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%r12), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%r12), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %r12b jne 0x1eba3a5 testq %rdi, %rdi je 0x1eba3af andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1eba3b9 testl %eax, %eax je 0x1eba18d jmp 0x1eba67e testb $0x8, %r12b jne 0x1eba216 vmovaps 0xc0(%r12), %ymm0 vmovaps 0x40(%r12), %ymm1 vmovaps 0x60(%r12), %ymm2 vmovaps 0x80(%r12), %ymm3 vmovaps 0xa0(%r12), %ymm4 vmovaps 0xe0(%r12), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, 
%k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1eba216 movl $0x6, %eax jmp 0x1eba288 movl $0x4, %eax jmp 0x1eba288 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1eba41d vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1eba288 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1eba4b2 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1eba288 movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1eba584 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1eba288 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x6692c(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa7083(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa707c(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa706f(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x5812d(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1eba5ca popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1eba634 vpermt2q %ymm1, %ymm3, %ymm0 vmovq 
%xmm0, %r12 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1eba577 cmpl $0x6, %eax jne 0x1eba170 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1eba170 movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %r12 addq $0xa0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) movq $-0x10, %r14 xorl %r13d, %r13d movl (%r12,%r14), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1eba70c movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14), %eax movl %eax, 0x40(%rbx) movq %rbp, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b addq $0x4, %r14 jne 0x1eba6ce movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0xb0, %r12 cmpq 0x60(%rsp), %r15 jne 0x1eba6bf testb $0x1, %al vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 movq 0x58(%rsp), %r8 vpmovsxbd 0xa3329(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa6e6c(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1eba170 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1eba798 vmovss 0x10(%rbp), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1eba7a3 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1eba106 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
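The pointQuery body is the same for every instantiation in this file; only the PrimitiveIntersector1 leaf test differs. A stripped-down sketch of its pop/cull control flow, with a hypothetical binary Node standing in for the 8-wide BVH node and a scalar squared distance standing in for the leaf intersector (the real stack stores dist as raw bits and compares through a float reinterpret):

#include <cmath>
#include <cstddef>

struct Node { bool leaf; float dist2; const Node* child[2]; }; // toy node
struct StackItem { const Node* ptr; float dist; };

bool pointQuerySketch(const Node* root, float radius) {
    constexpr std::size_t stackSize = 64;
    StackItem stack[stackSize];
    StackItem* stackPtr = stack + 1;          // current stack pointer
    stack[0] = { root, -INFINITY };

    bool changed = false;
    float cull_radius = radius * radius;      // squared, as in the kernel

    while (stackPtr != stack) {               // pop loop
        --stackPtr;
        if (stackPtr->dist > cull_radius) continue;  // popped node too far
        const Node* cur = stackPtr->ptr;
        if (cur->leaf) {
            if (cur->dist2 < cull_radius) {   // stand-in primitive test:
                cull_radius = cur->dist2;     // a hit shrinks the cull radius
                changed = true;
            }
        } else {                              // push both children
            *stackPtr++ = { cur->child[0], cur->child[0]->dist2 };
            *stackPtr++ = { cur->child[1], cur->child[1]->dist2 };
        }
    }
    return changed;
}

int main() {
    Node a{true, 0.25f, {nullptr, nullptr}};
    Node b{true, 4.00f, {nullptr, nullptr}};
    Node root{false, 0.0f, {&a, &b}};
    return pointQuerySketch(&root, 1.0f) ? 0 : 1;
}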
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ebb4df xorl %eax, %eax jmp 0x1ebbc20 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r12 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ebb520 vmovss 0x10(%r12), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ebb52b vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r12), %ymm6 vbroadcastss 0x4(%r12), %ymm5 vbroadcastss 0x8(%r12), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0xa24f5(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa6038(%rip), %ymm12 # 0x1f615bc vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ebbc0b vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ebb5fa movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ebb709 testb $0x8, %bpl jne 0x1ebb694 vmovaps 0x40(%rbp), %ymm0 vmovaps 0x60(%rbp), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%rbp), %ymm5, %ymm3 vminps 0xa0(%rbp), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%rbp), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%rbp), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %bpl jne 0x1ebb809 testq %rdi, %rdi je 0x1ebb813 andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ebb81d testl %eax, %eax je 0x1ebb617 jmp 0x1ebbae2 testb $0x8, %bpl jne 0x1ebb694 vmovaps 0xc0(%rbp), %ymm0 vmovaps 0x40(%rbp), %ymm1 vmovaps 0x60(%rbp), %ymm2 vmovaps 0x80(%rbp), %ymm3 vmovaps 0xa0(%rbp), %ymm4 vmovaps 0xe0(%rbp), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, %k3, 
%k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ebb694 movl $0x6, %eax jmp 0x1ebb6fc movl $0x4, %eax jmp 0x1ebb6fc vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ebb881 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ebb6fc vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ebb916 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ebb6fc movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ebb9e8 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ebb6fc valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x654c8(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa5c1f(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa5c18(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa5c0b(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x56cc9(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ebba2e popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ebba98 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 
%rbp vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ebb9db cmpl $0x6, %eax jne 0x1ebb5fa movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ebb5fa movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %rbp addq $0x40, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) xorl %r13d, %r13d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ebbb6a movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r12, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 cmpq $0x4, %r14 jne 0x1ebbb2a movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0x50, %rbp cmpq 0x60(%rsp), %r15 jne 0x1ebbb1f testb $0x1, %al movq 0x58(%rsp), %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0xa1ec6(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa5a09(%rip), %ymm12 # 0x1f615bc vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ebb5fa vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ebbbf5 vmovss 0x10(%r12), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ebbc00 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ebb590 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
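The per-node sphere test in the assembly (vmaxps against the child lower bounds, vminps against the upper bounds, vsubps against the query point, then squared sums compared with r^2 held in ymm10) is the classic point-to-AABB squared distance, evaluated for eight children at once. A scalar sketch of one child:

#include <algorithm>
#include <cstdio>

struct Vec3 { float x, y, z; };

static float clampf(float v, float lo, float hi) {
    return std::min(std::max(v, lo), hi);     // vmaxps then vminps
}

// Squared distance from point p to the box [lo,hi]: clamp p to the box,
// subtract, and sum the squares (the vmulps/vaddps chain in the asm).
static float sqrDistPointAABB(const Vec3& p, const Vec3& lo, const Vec3& hi) {
    float dx = clampf(p.x, lo.x, hi.x) - p.x;
    float dy = clampf(p.y, lo.y, hi.y) - p.y;
    float dz = clampf(p.z, lo.z, hi.z) - p.z;
    return dx*dx + dy*dy + dz*dz;
}

int main() {
    Vec3 p{2.0f, 0.5f, 0.0f}, lo{-1,-1,-1}, hi{1,1,1};
    float r = 1.5f;
    std::printf("child accepted: %d\n", sqrDistPointAABB(p, lo, hi) <= r*r);
}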
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ebcc15 xorl %eax, %eax jmp 0x1ebd37f pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %rbp movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ebcc54 vmovss 0x10(%rbp), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ebcc5f vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 vbroadcastss (%rbp), %ymm6 vbroadcastss 0x4(%rbp), %ymm5 vbroadcastss 0x8(%rbp), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0xa0dcb(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa490e(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ebd36a vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ebcd2c movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ebce51 testb $0x8, %r12b jne 0x1ebcdd2 vmovaps 0x40(%r12), %ymm0 vmovaps 0x60(%r12), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%r12), %ymm5, %ymm3 vminps 0xa0(%r12), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%r12), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%r12), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %r12b jne 0x1ebcf61 testq %rdi, %rdi je 0x1ebcf6b andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ebcf75 testl %eax, %eax je 0x1ebcd49 jmp 0x1ebd23a testb $0x8, %r12b jne 0x1ebcdd2 vmovaps 0xc0(%r12), %ymm0 vmovaps 0x40(%r12), %ymm1 vmovaps 0x60(%r12), %ymm2 vmovaps 0x80(%r12), %ymm3 vmovaps 0xa0(%r12), %ymm4 vmovaps 0xe0(%r12), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, 
%k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ebcdd2 movl $0x6, %eax jmp 0x1ebce44 movl $0x4, %eax jmp 0x1ebce44 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ebcfd9 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ebce44 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ebd06e vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ebce44 movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ebd140 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ebce44 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x63d70(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa44c7(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa44c0(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa44b3(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x55571(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ebd186 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ebd1f0 vpermt2q %ymm1, %ymm3, %ymm0 vmovq 
%xmm0, %r12 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ebd133 cmpl $0x6, %eax jne 0x1ebcd2c movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ebcd2c movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %r12 addq $0xa0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) movq $-0x10, %r14 xorl %r13d, %r13d movl (%r12,%r14), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ebd2c8 movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14), %eax movl %eax, 0x40(%rbx) movq %rbp, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b addq $0x4, %r14 jne 0x1ebd28a movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0xb0, %r12 cmpq 0x60(%rsp), %r15 jne 0x1ebd27b testb $0x1, %al vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 movq 0x58(%rsp), %r8 vpmovsxbd 0xa076d(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa42b0(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ebcd2c vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ebd354 vmovss 0x10(%rbp), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ebd35f vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ebccc2 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
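When more than one child passes the node test, traverseClosestHit descends into the nearest and pushes the others; the assembly orders them with vpminsd/vpmaxsd compare-exchange networks over packed (distance, child) pairs, with a valignd rotation loop as the fallback for larger hit counts. A scalar sketch of the two-hit case, assuming a simple StackItem layout:

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct StackItem { std::uint64_t ptr; float dist; }; // assumed layout

// Compare-exchange the two hits so the farther child is pushed and the
// nearer child becomes the next node to traverse.
static std::uint64_t traverseTwoHits(std::uint64_t c0, float d0,
                                     std::uint64_t c1, float d1,
                                     StackItem*& stackPtr) {
    if (d1 < d0) { std::swap(c0, c1); std::swap(d0, d1); }
    *stackPtr++ = { c1, d1 };   // farther child onto the stack
    return c0;                  // nearer child is traversed next
}

int main() {
    StackItem stack[4];
    StackItem* sp = stack;
    std::uint64_t next = traverseTwoHits(0xA0, 2.0f, 0xB0, 1.0f, sp);
    std::printf("descend %llx, pushed %llx\n",
                (unsigned long long)next, (unsigned long long)stack[0].ptr);
}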
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ebd3cf pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2658, %rsp # imm = 0x2658 movq 0x70(%rax), %rax movq %rax, 0x310(%rsp) movl $0x0, 0x318(%rsp) cmpq $0x8, %rax jne 0x1ebd3d3 addq $0x2658, %rsp # imm = 0x2658 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rdx, %r8 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x63ad1(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vcmpltps 0x33bea(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 vbroadcastss 0x2f30c(%rip), %xmm26 # 0x1eec714 vdivps %xmm0, %xmm26, %xmm0 vbroadcastss 0x63b48(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x62aee(%rip){1to4}, %xmm0, %xmm4 # 0x1f1ff10 vmulps 0x62ae8(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%rsi), %ymm27 vbroadcastss 0x4(%rsi), %ymm28 leaq 0x320(%rsp), %r10 vbroadcastss 0x8(%rsi), %ymm29 xorl %r11d, %r11d vucomiss %xmm1, %xmm4 setb %r11b vbroadcastss %xmm4, %ymm30 vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vbroadcastsd %xmm5, %ymm31 vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0] vbroadcastss 0x63a6b(%rip), %ymm7 # 0x1f20edc vpermps %ymm4, %ymm7, %ymm8 vbroadcastss %xmm0, %ymm9 vbroadcastss 0x55280(%rip), %ymm4 # 0x1f12704 vpermps %ymm0, %ymm4, %ymm10 vpermps %ymm0, %ymm7, %ymm11 shll $0x5, %r11d xorl %edi, %edi vucomiss %xmm1, %xmm5 setb %dil shll $0x5, %edi orq $0x40, %rdi xorl %r9d, %r9d vucomiss %xmm1, %xmm6 setb %r9b shll $0x5, %r9d orq $0x80, %r9 movq %r11, %rbx xorq $0x20, %rbx movq %rdi, %r14 xorq $0x20, %r14 movq %r9, %r15 xorq $0x20, %r15 vbroadcastss %xmm2, %ymm12 vbroadcastss %xmm3, %ymm0 vpmovsxbd 0xa058f(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0xa40d2(%rip), %ymm14 # 0x1f615bc leaq 0x310(%rsp), %r12 movq %rdx, 0x20(%rsp) movq %rsi, 0x18(%rsp) vmovups %ymm27, 0x180(%rsp) vmovups %ymm28, 0x160(%rsp) vmovups %ymm29, 0x140(%rsp) movq %r11, 0x10(%rsp) vmovups %ymm30, 0x120(%rsp) vmovups %ymm31, 0x100(%rsp) vmovups %ymm8, 0x2d0(%rsp) vmovups %ymm9, 0x2b0(%rsp) vmovups %ymm10, 0x290(%rsp) vmovups %ymm11, 0x270(%rsp) movq %rdi, 0x60(%rsp) movq %r9, 0x58(%rsp) movq %rbx, 0x50(%rsp) movq %r14, 0x48(%rsp) movq %r15, 0x40(%rsp) vmovups %ymm12, 0x250(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r12, %r10 je 0x1ebd3be vmovss -0x8(%r10), %xmm2 addq $-0x10, %r10 vucomiss %xmm1, %xmm2 ja 0x1ebd574 movq (%r10), %rbp testb $0x8, %bpl jne 0x1ebd628 vmovaps 0x40(%rbp,%r11), %ymm1 vsubps %ymm27, %ymm1, %ymm1 vmulps %ymm1, %ymm30, %ymm1 vmovaps 0x40(%rbp,%rdi), %ymm2 vsubps %ymm28, %ymm2, %ymm2 vmulps %ymm2, %ymm31, %ymm2 vmaxps %ymm2, %ymm1, %ymm1 vmovaps 0x40(%rbp,%r9), %ymm2 vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm8, %ymm2 vmaxps %ymm12, %ymm2, %ymm2 vmaxps %ymm2, %ymm1, %ymm16 vmovaps 0x40(%rbp,%rbx), %ymm1 vsubps %ymm27, %ymm1, %ymm1 vmovaps 0x40(%rbp,%r14), %ymm2 vmulps %ymm1, %ymm9, %ymm1 vsubps %ymm28, %ymm2, %ymm2 vmulps %ymm2, %ymm10, %ymm2 vminps %ymm2, %ymm1, %ymm1 vmovaps 0x40(%rbp,%r15), %ymm2 vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm11, %ymm2 vminps %ymm0, %ymm2, %ymm2 vminps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm16, %k0 kmovb %k0, %r13d testb $0x8, %bpl jne 0x1ebd692 testq %r13, %r13 je 0x1ebd699 andq $-0x10, %rbp vmovdqu (%rbp), %ymm1 vmovdqu 0x20(%rbp), %ymm2 vmovdqa %ymm13, %ymm3 vpternlogd $0xf8, %ymm14, %ymm16, %ymm3 kmovd %r13d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, 
%eax blsrq %r13, %rcx jne 0x1ebd6a0 testl %eax, %eax je 0x1ebd590 jmp 0x1ebda2b movl $0x6, %eax jmp 0x1ebd685 movl $0x4, %eax jmp 0x1ebd685 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ebd702 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbp vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r10) vpermd %ymm16, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r10) addq $0x10, %r10 jmp 0x1ebd685 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ebd78d vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbp vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r10) vpermd %ymm16, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r10) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r10) vpermd %ymm16, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r10) addq $0x20, %r10 jmp 0x1ebd685 vmovdqa64 %ymm16, %ymm17 vmovdqa %ymm14, %ymm15 vmovdqa %ymm13, %ymm14 vmovaps %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ebd8d5 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbp vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r10) vmovdqa64 %ymm17, %ymm16 vpermd %ymm17, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r10) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r10) vpermd %ymm17, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r10) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r10) vpermd %ymm17, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r10) addq $0x30, %r10 movq 0x20(%rsp), %r8 movq 0x18(%rsp), %rsi vbroadcastss 0x2ee94(%rip), %xmm26 # 0x1eec714 vmovups 0x180(%rsp), %ymm27 vmovups 0x160(%rsp), %ymm28 vmovups 0x140(%rsp), %ymm29 movq 0x10(%rsp), %r11 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 vmovdqa %ymm15, %ymm14 jmp 0x1ebd685 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x635db(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0xa3d31(%rip), %ymm16 # 0x1f61620 vpermt2d %ymm8, %ymm16, %ymm3 vpmovsxbd 0xa3d2a(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0xa3d1d(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x54ddb(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} 
# ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ebd91c popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r10) vpermd %ymm17, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r10) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r10 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ebd986 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbp movq 0x20(%rsp), %r8 movq 0x18(%rsp), %rsi vbroadcastss 0x2ed44(%rip), %xmm26 # 0x1eec714 vmovups 0x180(%rsp), %ymm27 vmovups 0x160(%rsp), %ymm28 vmovups 0x140(%rsp), %ymm29 movq 0x10(%rsp), %r11 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 vmovdqa %ymm15, %ymm14 vmovdqa64 %ymm17, %ymm16 jmp 0x1ebd685 cmpl $0x6, %eax jne 0x1ebd56f movq %r13, 0x68(%rsp) vmovdqu64 %ymm16, 0x2f0(%rsp) movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x70(%rsp) je 0x1ebe327 andq $-0x10, %rbp movq (%r8), %r9 xorl %eax, %eax movq %r9, 0x28(%rsp) movq %rax, 0x78(%rsp) leaq (%rax,%rax,4), %rax shlq $0x4, %rax movl 0x30(%rbp,%rax), %edx movq 0x228(%r9), %rcx movq (%rcx,%rdx,8), %rdx movl (%rbp,%rax), %r9d movl 0x4(%rbp,%rax), %edi vmovups (%rdx,%r9,4), %xmm0 movl 0x10(%rbp,%rax), %r9d vmovups (%rdx,%r9,4), %xmm1 movl 0x20(%rbp,%rax), %r9d vmovups (%rdx,%r9,4), %xmm2 movq 0x28(%rsp), %r9 movl 0x34(%rbp,%rax), %edx movq (%rcx,%rdx,8), %rdx vmovups (%rdx,%rdi,4), %xmm3 movl 0x14(%rbp,%rax), %edi vmovups (%rdx,%rdi,4), %xmm4 movl 0x24(%rbp,%rax), %edi vmovups (%rdx,%rdi,4), %xmm5 movl 0x38(%rbp,%rax), %edx movq (%rcx,%rdx,8), %rdx movl 0x8(%rbp,%rax), %edi vmovups (%rdx,%rdi,4), %xmm6 movl 0x18(%rbp,%rax), %edi vmovups (%rdx,%rdi,4), %xmm7 movl 0x28(%rbp,%rax), %edi vmovups (%rdx,%rdi,4), %xmm8 movl 0x3c(%rbp,%rax), %edx movq (%rcx,%rdx,8), %rcx movl 0xc(%rbp,%rax), %edx vmovups (%rcx,%rdx,4), %xmm9 movl 0x1c(%rbp,%rax), %edx vmovups (%rcx,%rdx,4), %xmm10 movl 0x2c(%rbp,%rax), %edx vmovups (%rcx,%rdx,4), %xmm11 vunpcklps %xmm6, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] vunpckhps %xmm6, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3] vunpcklps %xmm9, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] vunpckhps %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3] vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] vunpcklps %xmm6, %xmm12, %xmm3 # xmm3 = xmm12[0],xmm6[0],xmm12[1],xmm6[1] vunpckhps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm6[2],xmm12[3],xmm6[3] vunpcklps %xmm7, %xmm1, %xmm9 # xmm9 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3] vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1] vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] vunpcklps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] vunpcklps %xmm7, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1] vunpckhps %xmm7, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm7[2],xmm9[3],xmm7[3] vunpcklps %xmm8, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] vunpckhps %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] vunpcklps %xmm11, %xmm5, %xmm7 # xmm7 = xmm5[0],xmm11[0],xmm5[1],xmm11[1] vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3] vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] vunpcklps %xmm7, %xmm4, %xmm11 # xmm11 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] vmovaps 0x30(%rbp,%rax), %xmm5 vmovaps %xmm5, 0x240(%rsp) 
vmovaps 0x40(%rbp,%rax), %xmm5 vmovaps %xmm5, 0xf0(%rsp) vunpckhps %xmm7, %xmm4, %xmm13 # xmm13 = xmm4[2],xmm7[2],xmm4[3],xmm7[3] movb $0x0, 0xf(%rsp) vbroadcastss (%rsi), %xmm14 vbroadcastss 0x4(%rsi), %xmm15 vbroadcastss 0x8(%rsi), %xmm16 vbroadcastss 0x10(%rsi), %xmm5 vbroadcastss 0x14(%rsi), %xmm7 leaq 0xf(%rsp), %rax movq %rax, 0x1d0(%rsp) vbroadcastss 0x18(%rsi), %xmm9 vsubps %xmm14, %xmm3, %xmm4 vsubps %xmm15, %xmm6, %xmm6 vsubps %xmm16, %xmm0, %xmm8 vsubps %xmm14, %xmm10, %xmm17 vsubps %xmm15, %xmm12, %xmm19 vsubps %xmm16, %xmm1, %xmm20 vsubps %xmm14, %xmm11, %xmm14 vsubps %xmm15, %xmm13, %xmm21 vsubps %xmm16, %xmm2, %xmm22 vsubps %xmm4, %xmm14, %xmm11 vsubps %xmm6, %xmm21, %xmm13 vsubps %xmm8, %xmm22, %xmm12 vsubps %xmm17, %xmm4, %xmm15 vsubps %xmm19, %xmm6, %xmm18 vsubps %xmm20, %xmm8, %xmm16 vsubps %xmm14, %xmm17, %xmm0 vsubps %xmm21, %xmm19, %xmm1 vsubps %xmm22, %xmm20, %xmm2 vaddps %xmm4, %xmm14, %xmm3 vaddps %xmm6, %xmm21, %xmm10 vaddps %xmm8, %xmm22, %xmm23 vmulps %xmm12, %xmm10, %xmm24 vfmsub231ps %xmm23, %xmm13, %xmm24 # xmm24 = (xmm13 * xmm23) - xmm24 vmulps %xmm11, %xmm23, %xmm23 vfmsub231ps %xmm3, %xmm12, %xmm23 # xmm23 = (xmm12 * xmm3) - xmm23 vmulps %xmm3, %xmm13, %xmm3 vfmsub231ps %xmm10, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm10) - xmm3 vmulps %xmm3, %xmm9, %xmm3 vfmadd231ps %xmm23, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm23) + xmm3 vfmadd231ps %xmm24, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm24) + xmm3 vaddps %xmm17, %xmm4, %xmm10 vaddps %xmm19, %xmm6, %xmm23 vaddps %xmm20, %xmm8, %xmm24 vmulps %xmm16, %xmm23, %xmm25 vfmsub231ps %xmm24, %xmm18, %xmm25 # xmm25 = (xmm18 * xmm24) - xmm25 vmulps %xmm15, %xmm24, %xmm24 vfmsub231ps %xmm10, %xmm16, %xmm24 # xmm24 = (xmm16 * xmm10) - xmm24 vmulps %xmm18, %xmm10, %xmm10 vfmsub231ps %xmm23, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm23) - xmm10 vmulps %xmm10, %xmm9, %xmm10 vfmadd231ps %xmm24, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm24) + xmm10 vfmadd231ps %xmm25, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm25) + xmm10 vbroadcastss 0x2dd66(%rip), %xmm25 # 0x1eeba20 vaddps %xmm14, %xmm17, %xmm14 vaddps %xmm21, %xmm19, %xmm17 vaddps %xmm22, %xmm20, %xmm19 vmulps %xmm2, %xmm17, %xmm20 vfmsub231ps %xmm19, %xmm1, %xmm20 # xmm20 = (xmm1 * xmm19) - xmm20 vmulps %xmm0, %xmm19, %xmm19 vfmsub231ps %xmm14, %xmm2, %xmm19 # xmm19 = (xmm2 * xmm14) - xmm19 vmulps %xmm1, %xmm14, %xmm14 vfmsub231ps %xmm17, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm17) - xmm14 vmulps %xmm14, %xmm9, %xmm21 vfmadd231ps %xmm19, %xmm7, %xmm21 # xmm21 = (xmm7 * xmm19) + xmm21 vfmadd231ps %xmm20, %xmm5, %xmm21 # xmm21 = (xmm5 * xmm20) + xmm21 vaddps %xmm3, %xmm10, %xmm14 vaddps %xmm14, %xmm21, %xmm14 vandps 0x631b0(%rip){1to4}, %xmm14, %xmm17 # 0x1f20ec4 vmulps 0x631ae(%rip){1to4}, %xmm17, %xmm19 # 0x1f20ecc vminps %xmm10, %xmm3, %xmm20 vminps %xmm21, %xmm20, %xmm20 vbroadcastss 0x6318c(%rip), %xmm22 # 0x1f20ec0 vxorps %xmm22, %xmm19, %xmm22 vcmpnltps %xmm22, %xmm20, %k0 vmaxps %xmm10, %xmm3, %xmm20 vmaxps %xmm21, %xmm20, %xmm20 vcmpleps %xmm19, %xmm20, %k1 korw %k1, %k0, %k0 kshiftlb $0x4, %k0, %k0 kshiftrb $0x4, %k0, %k0 kortestb %k0, %k0 movl $0x1, %edi je 0x1ebe314 vmulps %xmm18, %xmm12, %xmm19 vmulps %xmm16, %xmm11, %xmm20 vmulps %xmm15, %xmm13, %xmm21 vmulps %xmm1, %xmm16, %xmm22 vmulps %xmm2, %xmm15, %xmm23 vmulps %xmm0, %xmm18, %xmm24 vfmsub213ps %xmm19, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) - xmm19 vfmsub213ps %xmm20, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm12) - xmm20 vfmsub213ps %xmm21, %xmm18, %xmm11 # xmm11 = (xmm18 * xmm11) - xmm21 vfmsub213ps %xmm22, %xmm18, %xmm2 # xmm2 = (xmm18 * xmm2) - xmm22 
vfmsub213ps %xmm23, %xmm16, %xmm0 # xmm0 = (xmm16 * xmm0) - xmm23 vfmsub213ps %xmm24, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm1) - xmm24 vbroadcastss 0x630ff(%rip), %xmm18 # 0x1f20ec4 vandps %xmm18, %xmm19, %xmm15 vandps %xmm18, %xmm22, %xmm16 vcmpltps %xmm16, %xmm15, %k1 vandps %xmm18, %xmm20, %xmm15 vandps %xmm18, %xmm23, %xmm16 vcmpltps %xmm16, %xmm15, %k2 vandps %xmm18, %xmm21, %xmm15 vandps %xmm18, %xmm24, %xmm16 vcmpltps %xmm16, %xmm15, %k3 vmovaps %xmm13, %xmm2 {%k1} vmovaps %xmm12, %xmm0 {%k2} vmovaps %xmm11, %xmm1 {%k3} vmulps %xmm1, %xmm9, %xmm9 vfmadd213ps %xmm9, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm7) + xmm9 vfmadd213ps %xmm7, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm5) + xmm7 vaddps %xmm5, %xmm5, %xmm5 vmulps %xmm1, %xmm8, %xmm7 vfmadd213ps %xmm7, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm7 vfmadd213ps %xmm6, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + xmm6 vaddps %xmm4, %xmm4, %xmm4 vrcp14ps %xmm5, %xmm6 vmovaps %xmm6, %xmm7 vfnmadd213ps %xmm26, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm26 vfmadd132ps %xmm6, %xmm6, %xmm7 # xmm7 = (xmm7 * xmm6) + xmm6 vmulps %xmm7, %xmm4, %xmm6 vcmpgeps 0xc(%rsi){1to4}, %xmm6, %k1 vbroadcastss 0x63062(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm5, %xmm4 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 {%k1} vcmpneqps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ebe314 vmovaps %xmm3, 0x1a0(%rsp) vmovaps %xmm10, 0x1b0(%rsp) vmovaps %xmm14, 0x1c0(%rsp) movq %rax, 0x1d0(%rsp) kmovb %k1, 0x1d8(%rsp) vmovaps %xmm6, 0x200(%rsp) vmovaps %xmm2, 0x210(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps %xmm1, 0x230(%rsp) vcmpnltps 0x3310e(%rip){1to4}, %xmm17, %k2 # 0x1ef0fe8 vrcp14ps %xmm14, %xmm0 vfnmadd213ps %xmm26, %xmm0, %xmm14 # xmm14 = -(xmm0 * xmm14) + xmm26 vfmadd132ps %xmm0, %xmm0, %xmm14 {%k2} {z} # xmm14 {%k2} {z} = (xmm14 * xmm0) + xmm0 vmulps %xmm3, %xmm14, %xmm0 vminps %xmm26, %xmm0, %xmm0 vmovaps %xmm0, 0x1e0(%rsp) vmulps %xmm10, %xmm14, %xmm0 vminps %xmm26, %xmm0, %xmm0 vmovaps %xmm0, 0x1f0(%rsp) kmovd %k1, %r13d vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax cmovel %r13d, %eax movzbl %al, %eax tzcntq %rax, %r12 movl 0x240(%rsp,%r12,4), %ebx movq 0x1e8(%r9), %rax movq (%rax,%rbx,8), %r14 movl 0x24(%rsi), %eax testl %eax, 0x34(%r14) je 0x1ebdf80 movq 0x10(%r8), %r15 cmpq $0x0, 0x10(%r15) jne 0x1ebdfeb cmpq $0x0, 0x40(%r14) jne 0x1ebdfeb xorl %eax, %eax jmp 0x1ebdf9b shlxl %r12d, %edi, %eax kmovd %eax, %k0 movzbl %r13b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r13d movb $0x1, %al testb %al, %al je 0x1ebe29d testb %r13b, %r13b je 0x1ebe314 kmovd %r13d, %k1 vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %r13b, %al movzbl %al, %eax movzbl %r13b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %r12d jmp 0x1ebdf4e vmovss 0x1e0(%rsp,%r12,4), %xmm0 vmovss 0x1f0(%rsp,%r12,4), %xmm1 movq 0x8(%r8), %rax movl 0xf0(%rsp,%r12,4), %ecx vmovss 0x210(%rsp,%r12,4), %xmm2 vmovss 0x220(%rsp,%r12,4), %xmm3 vmovss 0x230(%rsp,%r12,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movl %ebx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 
0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x30(%rsp) vmovss 0x200(%rsp,%r12,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x34(%rsp) # imm = 0xFFFFFFFF leaq 0x34(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%r14), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %rsi, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq 0x40(%r14), %rax testq %rax, %rax vmovaps %xmm6, 0xe0(%rsp) je 0x1ebe179 leaq 0x80(%rsp), %rdi movq %r10, 0x38(%rsp) vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 movq 0x28(%rsp), %r9 movl $0x1, %edi vbroadcastss 0x2d8fe(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 movq 0x10(%rsp), %r11 vmovups 0x140(%rsp), %ymm29 movq 0x38(%rsp), %r10 vmovups 0x160(%rsp), %ymm28 vmovups 0x180(%rsp), %ymm27 vbroadcastss 0x2e5b6(%rip), %xmm26 # 0x1eec714 movq 0x18(%rsp), %rsi movq 0x20(%rsp), %r8 movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ebe26c movq 0x10(%r15), %rax testq %rax, %rax je 0x1ebe211 testb $0x2, (%r15) jne 0x1ebe193 testb $0x40, 0x3e(%r14) je 0x1ebe204 leaq 0x80(%rsp), %rdi movq %r10, %r14 vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 movq 0x28(%rsp), %r9 movl $0x1, %edi vbroadcastss 0x2d860(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 movq 0x10(%rsp), %r11 vmovups 0x140(%rsp), %ymm29 movq %r14, %r10 vmovups 0x160(%rsp), %ymm28 vmovups 0x180(%rsp), %ymm27 vbroadcastss 0x2e51a(%rip), %xmm26 # 0x1eec714 movq 0x18(%rsp), %rsi movq 0x20(%rsp), %r8 movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ebe26c movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ebe277 vmovss 0x30(%rsp), %xmm0 vmovss %xmm0, 0x20(%rsi) shlxl %r12d, %edi, %eax kmovd %eax, %k0 movzbl %r13b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 kandb %k1, %k0, %k0 jmp 0x1ebdf95 vmovss 0x1e0(%rsp,%r12,4), %xmm0 vmovss 0x1f0(%rsp,%r12,4), %xmm1 vmovss 0x200(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x210(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x220(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x230(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movl 0xf0(%rsp,%r12,4), %eax movl %eax, 0x44(%rsi) movl %ebx, 0x48(%rsi) movq 0x8(%r8), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) movq 0x78(%rsp), %rax incq %rax cmpq 0x70(%rsp), %rax jne 0x1ebda66 vbroadcastss 0x20(%rsi), %ymm0 vmovups 0x2d0(%rsp), %ymm8 vmovups 0x2b0(%rsp), %ymm9 vmovups 0x290(%rsp), %ymm10 vmovups 0x270(%rsp), %ymm11 movq 0x60(%rsp), %rdi movq 0x58(%rsp), %r9 movq 0x50(%rsp), %rbx movq 0x48(%rsp), %r14 movq 0x40(%rsp), %r15 vmovups 0x250(%rsp), %ymm12 vpmovsxbd 0x9f6f4(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0xa3237(%rip), %ymm14 # 0x1f615bc leaq 0x310(%rsp), %r12 vmovdqu64 0x2f0(%rsp), %ymm16 movq 0x68(%rsp), %r13 jmp 0x1ebd56f
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                        ? query->radius * query->radius
                        : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                      ? query->radius * query->radius
                      : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
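The pop loop above is the core of the point query: every stack entry carries a conservative squared distance to the query point, and entries that were pushed before a primitive hit shrank the cull radius are discarded when popped. Below is a minimal standalone sketch of that control flow; all names are hypothetical stand-ins for Embree's templated types, and the 0x8 leaf tag mirrors the `testb $0x8` checks visible in the assembly that follows.

#include <cstdint>
#include <cstddef>

struct StackItem {
  uint64_t ptr;  // encoded node reference
  float dist;    // squared lower-bound distance from the node to the query point
};

// Leaf tag in the low bits of the node reference (the `testb $0x8` in the asm).
inline bool isLeaf(uint64_t ref) { return (ref & 8) != 0; }

// intersectNode pushes surviving children through the stack-pointer reference;
// intersectLeaf may shrink cullRadius2 when it finds a closer primitive.
template <typename IntersectNode, typename IntersectLeaf>
bool pointQuerySketch(uint64_t root, float cullRadius2,
                      IntersectNode intersectNode, IntersectLeaf intersectLeaf)
{
  constexpr std::size_t stackSize = 64;
  StackItem stack[stackSize];
  StackItem* stackPtr = stack;
  *stackPtr++ = { root, -1.0f };           // the root is always visited

  bool changed = false;
  while (stackPtr != stack) {
    const StackItem cur = *--stackPtr;
    if (cur.dist > cullRadius2) continue;  // radius shrank since this was pushed
    if (isLeaf(cur.ptr))
      changed |= intersectLeaf(cur.ptr, cullRadius2);
    else
      intersectNode(cur.ptr, cullRadius2, stackPtr);
  }
  return changed;
}

The invariant that makes the early `continue` safe is that `dist` is a lower bound on the true node distance, so an entry whose bound already exceeds the shrunken radius cannot contain a closer result.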
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ebe3b3 xorl %eax, %eax jmp 0x1ebeaf4 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r12 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ebe3f4 vmovss 0x10(%r12), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ebe3ff vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r12), %ymm6 vbroadcastss 0x4(%r12), %ymm5 vbroadcastss 0x8(%r12), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x9f621(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa3164(%rip), %ymm12 # 0x1f615bc vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ebeadf vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ebe4ce movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ebe5dd testb $0x8, %bpl jne 0x1ebe568 vmovaps 0x40(%rbp), %ymm0 vmovaps 0x60(%rbp), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%rbp), %ymm5, %ymm3 vminps 0xa0(%rbp), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%rbp), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%rbp), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %bpl jne 0x1ebe6dd testq %rdi, %rdi je 0x1ebe6e7 andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ebe6f1 testl %eax, %eax je 0x1ebe4eb jmp 0x1ebe9b6 testb $0x8, %bpl jne 0x1ebe568 vmovaps 0xc0(%rbp), %ymm0 vmovaps 0x40(%rbp), %ymm1 vmovaps 0x60(%rbp), %ymm2 vmovaps 0x80(%rbp), %ymm3 vmovaps 0xa0(%rbp), %ymm4 vmovaps 0xe0(%rbp), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, %k3, 
%k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ebe568 movl $0x6, %eax jmp 0x1ebe5d0 movl $0x4, %eax jmp 0x1ebe5d0 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ebe755 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ebe5d0 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ebe7ea vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ebe5d0 movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ebe8bc vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ebe5d0 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x625f4(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa2d4b(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa2d44(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa2d37(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x53df5(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ebe902 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ebe96c vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 
%rbp vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ebe8af cmpl $0x6, %eax jne 0x1ebe4ce movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ebe4ce movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %rbp addq $0x40, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) xorl %r13d, %r13d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ebea3e movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r12, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 cmpq $0x4, %r14 jne 0x1ebe9fe movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0x50, %rbp cmpq 0x60(%rsp), %r15 jne 0x1ebe9f3 testb $0x1, %al movq 0x58(%rsp), %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x9eff2(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa2b35(%rip), %ymm12 # 0x1f615bc vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ebe4ce vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ebeac9 vmovss 0x10(%r12), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ebead4 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ebe464 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvIntersector1Woop<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
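Before traversal starts, the TravRay constructor hoists the per-ray work out of the node test: a safely clamped reciprocal of the direction (the `vrcp14ps` plus one Newton-Raphson refinement step in the assembly below) and, per axis, which box plane counts as near (the direction-sign tests feeding the 0x20-byte offset toggles). A scalar sketch under hypothetical names:

#include <algorithm>
#include <cmath>

struct TravRaySketch {
  float org[3];         // ray origin
  float rdir[3];        // safely clamped reciprocal direction
  bool  nearIsUpper[3]; // true if the upper box plane is the near plane (dir < 0)
  float tnear, tfar;

  TravRaySketch(const float o[3], const float d[3], float tn, float tf)
  {
    for (int i = 0; i < 3; i++) {
      org[i] = o[i];
      // clamp tiny components so axis-aligned rays do not produce inf/NaN
      const float di = std::fabs(d[i]) < 1e-18f ? std::copysign(1e-18f, d[i]) : d[i];
      rdir[i] = 1.0f / di;
      nearIsUpper[i] = rdir[i] < 0.0f;
    }
    tnear = std::max(tn, 0.0f);
    tfar  = std::max(tf, 0.0f);
  }
};

// Slab test against one AABB using only the precomputed values.
inline bool hitBox(const TravRaySketch& r, const float lo[3], const float hi[3])
{
  float t0 = r.tnear, t1 = r.tfar;
  for (int i = 0; i < 3; i++) {
    const float nearP = r.nearIsUpper[i] ? hi[i] : lo[i];
    const float farP  = r.nearIsUpper[i] ? lo[i] : hi[i];
    t0 = std::max(t0, (nearP - r.org[i]) * r.rdir[i]);
    t1 = std::min(t1, (farP  - r.org[i]) * r.rdir[i]);
  }
  return t0 <= t1;
}

In the vectorized code the subtraction disappears as well: the negated products org*rdir are precomputed once (the `vxorps` of the broadcast products in the listing below), so each plane costs a single fused multiply-add across eight children at a time.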
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ebebd1 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2658, %rsp # imm = 0x2658 vmovdqa 0x10(%rsi), %xmm0 vpandd 0x6239c(%rip){1to4}, %xmm0, %xmm6 # 0x1f20ec4 vprolq $0x20, %xmm6, %xmm1 xorl %ecx, %ecx vucomiss %xmm1, %xmm6 setbe %cl vshufps $0x4e, %xmm6, %xmm6, %xmm2 # xmm2 = xmm6[2,3,0,1] vmaxss %xmm1, %xmm6, %xmm1 vucomiss %xmm2, %xmm1 movl $0x2, %edi cmoval %ecx, %edi leal 0x1(%rdi), %ecx xorl %r8d, %r8d cmpl $0x3, %ecx cmovel %r8d, %ecx leal 0x1(%rcx), %r9d cmpl $0x3, %r9d cmovel %r8d, %r9d movl %edi, %r8d vmovss 0x10(%rsi,%r8,4), %xmm2 vxorps %xmm1, %xmm1, %xmm1 vucomiss %xmm2, %xmm1 movl %ecx, %r10d cmoval %r9d, %r10d cmoval %ecx, %r9d vmovss 0x10(%rsi,%r10,4), %xmm4 vmovss 0x10(%rsi,%r9,4), %xmm3 vbroadcastss (%rsi,%r10,4), %xmm12 vbroadcastss (%rsi,%r9,4), %xmm13 vbroadcastss (%rsi,%r8,4), %xmm14 movq 0x70(%rax), %rax movq %rax, 0x310(%rsp) movl $0x0, 0x318(%rsp) cmpq $0x8, %rax jne 0x1ebebd5 addq $0x2658, %rsp # imm = 0x2658 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vxorps %xmm15, %xmm15, %xmm15 vmovss %xmm2, %xmm15, %xmm5 # xmm5 = xmm2[0],xmm15[1,2,3] vrcp14ss %xmm5, %xmm15, %xmm5 vfnmadd213ss 0x3240b(%rip), %xmm5, %xmm2 # xmm2 = -(xmm5 * xmm2) + mem vmulss %xmm2, %xmm5, %xmm2 vmulss %xmm2, %xmm3, %xmm3 vmulss %xmm2, %xmm4, %xmm4 leaq 0x320(%rsp), %r11 vmaxss 0xc(%rsi), %xmm1, %xmm5 vmaxss 0x20(%rsi), %xmm1, %xmm7 vbroadcastss 0x323d4(%rip), %xmm8 # 0x1ef0fe8 vcmpltps %xmm8, %xmm6, %k1 vmovaps %xmm8, %xmm0 {%k1} vrcp14ps %xmm0, %xmm6 vfnmadd213ps 0x2dae3(%rip){1to4}, %xmm6, %xmm0 # xmm0 = -(xmm6 * xmm0) + mem vfmadd132ps %xmm6, %xmm6, %xmm0 # xmm0 = (xmm0 * xmm6) + xmm6 xorl %ebx, %ebx vucomiss %xmm1, %xmm0 setb %bl vbroadcastss %xmm0, %ymm16 vmovshdup %xmm0, %xmm6 # xmm6 = xmm0[1,1,3,3] vbroadcastsd %xmm6, %ymm17 vbroadcastss 0x62284(%rip), %ymm8 # 0x1f20edc vshufpd $0x1, %xmm0, %xmm0, %xmm9 # xmm9 = xmm0[1,0] vpermps %ymm0, %ymm8, %ymm18 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss 0x53a93(%rip), %ymm19 # 0x1f12704 vbroadcastss %xmm0, %ymm10 vpermps %ymm0, %ymm19, %ymm11 vpermps %ymm0, %ymm8, %ymm8 shll $0x5, %ebx xorl %r14d, %r14d vucomiss %xmm1, %xmm6 setb %r14b shll $0x5, %r14d orq $0x40, %r14 xorl %ebp, %ebp vucomiss %xmm1, %xmm9 setb %bpl shll $0x5, %ebp orq $0x80, %rbp movq %rbx, %rdi xorq $0x20, %rdi movq %r14, %rax xorq $0x20, %rax movq %rax, 0x88(%rsp) movq %rbp, %r15 xorq $0x20, %r15 vbroadcastss %xmm7, %ymm0 vbroadcastss 0x621ea(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm10, %ymm20 vxorps %ymm1, %ymm11, %ymm21 vxorps %ymm1, %ymm8, %ymm22 vbroadcastss %xmm5, %ymm23 vbroadcastss %xmm4, %xmm24 vbroadcastss %xmm3, %xmm25 vbroadcastss %xmm2, %xmm26 vpmovsxbd 0x9ed66(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa28a8(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x621a2(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa28f8(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa28f6(%rip), %ymm31 # 0x1f61628 movq %rdi, 0x8(%rsp) movq %rbx, 0x80(%rsp) movq %r14, 0x18(%rsp) movq %rbp, 0x78(%rsp) vmovss 0x20(%rsi), %xmm1 leaq 0x310(%rsp), %rax cmpq %rax, %r11 je 0x1ebebc0 vmovss -0x8(%r11), %xmm2 addq $-0x10, %r11 vucomiss %xmm1, %xmm2 ja 0x1ebed4e movq (%r11), %r13 testb $0x8, %r13b jne 0x1ebedfc vmovaps 0x40(%r13,%rbx), %ymm1 vfmadd132ps %ymm16, %ymm20, %ymm1 # ymm1 = (ymm1 * ymm16) + ymm20 vmovaps 0x40(%r13,%r14), %ymm2 vfmadd132ps %ymm17, %ymm21, %ymm2 # ymm2 = (ymm2 * ymm17) + ymm21 vmovaps 0x40(%r13,%rbp), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm18, %ymm22, %ymm3 # ymm3 = (ymm3 * ymm18) + ymm22 
vpmaxsd %ymm23, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm8 vmovaps 0x40(%r13,%rdi), %ymm1 vfmadd132ps %ymm16, %ymm20, %ymm1 # ymm1 = (ymm1 * ymm16) + ymm20 movq 0x88(%rsp), %rax vmovaps 0x40(%r13,%rax), %ymm2 vfmadd132ps %ymm17, %ymm21, %ymm2 # ymm2 = (ymm2 * ymm17) + ymm21 vmovaps 0x40(%r13,%r15), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm18, %ymm22, %ymm3 # ymm3 = (ymm3 * ymm18) + ymm22 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm8, %k0 kmovb %k0, %r12d testb $0x8, %r13b jne 0x1ebee6e testq %r12, %r12 je 0x1ebee75 andq $-0x10, %r13 vmovdqu (%r13), %ymm1 vmovdqu 0x20(%r13), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm8, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ebee7c testl %eax, %eax je 0x1ebed72 jmp 0x1ebf13f movl $0x6, %eax jmp 0x1ebee61 movl $0x4, %eax jmp 0x1ebee61 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ebeee5 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r13 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r11) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r11) addq $0x10, %r11 movq 0x8(%rsp), %rdi jmp 0x1ebee61 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ebef6e vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r11) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r11) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r11) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r11) addq $0x20, %r11 jmp 0x1ebeedb vmovdqa %ymm8, %ymm9 movq %rbx, %r14 movq %r11, %rbx movq %r10, %r11 movq %r8, %r10 movq %rsi, %r8 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ebf04e vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%rbx) vmovdqa %ymm9, %ymm8 vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rbx) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%rbx) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x18(%rbx) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%rbx) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%rbx) addq $0x30, %rbx movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %rbx, %r11 vxorps %xmm15, %xmm15, %xmm15 movq %r14, %rbx movq 0x18(%rsp), %r14 jmp 0x1ebeedb valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm6, %ymm31, %ymm3 vpmovsxbd 0xa25ba(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, 
%ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm19, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ebf07f popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%rbx) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%rbx) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %rbx vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ebf0e1 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r13 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 vxorps %xmm15, %xmm15, %xmm15 movq %rbx, %r11 movq %r14, %rbx movq 0x18(%rsp), %r14 movq 0x8(%rsp), %rdi vmovdqa %ymm9, %ymm8 jmp 0x1ebee61 cmpl $0x6, %eax jne 0x1ebed49 vmovdqu %ymm8, 0x2f0(%rsp) movl %r13d, %ebp andl $0xf, %ebp addq $-0x8, %rbp je 0x1ebf8df andq $-0x10, %r13 xorl %ebx, %ebx imulq $0xb0, %rbx, %r14 addq %r13, %r14 movq %r10, %rax shlq $0x4, %rax movq %r9, %rcx shlq $0x4, %rcx movq %r8, %rdi vmovaps (%rax,%r14), %xmm0 shlq $0x4, %rdi vmovaps 0x30(%rax,%r14), %xmm1 vmovaps 0x60(%rax,%r14), %xmm2 vsubps %xmm12, %xmm0, %xmm8 vmovaps (%rcx,%r14), %xmm0 vmovaps 0x30(%rcx,%r14), %xmm4 vmovaps 0x60(%rcx,%r14), %xmm5 vmovaps (%rdi,%r14), %xmm3 vmovaps 0x30(%rdi,%r14), %xmm6 vmovaps 0x60(%rdi,%r14), %xmm7 vsubps %xmm13, %xmm0, %xmm9 vsubps %xmm14, %xmm3, %xmm3 vsubps %xmm12, %xmm1, %xmm10 vsubps %xmm13, %xmm4, %xmm11 vsubps %xmm14, %xmm6, %xmm4 vsubps %xmm12, %xmm2, %xmm6 vsubps %xmm13, %xmm5, %xmm2 vsubps %xmm14, %xmm7, %xmm5 vfnmadd231ps %xmm24, %xmm3, %xmm8 # xmm8 = -(xmm3 * xmm24) + xmm8 vfnmadd231ps %xmm25, %xmm3, %xmm9 # xmm9 = -(xmm3 * xmm25) + xmm9 vfnmadd231ps %xmm24, %xmm4, %xmm10 # xmm10 = -(xmm4 * xmm24) + xmm10 vfnmadd231ps %xmm25, %xmm4, %xmm11 # xmm11 = -(xmm4 * xmm25) + xmm11 vfnmadd231ps %xmm24, %xmm5, %xmm6 # xmm6 = -(xmm5 * xmm24) + xmm6 vfnmadd231ps %xmm25, %xmm5, %xmm2 # xmm2 = -(xmm5 * xmm25) + xmm2 vmulps %xmm6, %xmm11, %xmm0 vmulps %xmm2, %xmm10, %xmm1 vmulps %xmm2, %xmm8, %xmm2 vmulps %xmm6, %xmm9, %xmm6 vmulps %xmm10, %xmm9, %xmm7 vmulps %xmm11, %xmm8, %xmm8 vcmpnltps %xmm1, %xmm0, %k1 vcmpnltps %xmm8, %xmm7, %k1 {%k1} vcmpnltps %xmm6, %xmm2, %k0 {%k1} vcmpleps %xmm1, %xmm0, %k1 vcmpleps %xmm8, %xmm7, %k1 {%k1} vcmpleps %xmm6, %xmm2, %k1 {%k1} korw %k1, %k0, %k0 kshiftlb $0x4, %k0, %k0 kshiftrb $0x4, %k0, %k0 kortestb %k0, %k0 jne 0x1ebf27e incq %rbx cmpq %rbp, %rbx jne 0x1ebf167 jmp 0x1ebf8df vsubps %xmm1, %xmm0, %xmm1 vsubps %xmm6, %xmm2, %xmm0 vsubps %xmm8, %xmm7, %xmm6 vaddps %xmm0, %xmm1, %xmm2 vaddps %xmm2, %xmm6, %xmm2 vcmpneqps %xmm15, %xmm2, %k1 vrcp14ps %xmm2, %xmm7 vfnmadd213ps 0x2d46a(%rip){1to4}, %xmm7, %xmm2 # xmm2 = -(xmm7 * xmm2) + mem vfmadd132ps %xmm7, %xmm7, %xmm2 # xmm2 = (xmm2 * xmm7) + xmm7 vmulps %xmm3, %xmm26, %xmm3 vmulps %xmm4, %xmm26, %xmm4 vmulps %xmm5, %xmm26, %xmm5 vmulps %xmm5, %xmm6, %xmm5 vfmadd231ps %xmm4, %xmm0, %xmm5 # xmm5 = (xmm0 * xmm4) + xmm5 vfmadd231ps %xmm3, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm3) + xmm5 vmulps %xmm2, %xmm5, %xmm10 vcmpgtps 0xc(%rsi){1to4}, %xmm10, %k1 {%k1} vcmpleps 0x20(%rsi){1to4}, %xmm10, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ebf26d vmovaps (%r14), %xmm3 vmovaps 0x10(%r14), %xmm4 vmovaps 0x20(%r14), %xmm5 vmovaps 
0x60(%r14), %xmm6 vsubps %xmm3, %xmm6, %xmm6 vmovaps 0x70(%r14), %xmm7 vsubps %xmm4, %xmm7, %xmm7 vmovaps 0x80(%r14), %xmm8 vsubps %xmm5, %xmm8, %xmm8 vsubps 0x30(%r14), %xmm3, %xmm3 vsubps 0x40(%r14), %xmm4, %xmm4 vsubps 0x50(%r14), %xmm5, %xmm5 vmulps %xmm4, %xmm8, %xmm9 vfmsub231ps %xmm5, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm5) - xmm9 vmulps %xmm6, %xmm5, %xmm5 vfmsub231ps %xmm8, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm8) - xmm5 vmulps %xmm7, %xmm3, %xmm3 vfmsub231ps %xmm4, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm4) - xmm3 vmovaps %xmm1, 0xf0(%rsp) vmovaps %xmm0, 0x100(%rsp) vmovaps %xmm10, 0x110(%rsp) vmovaps %xmm2, 0x120(%rsp) kmovb %k1, 0x130(%rsp) vmovaps %xmm9, 0x170(%rsp) vmovaps %xmm5, 0x180(%rsp) vmovaps %xmm3, 0x190(%rsp) vmovaps %xmm10, 0x160(%rsp) vmulps %xmm2, %xmm1, %xmm1 vmovaps %xmm1, 0x140(%rsp) vmulps %xmm0, %xmm2, %xmm0 vmovaps %xmm0, 0x150(%rsp) movq (%rdx), %rax movq %rax, 0x70(%rsp) kmovd %k1, %ecx vbroadcastss 0x2c654(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm10, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax movl %ecx, 0x4(%rsp) cmovel %ecx, %eax movzbl %al, %eax tzcntq %rax, %rcx movl 0x90(%r14,%rcx,4), %edi movq 0x70(%rsp), %rax movq 0x1e8(%rax), %rax movq %rdi, 0x38(%rsp) movq (%rax,%rdi,8), %rdi movl 0x24(%rsi), %eax testl %eax, 0x34(%rdi) movq %rcx, 0x10(%rsp) je 0x1ebf44e movq 0x10(%rdx), %rax cmpq $0x0, 0x10(%rax) movl 0x4(%rsp), %ecx jne 0x1ebf4c7 cmpq $0x0, 0x40(%rdi) jne 0x1ebf4c7 xorl %eax, %eax jmp 0x1ebf46f movl $0x1, %eax shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0x4(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al testb %al, %al je 0x1ebf85f testb %cl, %cl je 0x1ebf26d kmovd %ecx, %k1 vbroadcastss 0x2c594(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm10, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0x4(%rsp) movzbl %cl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %ecx jmp 0x1ebf406 movq %rax, 0x28(%rsp) vmovaps %xmm10, 0x1a0(%rsp) vmovaps %xmm26, 0x1b0(%rsp) vmovaps %xmm25, 0x1c0(%rsp) vmovaps %xmm24, 0x1d0(%rsp) vmovups %ymm23, 0x210(%rsp) vmovups %ymm22, 0x230(%rsp) vmovups %ymm21, 0x250(%rsp) vmovups %ymm20, 0x270(%rsp) vmovups %ymm18, 0x290(%rsp) vmovups %ymm17, 0x2b0(%rsp) vmovups %ymm16, 0x2d0(%rsp) movq %r11, 0x48(%rsp) vmovaps %xmm14, 0x1e0(%rsp) vmovaps %xmm13, 0x1f0(%rsp) vmovaps %xmm12, 0x200(%rsp) movq %r10, 0x50(%rsp) movq %r8, 0x58(%rsp) movq %r9, 0x60(%rsp) movq %rdi, %r8 movq 0x10(%rsp), %rdi vmovss 0x140(%rsp,%rdi,4), %xmm0 vmovss 0x150(%rsp,%rdi,4), %xmm1 movq %rdx, 0x68(%rsp) movq 0x8(%rdx), %rax movl 0xa0(%r14,%rdi,4), %ecx vmovss 0x170(%rsp,%rdi,4), %xmm2 vmovss 0x180(%rsp,%rdi,4), %xmm3 vmovss 0x190(%rsp,%rdi,4), %xmm4 vmovss %xmm2, 0xc0(%rsp) vmovss %xmm3, 0xc4(%rsp) vmovss %xmm4, 0xc8(%rsp) vmovss %xmm0, 0xcc(%rsp) vmovss %xmm1, 0xd0(%rsp) movl %ecx, 0xd4(%rsp) movq 0x38(%rsp), %rcx movl %ecx, 0xd8(%rsp) movl (%rax), %ecx movl %ecx, 0xdc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xe0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x160(%rsp,%rdi,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x90(%rsp) movq 
0x18(%r8), %rcx movq %rcx, 0x98(%rsp) movq %rax, 0xa0(%rsp) movq %rsi, 0x30(%rsp) movq %rsi, 0xa8(%rsp) leaq 0xc0(%rsp), %rax movq %rax, 0xb0(%rsp) movl $0x1, 0xb8(%rsp) movq %r8, 0x40(%rsp) movq 0x40(%r8), %rax testq %rax, %rax je 0x1ebf698 leaq 0x90(%rsp), %rdi vzeroupper callq *%rax movq 0x90(%rsp), %rax cmpl $0x0, (%rax) je 0x1ebf730 movq 0x28(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1ebf6d5 movq 0x28(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ebf6bb movq 0x40(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1ebf6c8 leaq 0x90(%rsp), %rdi vzeroupper callq *%rax movq 0x90(%rsp), %rax cmpl $0x0, (%rax) je 0x1ebf730 movq 0xa8(%rsp), %rax movq 0xb0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ebf740 movq 0x30(%rsp), %rax vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%rax) movl $0x1, %eax movq 0x10(%rsp), %rcx shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0x4(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 movq 0x30(%rsp), %rsi vmovaps 0x1a0(%rsp), %xmm10 vcmpleps 0x20(%rsi){1to4}, %xmm10, %k1 kandb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x68(%rsp), %rdx movq 0x60(%rsp), %r9 movq 0x58(%rsp), %r8 movq 0x50(%rsp), %r10 vmovaps 0x200(%rsp), %xmm12 vmovaps 0x1f0(%rsp), %xmm13 vmovaps 0x1e0(%rsp), %xmm14 vxorps %xmm15, %xmm15, %xmm15 movq 0x48(%rsp), %r11 vmovups 0x2d0(%rsp), %ymm16 vmovups 0x2b0(%rsp), %ymm17 vmovups 0x290(%rsp), %ymm18 vbroadcastss 0x52f20(%rip), %ymm19 # 0x1f12704 vmovups 0x270(%rsp), %ymm20 vmovups 0x250(%rsp), %ymm21 vmovups 0x230(%rsp), %ymm22 vmovups 0x210(%rsp), %ymm23 vmovaps 0x1d0(%rsp), %xmm24 vmovaps 0x1c0(%rsp), %xmm25 vmovaps 0x1b0(%rsp), %xmm26 vpmovsxbd 0x9e23e(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa1d80(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x6167a(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa1dd0(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa1dce(%rip), %ymm31 # 0x1f61628 jmp 0x1ebf46f movq 0x10(%rsp), %rax vmovss 0x140(%rsp,%rax,4), %xmm0 vmovss 0x150(%rsp,%rax,4), %xmm1 vmovss 0x160(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x170(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x180(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x190(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movl 0xa0(%r14,%rax,4), %eax movl %eax, 0x44(%rsi) movq 0x38(%rsp), %rax movl %eax, 0x48(%rsi) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) jmp 0x1ebf26d vbroadcastss 0x20(%rsi), %ymm0 movq 0x80(%rsp), %rbx movq 0x18(%rsp), %r14 movq 0x78(%rsp), %rbp movq 0x8(%rsp), %rdi vmovdqu 0x2f0(%rsp), %ymm8 jmp 0x1ebed49
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvIntersector1Woop<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                        ? query->radius * query->radius
                        : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                      ? query->radius * query->radius
                      : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
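For the sphere-type query, the node test that BVHNNodePointQuerySphere1 vectorizes across the eight children is visible in the assembly below as a clamp of the query point into each box (`vmaxps`/`vminps`), a squared-distance accumulation, and a `vcmpleps` against the squared radius. The scalar equivalent, with hypothetical names:

#include <algorithm>

// Squared distance from point p to the axis-aligned box [lo, hi].
inline float sqrDistancePointBox(const float p[3], const float lo[3], const float hi[3])
{
  float d2 = 0.0f;
  for (int i = 0; i < 3; i++) {
    const float c = std::min(std::max(p[i], lo[i]), hi[i]); // closest point in the box
    const float d = c - p[i];
    d2 += d * d;
  }
  return d2;
}

inline bool sphereOverlapsBox(const float p[3], float radius2,
                              const float lo[3], const float hi[3])
{
  return sqrDistancePointBox(p, lo, hi) <= radius2;
}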
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ebf91b xorl %eax, %eax jmp 0x1ec0084 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %rbp movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ebf95a vmovss 0x10(%rbp), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ebf965 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 vbroadcastss (%rbp), %ymm6 vbroadcastss 0x4(%rbp), %ymm5 vbroadcastss 0x8(%rbp), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x9e0c5(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa1c08(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ec006f vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ebfa32 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ebfb57 testb $0x8, %r12b jne 0x1ebfad8 vmovaps 0x40(%r12), %ymm0 vmovaps 0x60(%r12), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%r12), %ymm5, %ymm3 vminps 0xa0(%r12), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%r12), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%r12), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %r12b jne 0x1ebfc67 testq %rdi, %rdi je 0x1ebfc71 andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ebfc7b testl %eax, %eax je 0x1ebfa4f jmp 0x1ebff40 testb $0x8, %r12b jne 0x1ebfad8 vmovaps 0xc0(%r12), %ymm0 vmovaps 0x40(%r12), %ymm1 vmovaps 0x60(%r12), %ymm2 vmovaps 0x80(%r12), %ymm3 vmovaps 0xa0(%r12), %ymm4 vmovaps 0xe0(%r12), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, 
%k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ebfad8 movl $0x6, %eax jmp 0x1ebfb4a movl $0x4, %eax jmp 0x1ebfb4a vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ebfcdf vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ebfb4a vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ebfd74 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ebfb4a movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ebfe46 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ebfb4a valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x6106a(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa17c1(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa17ba(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa17ad(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x5286b(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ebfe8c popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ebfef6 vpermt2q %ymm1, %ymm3, %ymm0 vmovq 
%xmm0, %r12 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ebfe39 cmpl $0x6, %eax jne 0x1ebfa32 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ebfa32 movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %r12 addq $0xa0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) movq $-0x4, %r14 xorl %r13d, %r13d movl (%r12,%r14,4), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ebffcd movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14,4), %eax movl %eax, 0x40(%rbx) movq %rbp, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 jne 0x1ebff90 movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0xb0, %r12 cmpq 0x60(%rsp), %r15 jne 0x1ebff81 testb $0x1, %al vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 movq 0x58(%rsp), %r8 vpmovsxbd 0x9da68(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0xa15ab(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ebfa32 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec0059 vmovss 0x10(%rbp), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ec0064 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ebf9c8 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvMBIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
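This instantiation is the motion-blur path: judging by the FMA pattern in the assembly below (`(xmm0 * xmm3) + mem` with xmm0 holding the broadcast ray time), TriangleMvMB appears to store each vertex as a base position plus a per-segment delta, so interpolating to the ray time is one fused multiply-add per component; the interpolated triangle is then intersected with a Möller-Trumbore-style test. A scalar sketch under hypothetical names, using the textbook formulation rather than the exact vectorized variant:

struct Vec3 { float x, y, z; };
inline Vec3 operator+(Vec3 a, Vec3 b) { return {a.x+b.x, a.y+b.y, a.z+b.z}; }
inline Vec3 operator-(Vec3 a, Vec3 b) { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
inline Vec3 operator*(float s, Vec3 a) { return {s*a.x, s*a.y, s*a.z}; }
inline Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
inline float dot(Vec3 a, Vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// One FMA per component when vertices are stored as (base, delta) pairs.
inline Vec3 lerpVertex(Vec3 base, Vec3 delta, float time) { return base + time*delta; }

// Classic Moeller-Trumbore ray/triangle intersection.
inline bool intersectTriangleMT(Vec3 org, Vec3 dir, Vec3 a, Vec3 b, Vec3 c,
                                float tnear, float tfar, float& tHit)
{
  const Vec3  e1 = b - a, e2 = c - a;
  const Vec3  pv = cross(dir, e2);
  const float det = dot(e1, pv);
  if (det == 0.0f) return false;                // ray parallel to the triangle
  const float inv = 1.0f / det;
  const Vec3  tv = org - a;
  const float u = dot(tv, pv) * inv;
  if (u < 0.0f || u > 1.0f) return false;       // outside first barycentric bound
  const Vec3  qv = cross(tv, e1);
  const float v = dot(dir, qv) * inv;
  if (v < 0.0f || u + v > 1.0f) return false;   // outside the triangle
  const float t = dot(e2, qv) * inv;
  if (t < tnear || t > tfar) return false;      // outside the valid ray segment
  tHit = t;
  return true;
}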
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec00d3 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x25e8, %rsp # imm = 0x25E8 movq 0x70(%rax), %rax movq %rax, 0x2a0(%rsp) movl $0x0, 0x2a8(%rsp) cmpq $0x8, %rax jne 0x1ec00d7 addq $0x25e8, %rsp # imm = 0x25E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r14 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x60dcd(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x30ee8(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x2c5f7(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x2b0(%rsp), %r8 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm1, %xmm0 setb %dil vbroadcastss %xmm0, %ymm19 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm20 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x60d8a(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm21 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x52599(%rip), %ymm22 # 0x1f12704 vpermps %ymm0, %ymm22, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm1, %xmm4 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm1, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %rsi xorq $0x20, %rsi movq %r10, %r15 xorq $0x20, %r15 vbroadcastss %xmm2, %ymm23 vbroadcastss 0x60cfa(%rip), %ymm1 # 0x1f20ec0 vbroadcastss %xmm3, %ymm0 vxorps %ymm1, %ymm7, %ymm24 vxorps %ymm1, %ymm8, %ymm25 vxorps %ymm1, %ymm6, %ymm26 vpmovsxbd 0x9d889(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa13cb(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x60cc5(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa141b(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa1419(%rip), %ymm31 # 0x1f61628 leaq 0x2a0(%rsp), %r13 vmovss 0x20(%r14), %xmm1 cmpq %r13, %r8 je 0x1ec00c2 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ec021d movq (%r8), %rbx testb $0x8, %bl jne 0x1ec0306 movq %rbx, %rax andq $-0x10, %rax vbroadcastss 0x1c(%r14), %ymm1 vmovaps 0x100(%rax,%rdi), %ymm2 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm24, %ymm19, %ymm2 # ymm2 = (ymm19 * ymm2) + ymm24 vmaxps %ymm2, %ymm23, %ymm2 vmovaps 0x100(%rax,%r9), %ymm3 vfmadd213ps 0x40(%rax,%r9), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r10), %ymm4 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm25, %ymm20, %ymm3 # ymm3 = (ymm20 * ymm3) + ymm25 vfmadd213ps %ymm26, %ymm21, %ymm4 # ymm4 = (ymm21 * ymm4) + ymm26 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm8 vmovaps 0x100(%rax,%r11), %ymm2 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vmovaps 0x100(%rax,%rsi), %ymm3 vfmadd213ps 0x40(%rax,%rsi), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vfmadd213ps %ymm24, %ymm19, %ymm2 # ymm2 = (ymm19 * ymm2) + ymm24 vfmadd213ps %ymm25, %ymm20, %ymm3 # ymm3 = (ymm20 * ymm3) + ymm25 vmovaps 0x100(%rax,%r15), %ymm4 vfmadd213ps 0x40(%rax,%r15), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm26, %ymm21, %ymm4 # ymm4 = (ymm21 * ymm4) + ymm26 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %ebx, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec037a vcmpleps %ymm2, %ymm8, %k0 kmovb %k0, %r12d testb $0x8, %bl jne 0x1ec0373 testq %r12, %r12 je 0x1ec0396 
andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm8, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ec039d testl %eax, %eax je 0x1ec0239 jmp 0x1ec065d movl $0x6, %eax jmp 0x1ec0366 vcmpleps %ymm2, %ymm8, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ec0302 movl $0x4, %eax jmp 0x1ec0366 movq %rsi, %rbp vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec0407 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 movq %rbp, %rsi jmp 0x1ec0366 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec0490 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec03ff vmovdqa %ymm8, %ymm9 movq %r11, %r13 movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec0571 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm9, %ymm8 vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r13, %r11 movq %rbp, %rsi leaq 0x2a0(%rsp), %r13 jmp 0x1ec0366 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm6, %ymm31, %ymm3 vpmovsxbd 0xa1097(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm22, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec05a2 popcntq 
%rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec0604 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r13, %r11 movq %rbp, %rsi leaq 0x2a0(%rsp), %r13 vmovdqa %ymm9, %ymm8 jmp 0x1ec0366 cmpl $0x6, %eax jne 0x1ec0217 vmovdqu %ymm8, 0x280(%rsp) movl %ebx, %eax andl $0xf, %eax addq $-0x8, %rax je 0x1ec0dc8 andq $-0x10, %rbx xorl %r13d, %r13d leaq (,%r13,4), %rbp addq %r13, %rbp shlq $0x6, %rbp vbroadcastss 0x1c(%r14), %xmm0 vmovaps 0x90(%rbx,%rbp), %xmm3 vmovaps 0xa0(%rbx,%rbp), %xmm5 vmovaps 0xb0(%rbx,%rbp), %xmm6 vmovaps 0xc0(%rbx,%rbp), %xmm1 vfmadd213ps (%rbx,%rbp), %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + mem vfmadd213ps 0x10(%rbx,%rbp), %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + mem vfmadd213ps 0x20(%rbx,%rbp), %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + mem vmovaps 0xd0(%rbx,%rbp), %xmm2 vmovaps 0xe0(%rbx,%rbp), %xmm4 vfmadd213ps 0x30(%rbx,%rbp), %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + mem vfmadd213ps 0x40(%rbx,%rbp), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ps 0x50(%rbx,%rbp), %xmm0, %xmm4 # xmm4 = (xmm0 * xmm4) + mem vmovaps 0xf0(%rbx,%rbp), %xmm7 vmovaps 0x100(%rbx,%rbp), %xmm8 vmovaps 0x110(%rbx,%rbp), %xmm9 vfmadd213ps 0x60(%rbx,%rbp), %xmm0, %xmm7 # xmm7 = (xmm0 * xmm7) + mem vfmadd213ps 0x70(%rbx,%rbp), %xmm0, %xmm8 # xmm8 = (xmm0 * xmm8) + mem vfmadd213ps 0x80(%rbx,%rbp), %xmm0, %xmm9 # xmm9 = (xmm0 * xmm9) + mem vsubps %xmm1, %xmm3, %xmm10 vsubps %xmm2, %xmm5, %xmm11 vsubps %xmm4, %xmm6, %xmm12 vsubps %xmm3, %xmm7, %xmm7 vsubps %xmm5, %xmm8, %xmm8 vsubps %xmm6, %xmm9, %xmm9 vmulps %xmm9, %xmm11, %xmm0 vfmsub231ps %xmm12, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm12) - xmm0 vmulps %xmm7, %xmm12, %xmm1 vfmsub231ps %xmm10, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm10) - xmm1 vmulps %xmm8, %xmm10, %xmm2 vbroadcastss 0x10(%r14), %xmm13 vbroadcastss 0x14(%r14), %xmm14 vbroadcastss 0x18(%r14), %xmm15 vsubps (%r14){1to4}, %xmm3, %xmm4 vfmsub231ps %xmm11, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm11) - xmm2 vsubps 0x4(%r14){1to4}, %xmm5, %xmm5 vsubps 0x8(%r14){1to4}, %xmm6, %xmm6 vmulps %xmm6, %xmm14, %xmm16 vfmsub231ps %xmm15, %xmm5, %xmm16 # xmm16 = (xmm5 * xmm15) - xmm16 vmulps %xmm4, %xmm15, %xmm17 vfmsub231ps %xmm13, %xmm6, %xmm17 # xmm17 = (xmm6 * xmm13) - xmm17 vmulps %xmm5, %xmm13, %xmm18 vfmsub231ps %xmm14, %xmm4, %xmm18 # xmm18 = (xmm4 * xmm14) - xmm18 vmulps %xmm2, %xmm15, %xmm15 vfmadd231ps %xmm14, %xmm1, %xmm15 # xmm15 = (xmm1 * xmm14) + xmm15 vfmadd231ps %xmm13, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm13) + xmm15 vandps 0x60701(%rip){1to4}, %xmm15, %xmm3 # 0x1f20ec4 vmulps %xmm18, %xmm9, %xmm13 vfmadd231ps %xmm8, %xmm17, %xmm13 # xmm13 = (xmm17 * xmm8) + xmm13 vfmadd231ps %xmm7, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm7) + xmm13 vandpd 0x60789(%rip){1to2}, %xmm15, %xmm9 # 0x1f20f68 vxorps %xmm13, %xmm9, %xmm7 vmulps %xmm18, %xmm12, %xmm8 vfmadd231ps %xmm17, %xmm11, %xmm8 # xmm8 = (xmm11 * xmm17) + xmm8 vfmadd231ps %xmm16, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm16) + xmm8 vxorps %xmm8, %xmm9, %xmm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %xmm10, %xmm7, %k1 vcmpnltps %xmm10, %xmm8, %k1 {%k1} vcmpneqps %xmm10, %xmm15, %k1 {%k1} vaddps %xmm7, %xmm8, %xmm10 vcmpleps %xmm3, %xmm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1ec0837 incq %r13 cmpq %rax, %r13 jne 0x1ec0685 jmp 0x1ec0dc8 vmulps %xmm6, %xmm2, %xmm6 vfmadd213ps %xmm6, %xmm1, %xmm5 # xmm5 = 
(xmm1 * xmm5) + xmm6 vfmadd213ps %xmm5, %xmm0, %xmm4 # xmm4 = (xmm0 * xmm4) + xmm5 vxorps %xmm4, %xmm9, %xmm4 vmulps 0xc(%r14){1to4}, %xmm3, %xmm5 vmulps 0x20(%r14){1to4}, %xmm3, %xmm6 vcmpleps %xmm6, %xmm4, %k1 vcmpltps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ec0826 movq %rax, 0x30(%rsp) vmovaps %xmm7, 0xe0(%rsp) vmovaps %xmm8, 0xf0(%rsp) vmovaps %xmm4, 0x100(%rsp) vmovaps %xmm3, 0x110(%rsp) kmovb %k1, 0x121(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovaps %xmm1, 0x170(%rsp) addq %rbx, %rbp vmovaps %xmm2, 0x180(%rsp) vrcp14ps %xmm3, %xmm0 vfnmadd213ps 0x2be45(%rip){1to4}, %xmm0, %xmm3 # xmm3 = -(xmm0 * xmm3) + mem vfmadd132ps %xmm0, %xmm0, %xmm3 # xmm3 = (xmm3 * xmm0) + xmm0 vmulps 0x100(%rsp), %xmm3, %xmm4 vmovaps %xmm4, 0x150(%rsp) vmulps 0xe0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x130(%rsp) vmulps 0xf0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x140(%rsp) movq %rdx, 0x10(%rsp) movq (%rdx), %rax movq %rax, 0x78(%rsp) kmovd %k1, %ecx vbroadcastss 0x2b0fc(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax movl %ecx, 0xc(%rsp) cmovel %ecx, %eax movzbl %al, %eax tzcntq %rax, %rdx movl 0x120(%rbp,%rdx,4), %ecx movq 0x78(%rsp), %rax movq 0x1e8(%rax), %rax movq %rcx, 0x38(%rsp) movq (%rax,%rcx,8), %rcx movl 0x24(%r14), %eax testl %eax, 0x34(%rcx) movq %rdx, 0x18(%rsp) je 0x1ec09b7 movq %rcx, %rax movq 0x10(%rsp), %rdx movq 0x10(%rdx), %rcx movq %rcx, 0x28(%rsp) cmpq $0x0, 0x10(%rcx) movl 0xc(%rsp), %ecx jne 0x1ec0a3a cmpq $0x0, 0x40(%rax) jne 0x1ec0a3a xorl %eax, %eax jmp 0x1ec09dd movl $0x1, %eax shlxl %edx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx testb %al, %al je 0x1ec0d3a testb %cl, %cl movq 0x30(%rsp), %rax je 0x1ec0826 kmovd %ecx, %k1 vbroadcastss 0x2b021(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0xc(%rsp) movzbl %cl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %edx jmp 0x1ec095e vmovaps %xmm4, 0x190(%rsp) vmovups %ymm26, 0x1a0(%rsp) vmovups %ymm25, 0x1c0(%rsp) vmovups %ymm24, 0x1e0(%rsp) vmovups %ymm23, 0x200(%rsp) movq %rsi, 0x48(%rsp) movq %r11, 0x50(%rsp) movq %r10, 0x58(%rsp) movq %r9, 0x60(%rsp) vmovups %ymm21, 0x220(%rsp) vmovups %ymm20, 0x240(%rsp) vmovups %ymm19, 0x260(%rsp) movq %rdi, 0x68(%rsp) movq %r8, 0x70(%rsp) movq 0x18(%rsp), %rsi vmovss 0x130(%rsp,%rsi,4), %xmm0 vmovss 0x140(%rsp,%rsi,4), %xmm1 movq %rax, %rdi movq 0x8(%rdx), %rax movl 0x130(%rbp,%rsi,4), %ecx vmovss 0x160(%rsp,%rsi,4), %xmm2 vmovss 0x170(%rsp,%rsi,4), %xmm3 vmovss 0x180(%rsp,%rsi,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movq 0x38(%rsp), %rcx movl %ecx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%r14), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x150(%rsp,%rsi,4), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rdi), %rcx movq %rcx, 
0x88(%rsp) movq %rax, 0x90(%rsp) movq %r14, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq %rdi, 0x40(%rsp) movq 0x40(%rdi), %rax testq %rax, %rax je 0x1ec0bbf leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec0c57 movq 0x28(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1ec0bfc movq 0x28(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ec0be2 movq 0x40(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1ec0bef leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec0c57 movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec0c63 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0x1, %eax movq 0x18(%rsp), %rcx shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vmovaps 0x190(%rsp), %xmm4 vcmpleps 0x20(%r14){1to4}, %xmm4, %k1 kandb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx movq 0x70(%rsp), %r8 movq 0x68(%rsp), %rdi vmovups 0x260(%rsp), %ymm19 vmovups 0x240(%rsp), %ymm20 vmovups 0x220(%rsp), %ymm21 vbroadcastss 0x51a35(%rip), %ymm22 # 0x1f12704 movq 0x60(%rsp), %r9 movq 0x58(%rsp), %r10 movq 0x50(%rsp), %r11 movq 0x48(%rsp), %rsi vmovups 0x200(%rsp), %ymm23 vmovups 0x1e0(%rsp), %ymm24 vmovups 0x1c0(%rsp), %ymm25 vmovups 0x1a0(%rsp), %ymm26 vpmovsxbd 0x9cd63(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0xa08a5(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x6019f(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0xa08f5(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0xa08f3(%rip), %ymm31 # 0x1f61628 jmp 0x1ec09dd movq 0x18(%rsp), %rax vmovss 0x130(%rsp,%rax,4), %xmm0 vmovss 0x140(%rsp,%rax,4), %xmm1 vmovss 0x150(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x20(%r14) vmovss 0x160(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x30(%r14) vmovss 0x170(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x34(%r14) vmovss 0x180(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x38(%r14) vmovss %xmm0, 0x3c(%r14) vmovss %xmm1, 0x40(%r14) movl 0x130(%rbp,%rax,4), %eax movl %eax, 0x44(%r14) movq 0x38(%rsp), %rax movl %eax, 0x48(%r14) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r14) movl 0x4(%rax), %eax movl %eax, 0x50(%r14) movq 0x30(%rsp), %rax jmp 0x1ec0826 vbroadcastss 0x20(%r14), %ymm0 leaq 0x2a0(%rsp), %r13 vmovdqu 0x280(%rsp), %ymm8 jmp 0x1ec0217
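A pattern that recurs throughout these listings is the selection of the closest accepted hit lane: invalid lanes are blended to +inf, the minimum is folded across lanes with two shuffle/min steps, and a compare plus tzcnt recovers the first lane holding that minimum (the vblendmps / vshufps / vminps / vshufpd / vcmpeqps / tzcntq run above). A minimal AVX-512VL sketch of the idiom; the helper name is chosen here for illustration:

#include <immintrin.h>
#include <cmath>

// Index of the lane holding the smallest t among the lanes enabled in
// `valid` (assumes at least one lane is set). Mirrors the horizontal-min
// idiom in the listing above.
static inline int closestLane(__m128 t, __mmask8 valid) {
  const __m128 inf = _mm_set1_ps(INFINITY);
  const __m128 v = _mm_mask_blend_ps(valid, inf, t);      // +inf in disabled lanes
  __m128 m = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0xB1));   // min of lane pairs (1,0,3,2)
  m = _mm_min_ps(m, _mm_shuffle_ps(m, m, 0x4E));          // fold the two halves (2,3,0,1)
  const __mmask8 eq = _mm_cmp_ps_mask(v, m, _CMP_EQ_OQ) & valid;
  return (int)_tzcnt_u32(eq);                             // first lane equal to the min
}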
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvMBIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
    ? query->radius * query->radius
    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1  <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
    {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
        ? query->radius * query->radius
        : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
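The node test behind BVHNNodePointQuerySphere1 reduces to a point-to-box squared distance per child, which the listing below computes as clamp, subtract, square, sum (the vmaxps / vminps / vsubps / vmulps / vaddps run) and compares against the squared radius. A scalar sketch of that distance; the function name is illustrative:

#include <algorithm>

// Squared distance from point p to the axis-aligned box [lo,hi]; zero when
// p is inside. Scalar form of the per-child clamp/subtract/square/sum
// sequence in the compiled node test below.
inline float sqrDistPointBox(const float p[3], const float lo[3], const float hi[3]) {
  float d2 = 0.0f;
  for (int i = 0; i < 3; ++i) {
    const float c = std::min(std::max(p[i], lo[i]), hi[i]); // clamp p to the box
    const float d = c - p[i];
    d2 += d * d;
  }
  return d2; // the child is visited iff d2 <= cull_radius
}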
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec0df5 xorl %eax, %eax jmp 0x1ec15f5 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r15 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec0e35 vmovss 0x10(%r15), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ec0e40 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 vbroadcastss (%r15), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r15), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r15), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x9cbdf(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0xa0722(%rip), %ymm13 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ec15e0 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ec0f18 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ec10a8 testb $0x8, %r12b jne 0x1ec1029 movq %r12, %rax andq $-0x10, %rax vbroadcastss 0xc(%r15), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %r12d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec1215 kmovb %k0, %edi testb $0x8, %r12b jne 0x1ec1201 testq %rdi, %rdi je 0x1ec120b andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec122e testl %eax, %eax je 0x1ec0f35 jmp 0x1ec14c5 testb $0x8, %r12b jne 0x1ec1029 movq %r12, %rax andq $-0x10, %rax vbroadcastss 0xc(%r15), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %r12d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ec1029 vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1ec1029 movl $0x6, %eax jmp 0x1ec109b movl $0x4, %eax jmp 0x1ec109b vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ec1025 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec1292 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec109b vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec131b vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec109b movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec13d7 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1ec109b valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x5fad9(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0xa0230(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0xa0229(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0xa021c(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x512da(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec141d popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ec1487 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r12 vmovdqa %ymm9, %ymm7 jmp 0x1ec13cf cmpl $0x6, %eax jne 0x1ec0f18 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ec0f18 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %r12 addq $0x130, %r12 # imm = 0x130 xorl %ebp, %ebp xorl %eax, %eax movq %rax, 0x28(%rsp) movq $-0x10, %r14 xorl %r13d, %r13d movl (%r12,%r14), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ec1552 movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14), %eax movl %eax, 0x40(%rbx) movq %r15, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b addq $0x4, %r14 jne 0x1ec1514 movq 0x28(%rsp), %rax orb %r13b, %al incq %rbp addq $0x140, %r12 # imm = 0x140 cmpq 0x20(%rsp), %rbp jne 0x1ec1505 testb $0x1, %al vmovaps 0x30(%rsp), %xmm10 movq 0x18(%rsp), %r8 vpmovsxbd 0x9c4ef(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0xa0032(%rip), %ymm13 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ec0f18 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec15ca vmovss 0x10(%r15), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ec15d5 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec0ea2 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq nop
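The traverseClosestHit sort in the asm above orders children with integer min/max (vpminsd / vpmaxsd) rather than float compares. That works because the bit pattern of a non-negative IEEE float orders like a signed 32-bit integer, so the vpternlogd step can overwrite the low three mantissa bits of each distance with the child slot and sort (distance, slot) as a single integer key. A scalar sketch of the packing, assuming non-negative distances:

#include <cstdint>
#include <cstring>

// Pack a non-negative float distance and a 3-bit child slot into one
// signed key whose integer ordering matches the float ordering; this is
// what the vpternlogd with an all-but-low-3-bits mask does per lane.
static inline int32_t packDistSlot(float dist, int slot) {
  uint32_t bits;
  std::memcpy(&bits, &dist, sizeof bits);   // dist >= 0, so bits order like int32
  return (int32_t)((bits & ~7u) | (uint32_t)slot);
}

Unpacking is the reverse: the slot is key & 7, and clearing those bits recovers the distance to within its low mantissa bits, which is accurate enough for traversal ordering.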
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiMBIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
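The TriangleMiMBIntersector1Moeller leaf test compiled below is a division-free variant of Möller-Trumbore: u, v and t are kept scaled by the determinant, whose sign is folded in by XOR-ing its sign bit into the numerators (the vandps / vandpd / vxorps trio in the listing), so hits are accepted with comparisons only. The textbook scalar form for reference; Vec3 and its helpers are minimal stand-ins, not Embree's types:

// Minimal Vec3 helpers; illustrative stand-ins only.
struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Classic Möller-Trumbore; the compiled variant avoids the divide by
// scaling the comparisons with |det| and XOR-ing in det's sign bit.
bool moellerTrumbore(Vec3 org, Vec3 dir, Vec3 v0, Vec3 v1, Vec3 v2,
                     float tnear, float tfar, float& t, float& u, float& v)
{
  const Vec3 e1 = sub(v1, v0), e2 = sub(v2, v0);
  const Vec3 p  = cross(dir, e2);
  const float det = dot(e1, p);
  if (det == 0.0f) return false;                 // ray parallel to triangle
  const float inv = 1.0f / det;
  const Vec3 tv = sub(org, v0);
  u = dot(tv, p) * inv;
  if (u < 0.0f || u > 1.0f) return false;
  const Vec3 q = cross(tv, e1);
  v = dot(dir, q) * inv;
  if (v < 0.0f || u + v > 1.0f) return false;
  t = dot(e2, q) * inv;
  return tnear < t && t <= tfar;                 // matches the (tnear, tfar] window
}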
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2608, %rsp # imm = 0x2608 movq %rdx, 0x8(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec1639 movq 0x70(%rax), %rax movq %rax, 0x2c0(%rsp) movl $0x0, 0x2c8(%rsp) cmpq $0x8, %rax jne 0x1ec164e addq $0x2608, %rsp # imm = 0x2608 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 leaq 0x2d0(%rsp), %r8 vxorps %xmm27, %xmm27, %xmm27 vmaxss 0xc(%rsi), %xmm27, %xmm1 vmaxss 0x20(%rsi), %xmm27, %xmm2 vandps 0x5f84b(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec4 vbroadcastss 0x2f966(%rip), %xmm4 # 0x1ef0fe8 vcmpltps %xmm4, %xmm3, %k1 vmovaps %xmm4, %xmm0 {%k1} vrcp14ps %xmm0, %xmm3 vfnmadd213ps 0x2b075(%rip){1to4}, %xmm3, %xmm0 # xmm0 = -(xmm3 * xmm0) + mem vfmadd132ps %xmm3, %xmm3, %xmm0 # xmm0 = (xmm0 * xmm3) + xmm3 xorl %edi, %edi vucomiss %xmm27, %xmm0 setb %dil vbroadcastss %xmm0, %ymm28 vmovshdup %xmm0, %xmm3 # xmm3 = xmm0[1,1,3,3] vbroadcastsd %xmm3, %ymm29 vbroadcastss 0x5f813(%rip), %ymm4 # 0x1f20edc vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vpermps %ymm0, %ymm4, %ymm30 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss 0x51022(%rip), %ymm31 # 0x1f12704 vbroadcastss %xmm0, %ymm6 vpermps %ymm0, %ymm31, %ymm7 vpermps %ymm0, %ymm4, %ymm4 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm27, %xmm3 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm27, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %rbx xorq $0x20, %rbx movq %r10, %r14 xorq $0x20, %r14 vbroadcastss %xmm1, %ymm8 vbroadcastss %xmm2, %ymm0 vbroadcastss 0x5f776(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm6, %ymm9 vxorps %ymm1, %ymm7, %ymm10 vxorps %ymm1, %ymm4, %ymm11 vpmovsxbd 0x9c311(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9fe54(%rip), %ymm13 # 0x1f615bc leaq 0x2c0(%rsp), %r15 movq %rsi, 0x18(%rsp) movq %rdi, 0x58(%rsp) movq %r9, 0x50(%rsp) movq %r10, 0x48(%rsp) movq %r11, 0x40(%rsp) movq %rbx, 0x38(%rsp) movq %r14, 0x30(%rsp) vmovups %ymm8, 0x280(%rsp) vmovups %ymm9, 0x260(%rsp) vmovups %ymm10, 0x240(%rsp) vmovups %ymm11, 0x220(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r15, %r8 je 0x1ec1639 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ec17bc movq (%r8), %rbp testb $0x8, %bpl jne 0x1ec18a8 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%rdi), %ymm2 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm9, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm9 vmaxps %ymm2, %ymm8, %ymm2 vmovaps 0x100(%rax,%r9), %ymm3 vfmadd213ps 0x40(%rax,%r9), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r10), %ymm4 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm10, %ymm29, %ymm3 # ymm3 = (ymm29 * ymm3) + ymm10 vfmadd213ps %ymm11, %ymm30, %ymm4 # ymm4 = (ymm30 * ymm4) + ymm11 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm15 vmovaps 0x100(%rax,%r11), %ymm2 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vmovaps 0x100(%rax,%rbx), %ymm3 vfmadd213ps 0x40(%rax,%rbx), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vfmadd213ps %ymm9, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm9 vfmadd213ps %ymm10, %ymm29, %ymm3 # ymm3 = (ymm29 * ymm3) + ymm10 vmovaps 0x100(%rax,%r14), %ymm4 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm11, %ymm30, %ymm4 # ymm4 = (ymm30 * ymm4) + ymm11 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %ebp, %ecx andl $0x7, 
%ecx cmpl $0x6, %ecx je 0x1ec191d vcmpleps %ymm2, %ymm15, %k0 kmovb %k0, %r12d testb $0x8, %bpl jne 0x1ec1916 testq %r12, %r12 je 0x1ec1939 andq $-0x10, %rbp vmovdqu (%rbp), %ymm1 vmovdqu 0x20(%rbp), %ymm2 vmovdqa %ymm12, %ymm3 vpternlogd $0xf8, %ymm13, %ymm15, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ec1940 testl %eax, %eax je 0x1ec17d8 jmp 0x1ec1c8b movl $0x6, %eax jmp 0x1ec1909 vcmpleps %ymm2, %ymm15, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ec18a4 movl $0x4, %eax jmp 0x1ec1909 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec19a4 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbp vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm15, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec1909 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec1a2d vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbp vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm15, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm15, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec1909 movq %r12, %r13 vmovdqa64 %ymm15, %ymm16 movq %r15, %r12 vmovdqa %ymm13, %ymm14 vmovdqa %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 movq %r14, %r15 movq %rbx, %r14 movq %r11, %rbx movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec1b66 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbp vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa64 %ymm16, %ymm15 vpermd %ymm16, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm16, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm16, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi vxorps %xmm27, %xmm27, %xmm27 movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %rbx, %r11 movq %r14, %rbx movq %r15, %r14 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovdqa %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 movq %r12, %r15 movq %r13, %r12 jmp 0x1ec1909 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x5f34a(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x9faa1(%rip), %ymm15 # 0x1f61620 vpermt2d %ymm8, %ymm15, %ymm3 vpmovsxbd 0x9fa9a(%rip), %ymm8 # 
0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x9fa8d(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm31, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec1bac popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm16, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec1c0e vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbp movq %rdi, %rsi vxorps %xmm27, %xmm27, %xmm27 movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %rbx, %r11 movq %r14, %rbx movq %r15, %r14 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovdqa %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 movq %r12, %r15 vmovdqa64 %ymm16, %ymm15 jmp 0x1ec1b5e cmpl $0x6, %eax jne 0x1ec17b7 movq %r12, 0x60(%rsp) vmovups %ymm15, 0x2a0(%rsp) movq %r8, 0x68(%rsp) movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x70(%rsp) je 0x1ec2560 andq $-0x10, %rbp movq 0x8(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) xorl %eax, %eax movq %rax, 0x78(%rsp) leaq (%rax,%rax,4), %rax shlq $0x4, %rax vmovss 0x1c(%rsi), %xmm0 movl 0x30(%rbp,%rax), %ecx movq 0x28(%rsp), %rdx movq 0x1e8(%rdx), %rdx movq (%rdx,%rcx,8), %rcx vmovss 0x28(%rcx), %xmm1 vmovss 0x2c(%rcx), %xmm2 vmovss 0x30(%rcx), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0x2ecab(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm1 vmaxss %xmm1, %xmm27, %xmm1 vcvttss2si %xmm1, %edx movslq %edx, %rdx movq 0xe0(%rcx), %r12 imulq $0x38, %rdx, %r13 movl (%rbp,%rax), %ecx movq %rcx, 0x10(%rsp) movq (%r12,%r13), %rbx vmovups (%rbx,%rcx,4), %xmm5 movl 0x10(%rbp,%rax), %edx vmovups (%rbx,%rdx,4), %xmm3 movl 0x20(%rbp,%rax), %esi vmovups (%rbx,%rsi,4), %xmm2 movl 0x4(%rbp,%rax), %edi vmovups (%rbx,%rdi,4), %xmm7 movl 0x14(%rbp,%rax), %r8d vmovups (%rbx,%r8,4), %xmm6 movl 0x24(%rbp,%rax), %r9d vmovups (%rbx,%r9,4), %xmm4 movl 0x8(%rbp,%rax), %r10d vmovups (%rbx,%r10,4), %xmm10 movl 0x18(%rbp,%rax), %r11d vmovups (%rbx,%r11,4), %xmm9 movl 0x28(%rbp,%rax), %ecx vmovups (%rbx,%rcx,4), %xmm8 movl 0xc(%rbp,%rax), %r15d vmovups (%rbx,%r15,4), %xmm15 movl 0x1c(%rbp,%rax), %r14d vmovups (%rbx,%r14,4), %xmm12 movq 0x38(%r12,%r13), %r12 movl 0x2c(%rbp,%rax), %r13d vmovups (%rbx,%r13,4), %xmm11 movq 0x10(%rsp), %rbx vmovups (%r12,%rbx,4), %xmm20 vmovups (%r12,%rdx,4), %xmm16 vmovups (%r12,%rsi,4), %xmm13 movq 0x18(%rsp), %rsi vmovups (%r12,%rdi,4), %xmm22 vmovups (%r12,%r8,4), %xmm18 vmovups (%r12,%r9,4), %xmm14 vmovups (%r12,%r10,4), %xmm24 vmovups (%r12,%r11,4), %xmm21 vmovups (%r12,%rcx,4), %xmm17 vmovups (%r12,%r15,4), %xmm25 vmovups (%r12,%r14,4), %xmm23 vmovups (%r12,%r13,4), %xmm19 vmovaps 0x30(%rbp,%rax), %xmm26 vmovaps %xmm26, 0x1b0(%rsp) vmovaps 0x40(%rbp,%rax), %xmm26 movq 0x78(%rsp), %rax vsubss %xmm1, %xmm0, %xmm0 vunpcklps %xmm10, %xmm5, %xmm1 # xmm1 = xmm5[0],xmm10[0],xmm5[1],xmm10[1] vunpckhps %xmm10, %xmm5, %xmm5 # xmm5 = 
xmm5[2],xmm10[2],xmm5[3],xmm10[3] vunpcklps %xmm15, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm15[0],xmm7[1],xmm15[1] vunpckhps %xmm15, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm15[2],xmm7[3],xmm15[3] vunpcklps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] vunpcklps %xmm10, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] vunpckhps %xmm10, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3] vunpcklps %xmm9, %xmm3, %xmm10 # xmm10 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] vunpckhps %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3] vunpcklps %xmm12, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm12[0],xmm6[1],xmm12[1] vunpckhps %xmm12, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm12[2],xmm6[3],xmm12[3] vunpcklps %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1] vunpcklps %xmm9, %xmm10, %xmm6 # xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1] vunpckhps %xmm9, %xmm10, %xmm9 # xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3] vunpcklps %xmm8, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] vunpckhps %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] vunpcklps %xmm11, %xmm4, %xmm8 # xmm8 = xmm4[0],xmm11[0],xmm4[1],xmm11[1] vunpckhps %xmm11, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm11[2],xmm4[3],xmm11[3] vunpcklps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] vunpcklps %xmm8, %xmm10, %xmm4 # xmm4 = xmm10[0],xmm8[0],xmm10[1],xmm8[1] vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3] vunpcklps %xmm24, %xmm20, %xmm10 # xmm10 = xmm20[0],xmm24[0],xmm20[1],xmm24[1] vunpckhps %xmm24, %xmm20, %xmm11 # xmm11 = xmm20[2],xmm24[2],xmm20[3],xmm24[3] vunpcklps %xmm25, %xmm22, %xmm12 # xmm12 = xmm22[0],xmm25[0],xmm22[1],xmm25[1] vunpckhps %xmm25, %xmm22, %xmm15 # xmm15 = xmm22[2],xmm25[2],xmm22[3],xmm25[3] vunpcklps %xmm15, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1] vunpcklps %xmm12, %xmm10, %xmm15 # xmm15 = xmm10[0],xmm12[0],xmm10[1],xmm12[1] vunpckhps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3] vunpcklps %xmm21, %xmm16, %xmm12 # xmm12 = xmm16[0],xmm21[0],xmm16[1],xmm21[1] vunpckhps %xmm21, %xmm16, %xmm16 # xmm16 = xmm16[2],xmm21[2],xmm16[3],xmm21[3] vunpcklps %xmm23, %xmm18, %xmm20 # xmm20 = xmm18[0],xmm23[0],xmm18[1],xmm23[1] vunpckhps %xmm23, %xmm18, %xmm18 # xmm18 = xmm18[2],xmm23[2],xmm18[3],xmm23[3] vunpcklps %xmm18, %xmm16, %xmm16 # xmm16 = xmm16[0],xmm18[0],xmm16[1],xmm18[1] vunpcklps %xmm20, %xmm12, %xmm18 # xmm18 = xmm12[0],xmm20[0],xmm12[1],xmm20[1] vunpckhps %xmm20, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm20[2],xmm12[3],xmm20[3] vunpcklps %xmm17, %xmm13, %xmm20 # xmm20 = xmm13[0],xmm17[0],xmm13[1],xmm17[1] vunpckhps %xmm17, %xmm13, %xmm13 # xmm13 = xmm13[2],xmm17[2],xmm13[3],xmm17[3] vunpcklps %xmm19, %xmm14, %xmm17 # xmm17 = xmm14[0],xmm19[0],xmm14[1],xmm19[1] vunpckhps %xmm19, %xmm14, %xmm14 # xmm14 = xmm14[2],xmm19[2],xmm14[3],xmm19[3] vunpcklps %xmm14, %xmm13, %xmm13 # xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1] vunpcklps %xmm17, %xmm20, %xmm14 # xmm14 = xmm20[0],xmm17[0],xmm20[1],xmm17[1] vunpckhps %xmm17, %xmm20, %xmm17 # xmm17 = xmm20[2],xmm17[2],xmm20[3],xmm17[3] vbroadcastss %xmm0, %xmm19 vmovss 0x2a7e2(%rip), %xmm20 # 0x1eec714 vsubss %xmm0, %xmm20, %xmm0 vbroadcastss %xmm0, %xmm0 vmulps %xmm15, %xmm19, %xmm15 vmulps %xmm10, %xmm19, %xmm10 vmulps %xmm11, %xmm19, %xmm11 vfmadd231ps %xmm7, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm7) + xmm15 vfmadd231ps %xmm1, %xmm0, %xmm10 # xmm10 = (xmm0 * xmm1) + xmm10 vfmadd231ps %xmm5, %xmm0, %xmm11 # xmm11 = (xmm0 * xmm5) + xmm11 vmulps %xmm18, %xmm19, %xmm1 vmulps %xmm12, %xmm19, %xmm5 
vmulps %xmm16, %xmm19, %xmm7 vfmadd231ps %xmm6, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm6) + xmm1 vfmadd231ps %xmm9, %xmm0, %xmm5 # xmm5 = (xmm0 * xmm9) + xmm5 vfmadd231ps %xmm3, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm3) + xmm7 vmulps %xmm14, %xmm19, %xmm3 vmulps %xmm17, %xmm19, %xmm6 vmulps %xmm13, %xmm19, %xmm9 vfmadd231ps %xmm4, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm4) + xmm3 vfmadd231ps %xmm8, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm8) + xmm6 vfmadd231ps %xmm2, %xmm0, %xmm9 # xmm9 = (xmm0 * xmm2) + xmm9 vmovaps %xmm26, 0xe0(%rsp) vsubps %xmm1, %xmm15, %xmm8 vsubps %xmm5, %xmm10, %xmm12 vsubps %xmm7, %xmm11, %xmm13 vsubps %xmm15, %xmm3, %xmm7 vsubps %xmm10, %xmm6, %xmm14 vsubps %xmm11, %xmm9, %xmm9 vmulps %xmm9, %xmm12, %xmm0 vfmsub231ps %xmm13, %xmm14, %xmm0 # xmm0 = (xmm14 * xmm13) - xmm0 vmulps %xmm7, %xmm13, %xmm1 vfmsub231ps %xmm8, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm8) - xmm1 vmulps %xmm14, %xmm8, %xmm2 vbroadcastss 0x10(%rsi), %xmm3 vbroadcastss 0x14(%rsi), %xmm16 vbroadcastss 0x18(%rsi), %xmm17 vsubps (%rsi){1to4}, %xmm15, %xmm4 vfmsub231ps %xmm12, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm12) - xmm2 vsubps 0x4(%rsi){1to4}, %xmm10, %xmm5 vsubps 0x8(%rsi){1to4}, %xmm11, %xmm6 vmulps %xmm6, %xmm16, %xmm10 vfmsub231ps %xmm17, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm17) - xmm10 vmulps %xmm4, %xmm17, %xmm11 vfmsub231ps %xmm3, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm3) - xmm11 vmulps %xmm5, %xmm3, %xmm15 vfmsub231ps %xmm16, %xmm4, %xmm15 # xmm15 = (xmm4 * xmm16) - xmm15 vmulps %xmm17, %xmm2, %xmm17 vfmadd231ps %xmm16, %xmm1, %xmm17 # xmm17 = (xmm1 * xmm16) + xmm17 vfmadd231ps %xmm3, %xmm0, %xmm17 # xmm17 = (xmm0 * xmm3) + xmm17 vandps 0x5ee7f(%rip){1to4}, %xmm17, %xmm3 # 0x1f20ec4 vmulps %xmm15, %xmm9, %xmm16 vfmadd231ps %xmm14, %xmm11, %xmm16 # xmm16 = (xmm11 * xmm14) + xmm16 vfmadd231ps %xmm7, %xmm10, %xmm16 # xmm16 = (xmm10 * xmm7) + xmm16 vandpd 0x5ef07(%rip){1to2}, %xmm17, %xmm9 # 0x1f20f68 vxorps %xmm16, %xmm9, %xmm7 vmulps %xmm15, %xmm13, %xmm13 vfmadd231ps %xmm11, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm11) + xmm13 vfmadd231ps %xmm10, %xmm8, %xmm13 # xmm13 = (xmm8 * xmm10) + xmm13 vxorps %xmm13, %xmm9, %xmm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %xmm10, %xmm7, %k1 vcmpnltps %xmm10, %xmm8, %k1 {%k1} vcmpneqps %xmm10, %xmm17, %k1 {%k1} vaddps %xmm7, %xmm8, %xmm10 vcmpleps %xmm3, %xmm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1ec20b9 incq %rax cmpq 0x70(%rsp), %rax jne 0x1ec1cce jmp 0x1ec2560 vmulps %xmm6, %xmm2, %xmm6 vfmadd213ps %xmm6, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm6 vfmadd213ps %xmm5, %xmm0, %xmm4 # xmm4 = (xmm0 * xmm4) + xmm5 vxorps %xmm4, %xmm9, %xmm4 vmulps 0xc(%rsi){1to4}, %xmm3, %xmm5 vmulps 0x20(%rsi){1to4}, %xmm3, %xmm6 vcmpleps %xmm6, %xmm4, %k1 vcmpltps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ec20a6 movq %rax, %rbx vmovaps %xmm7, 0xf0(%rsp) vmovaps %xmm8, 0x100(%rsp) vmovaps %xmm4, 0x110(%rsp) vmovaps %xmm3, 0x120(%rsp) kmovb %k1, 0x131(%rsp) vmovaps %xmm0, 0x170(%rsp) vmovaps %xmm1, 0x180(%rsp) vmovaps %xmm2, 0x190(%rsp) vrcp14ps %xmm3, %xmm0 vfnmadd213ps 0x2a5c8(%rip){1to4}, %xmm0, %xmm3 # xmm3 = -(xmm0 * xmm3) + mem vfmadd132ps %xmm0, %xmm0, %xmm3 # xmm3 = (xmm3 * xmm0) + xmm0 vmulps 0x110(%rsp), %xmm3, %xmm4 vmovaps %xmm4, 0x160(%rsp) vmulps 0xf0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x140(%rsp) vmulps 0x100(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x150(%rsp) kmovd %k1, %r14d vbroadcastss 0x2988c(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = 
xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax cmovel %r14d, %eax movzbl %al, %eax tzcntq %rax, %r15 movl 0x1b0(%rsp,%r15,4), %r12d movq 0x28(%rsp), %rax movq 0x1e8(%rax), %rax movq (%rax,%r12,8), %r13 movl 0x24(%rsi), %eax testl %eax, 0x34(%r13) je 0x1ec220f movq 0x8(%rsp), %rax movq 0x10(%rax), %rax cmpq $0x0, 0x10(%rax) jne 0x1ec228b cmpq $0x0, 0x40(%r13) jne 0x1ec228b xorl %eax, %eax jmp 0x1ec222f movl $0x1, %eax shlxl %r15d, %eax, %eax kmovd %eax, %k0 movzbl %r14b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r14d movb $0x1, %al testb %al, %al je 0x1ec24db testb %r14b, %r14b movq %rbx, %rax je 0x1ec20a6 kmovd %r14d, %k1 vbroadcastss 0x297cf(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %r14b, %al movzbl %al, %eax movzbl %r14b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %r15d jmp 0x1ec21cb movq %rax, 0x10(%rsp) vmovaps %xmm4, 0x1a0(%rsp) vmovups %ymm30, 0x1c0(%rsp) vmovups %ymm29, 0x1e0(%rsp) vmovups %ymm28, 0x200(%rsp) vmovss 0x140(%rsp,%r15,4), %xmm0 vmovss 0x150(%rsp,%r15,4), %xmm1 movq 0x8(%rsp), %rax movq 0x8(%rax), %rax movl 0xe0(%rsp,%r15,4), %ecx vmovss 0x170(%rsp,%r15,4), %xmm2 vmovss 0x180(%rsp,%r15,4), %xmm3 vmovss 0x190(%rsp,%r15,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movl %r12d, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x160(%rsp,%r15,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%r13), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %rsi, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq 0x40(%r13), %rax testq %rax, %rax je 0x1ec23d0 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec2464 movq 0x10(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1ec2409 movq 0x10(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ec23ef testb $0x40, 0x3e(%r13) je 0x1ec23fc leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec2464 movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec2474 movq 0x18(%rsp), %rax vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%rax) movl $0x1, %eax shlxl %r15d, %eax, %eax kmovd %eax, %k0 movzbl %r14b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 movq 0x18(%rsp), %rsi vmovaps 0x1a0(%rsp), %xmm4 vcmpleps 0x20(%rsi){1to4}, %xmm4, %k1 kandb %k1, %k0, %k0 kmovd %k0, %r14d movb $0x1, %al vxorps %xmm27, %xmm27, %xmm27 vmovups 0x200(%rsp), %ymm28 vmovups 0x1e0(%rsp), %ymm29 vmovups 0x1c0(%rsp), %ymm30 vbroadcastss 0x5022e(%rip), %ymm31 # 0x1f12704 jmp 0x1ec222f vmovss 0x140(%rsp,%r15,4), %xmm0 vmovss 0x150(%rsp,%r15,4), %xmm1 vmovss 
0x160(%rsp,%r15,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x170(%rsp,%r15,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x180(%rsp,%r15,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x190(%rsp,%r15,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movl 0xe0(%rsp,%r15,4), %eax movl %eax, 0x44(%rsi) movl %r12d, 0x48(%rsi) movq 0x8(%rsp), %rax movq 0x8(%rax), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) movq %rbx, %rax jmp 0x1ec20a6 vbroadcastss 0x20(%rsi), %ymm0 movq 0x68(%rsp), %r8 movq 0x58(%rsp), %rdi movq 0x50(%rsp), %r9 movq 0x48(%rsp), %r10 movq 0x40(%rsp), %r11 movq 0x38(%rsp), %rbx movq 0x30(%rsp), %r14 vmovups 0x280(%rsp), %ymm8 vmovups 0x260(%rsp), %ymm9 vmovups 0x240(%rsp), %ymm10 vmovups 0x220(%rsp), %ymm11 vpmovsxbd 0x9b4ba(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9effd(%rip), %ymm13 # 0x1f615bc leaq 0x2c0(%rsp), %r15 vmovups 0x2a0(%rsp), %ymm15 movq 0x60(%rsp), %r12 jmp 0x1ec17b7
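Because this instantiation is motion-blurred (MB), the leaf code above first maps ray.time() onto a time segment: scale into segment units, floor, clamp to [0, fnumTimeSegments-1] (the vroundss $0x9 / vminss / vmaxss / vcvttss2si run), then lerp the two bracketing vertex sets with the fractional remainder. A sketch under those assumptions, with illustrative names:

#include <algorithm>
#include <cmath>

struct TimeSegment { int itime; float ltime; };

// Map a ray time inside [lower, upper] onto a segment index plus local
// interpolation weight; vertices are then blended as
// (1 - ltime) * v[itime] + ltime * v[itime + 1].
inline TimeSegment timeSegment(float time, float lower, float upper, float fnumSegments) {
  const float f = (time - lower) / (upper - lower) * fnumSegments;
  const float i = std::max(0.0f, std::min(std::floor(f), fnumSegments - 1.0f));
  return { (int)i, f - i };
}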
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiMBIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
    ? query->radius * query->radius
    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1  <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
    {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
        ? query->radius * query->radius
        : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec25eb xorl %eax, %eax jmp 0x1ec2dd4 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r13 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec262b vmovss 0x10(%r13), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ec2636 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r13), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r13), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r13), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x9b3e0(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9ef23(%rip), %ymm13 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ec2dbf vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ec270f movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ec2894 testb $0x8, %bpl jne 0x1ec281f movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec29fc kmovb %k0, %edi testb $0x8, %bpl jne 0x1ec29e8 testq %rdi, %rdi je 0x1ec29f2 andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec2a15 testl %eax, %eax je 0x1ec272c jmp 0x1ec2cac testb $0x8, %bpl jne 0x1ec281f movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ec281f vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1ec281f movl $0x6, %eax jmp 0x1ec2887 movl $0x4, %eax jmp 0x1ec2887 vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ec281b vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec2a79 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec2887 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec2b02 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec2887 movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec2bbe vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1ec2887 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x5e2f2(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x9ea49(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x9ea42(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9ea35(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x4faf3(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec2c04 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ec2c6e vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ec2bb6 cmpl $0x6, %eax jne 0x1ec270f movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ec270f movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x40, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r12d, %r12d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ec2d34 movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r13, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r12b incq %r14 cmpq $0x4, %r14 jne 0x1ec2cf4 movq 0x28(%rsp), %rax orb %r12b, %al incq %r15 addq $0x50, %rbp cmpq 0x20(%rsp), %r15 jne 0x1ec2ce9 testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x9ad08(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9e84b(%rip), %ymm13 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ec270f vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec2da9 vmovss 0x10(%r13), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ec2db4 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec2699 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
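The leaf loop in the asm above shows how ArrayIntersector1 feeds the 4-wide TriangleMi blocks to the per-geometry point-query callback: it walks a block's slots, stops at padding entries whose index is the 0xFFFFFFFF sentinel (the cmpl $-0x1 test), stores geomID/primID into the context, and ORs each callback result into the accumulated `changed` flag. A heavily hedged scalar sketch of that dispatch shape; the struct layout and callback signature are illustrative only, not Embree's:

#include <cstdint>

// Illustrative 4-wide primitive block: parallel arrays of geometry and
// primitive ids, padded with an invalid sentinel when a block is not full.
struct PrimBlock4 {
  static constexpr uint32_t invalid = 0xFFFFFFFFu;
  uint32_t geomID[4];
  uint32_t primID[4];
};

template <typename Query, typename Callback>
bool pointQueryLeaf(const PrimBlock4* blocks, size_t numBlocks,
                    Query& query, Callback&& perPrim) {
  bool changed = false;
  for (size_t b = 0; b < numBlocks; ++b)
    for (int i = 0; i < 4; ++i) {
      if (blocks[b].primID[i] == PrimBlock4::invalid) break; // rest is padding
      changed |= perPrim(blocks[b].geomID[i], blocks[b].primID[i], query);
    }
  return changed;
}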
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvMBIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
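This instantiation sets robust=true and swaps in TriangleMvMBIntersector1Pluecker: instead of one determinant, the leaf test evaluates three Plücker-style edge volumes and accepts a hit only when all three share a sign, which stays consistent across shared edges. A scalar sketch of those edge tests, reusing the Vec3/sub/cross/dot helpers from the Möller-Trumbore sketch above plus the obvious add(a,b) companion; the epsilon handling of a fully watertight version is omitted:

// Vector sum companion to the earlier helpers.
static Vec3 add(Vec3 a, Vec3 b) { return {a.x+b.x, a.y+b.y, a.z+b.z}; }

// Translate the triangle into the ray's frame and test the signs of the
// three edge volumes U, V, W; a hit requires them to agree in sign.
bool plueckerHit(Vec3 org, Vec3 dir, Vec3 p0, Vec3 p1, Vec3 p2)
{
  const Vec3 v0 = sub(p0, org), v1 = sub(p1, org), v2 = sub(p2, org);
  const Vec3 e0 = sub(v2, v0), e1 = sub(v0, v1), e2 = sub(v1, v2);
  const float U = dot(cross(e0, add(v2, v0)), dir); // 2 * det(v2, v0, dir)
  const float V = dot(cross(e1, add(v0, v1)), dir); // 2 * det(v0, v1, dir)
  const float W = dot(cross(e2, add(v1, v2)), dir); // 2 * det(v1, v2, dir)
  return (U >= 0.0f && V >= 0.0f && W >= 0.0f) ||
         (U <= 0.0f && V <= 0.0f && W <= 0.0f);
}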
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec2e23 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2638, %rsp # imm = 0x2638 movq 0x70(%rax), %rax movq %rax, 0x2f0(%rsp) movl $0x0, 0x2f8(%rsp) cmpq $0x8, %rax jne 0x1ec2e27 addq $0x2638, %rsp # imm = 0x2638 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x5e080(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vcmpltps 0x2e199(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 vbroadcastss 0x298bb(%rip), %xmm26 # 0x1eec714 vdivps %xmm0, %xmm26, %xmm0 vbroadcastss 0x5e0f7(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x5d09d(%rip){1to4}, %xmm0, %xmm4 # 0x1f1ff10 vmulps 0x5d097(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%rsi), %ymm27 vbroadcastss 0x4(%rsi), %ymm28 leaq 0x300(%rsp), %r8 vbroadcastss 0x8(%rsi), %ymm29 xorl %r9d, %r9d vucomiss %xmm1, %xmm4 setb %r9b vbroadcastss %xmm4, %ymm30 vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vbroadcastsd %xmm5, %ymm31 vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0] vbroadcastss 0x5e01a(%rip), %ymm7 # 0x1f20edc vpermps %ymm4, %ymm7, %ymm8 vbroadcastss %xmm0, %ymm9 vbroadcastss 0x4f82f(%rip), %ymm4 # 0x1f12704 vpermps %ymm0, %ymm4, %ymm10 vpermps %ymm0, %ymm7, %ymm11 shll $0x5, %r9d xorl %r10d, %r10d vucomiss %xmm1, %xmm5 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm1, %xmm6 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r9, %rdi xorq $0x20, %rdi movq %r10, %rbx xorq $0x20, %rbx movq %r11, %r14 xorq $0x20, %r14 vbroadcastss %xmm2, %ymm12 vbroadcastss %xmm3, %ymm0 vpmovsxbd 0x9ab3c(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x9e67f(%rip), %ymm14 # 0x1f615bc leaq 0x2f0(%rsp), %r15 movq %rdx, 0x38(%rsp) movq %rsi, 0x30(%rsp) vmovups %ymm27, 0x170(%rsp) vmovups %ymm28, 0x150(%rsp) vmovups %ymm29, 0x130(%rsp) movq %r9, 0x28(%rsp) vmovups %ymm30, 0x110(%rsp) vmovups %ymm31, 0xf0(%rsp) movq %r10, 0x20(%rsp) movq %r11, 0x18(%rsp) movq %rdi, 0x8(%rsp) vmovups %ymm8, 0x2b0(%rsp) vmovups %ymm9, 0x290(%rsp) vmovups %ymm10, 0x270(%rsp) vmovups %ymm11, 0x250(%rsp) movq %rbx, 0x60(%rsp) movq %r14, 0x58(%rsp) vmovups %ymm12, 0x230(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r15, %r8 je 0x1ec2e12 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ec2fd6 movq (%r8), %r13 testb $0x8, %r13b jne 0x1ec30e1 movq %r13, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%r9), %ymm2 vfmadd213ps 0x40(%rax,%r9), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm27, %ymm2, %ymm2 vmulps %ymm2, %ymm30, %ymm2 vmovaps 0x100(%rax,%r10), %ymm3 vmaxps %ymm2, %ymm12, %ymm2 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm28, %ymm3, %ymm3 vmovaps 0x100(%rax,%r11), %ymm4 vmulps %ymm3, %ymm31, %ymm3 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm29, %ymm4, %ymm4 vmulps %ymm4, %ymm8, %ymm4 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm16 vmovaps 0x100(%rax,%rdi), %ymm2 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm27, %ymm2, %ymm2 vmulps %ymm2, %ymm9, %ymm2 vmovaps 0x100(%rax,%rbx), %ymm3 vfmadd213ps 0x40(%rax,%rbx), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm28, %ymm3, %ymm3 vmulps %ymm3, %ymm10, %ymm3 vmovaps 0x100(%rax,%r14), %ymm4 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm29, %ymm4, %ymm4 vmulps %ymm4, %ymm11, %ymm4 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, 
%ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %r13d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec315c vcmpleps %ymm2, %ymm16, %k0 kmovb %k0, %r12d testb $0x8, %r13b jne 0x1ec3155 testq %r12, %r12 je 0x1ec3178 andq $-0x10, %r13 vmovdqu (%r13), %ymm1 vmovdqu 0x20(%r13), %ymm2 vmovdqa %ymm13, %ymm3 vpternlogd $0xf8, %ymm14, %ymm16, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ec317f testl %eax, %eax je 0x1ec2ff2 jmp 0x1ec354e movl $0x6, %eax jmp 0x1ec3148 vcmpleps %ymm2, %ymm16, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ec30dd movl $0x4, %eax jmp 0x1ec3148 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec31e9 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r13 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm16, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 movq 0x8(%rsp), %rdi jmp 0x1ec3148 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec3274 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm16, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm16, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec31df vmovdqa64 %ymm16, %ymm17 vmovdqa %ymm14, %ymm15 vmovdqa %ymm13, %ymm14 vmovaps %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec33da vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa64 %ymm17, %ymm16 vpermd %ymm17, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm17, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm17, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq 0x38(%rsp), %rdx movq 0x30(%rsp), %rsi vbroadcastss 0x293ad(%rip), %xmm26 # 0x1eec714 vmovups 0x170(%rsp), %ymm27 vmovups 0x150(%rsp), %ymm28 vmovups 0x130(%rsp), %ymm29 movq 0x28(%rsp), %r9 vmovups 0x110(%rsp), %ymm30 vmovups 0xf0(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 movq 0x20(%rsp), %r10 movq 0x18(%rsp), %r11 movq 0x8(%rsp), %rdi vmovaps %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 vmovdqa %ymm15, %ymm14 jmp 0x1ec3148 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 
0x5dad6(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x9e22c(%rip), %ymm16 # 0x1f61620 vpermt2d %ymm8, %ymm16, %ymm3 vpmovsxbd 0x9e225(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x9e218(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x4f2d6(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec3421 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm17, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec348b vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r13 movq 0x38(%rsp), %rdx movq 0x30(%rsp), %rsi vbroadcastss 0x2923f(%rip), %xmm26 # 0x1eec714 vmovups 0x170(%rsp), %ymm27 vmovups 0x150(%rsp), %ymm28 vmovups 0x130(%rsp), %ymm29 movq 0x28(%rsp), %r9 vmovups 0x110(%rsp), %ymm30 vmovups 0xf0(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 movq 0x20(%rsp), %r10 movq 0x18(%rsp), %r11 movq 0x8(%rsp), %rdi vmovaps %ymm13, %ymm12 vmovdqa %ymm14, %ymm13 vmovdqa %ymm15, %ymm14 vmovdqa64 %ymm17, %ymm16 jmp 0x1ec3148 cmpl $0x6, %eax jne 0x1ec2fd1 movq %r12, 0x68(%rsp) vmovdqu64 %ymm16, 0x2d0(%rsp) movl %r13d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x70(%rsp) je 0x1ec3e2b andq $-0x10, %r13 xorl %eax, %eax movq %r13, 0x48(%rsp) movq %rax, 0x78(%rsp) leaq (%rax,%rax,4), %rax shlq $0x6, %rax vbroadcastss 0x1c(%rsi), %xmm0 vmovaps 0x90(%r13,%rax), %xmm1 vmovaps 0xa0(%r13,%rax), %xmm2 vmovaps 0xb0(%r13,%rax), %xmm3 vmovaps 0xc0(%r13,%rax), %xmm8 vfmadd213ps (%r13,%rax), %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + mem vfmadd213ps 0x10(%r13,%rax), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ps 0x20(%r13,%rax), %xmm0, %xmm3 # xmm3 = (xmm0 * xmm3) + mem vmovaps 0xd0(%r13,%rax), %xmm11 vmovaps 0xe0(%r13,%rax), %xmm12 vfmadd213ps 0x30(%r13,%rax), %xmm0, %xmm8 # xmm8 = (xmm0 * xmm8) + mem vfmadd213ps 0x40(%r13,%rax), %xmm0, %xmm11 # xmm11 = (xmm0 * xmm11) + mem vfmadd213ps 0x50(%r13,%rax), %xmm0, %xmm12 # xmm12 = (xmm0 * xmm12) + mem vmovaps 0xf0(%r13,%rax), %xmm13 vmovaps 0x100(%r13,%rax), %xmm14 vmovaps 0x110(%r13,%rax), %xmm15 vfmadd213ps 0x60(%r13,%rax), %xmm0, %xmm13 # xmm13 = (xmm0 * xmm13) + mem vfmadd213ps 0x70(%r13,%rax), %xmm0, %xmm14 # xmm14 = (xmm0 * xmm14) + mem movq %rax, 0x10(%rsp) vfmadd213ps 0x80(%r13,%rax), %xmm0, %xmm15 # xmm15 = (xmm0 * xmm15) + mem vbroadcastss (%rsi), %xmm0 vbroadcastss 0x4(%rsi), %xmm16 vbroadcastss 0x8(%rsi), %xmm17 vbroadcastss 0x10(%rsi), %xmm5 vbroadcastss 0x14(%rsi), %xmm7 leaq 0x7(%rsp), %rax movq %rax, 0x1c0(%rsp) vbroadcastss 0x18(%rsi), %xmm10 vsubps %xmm0, %xmm1, %xmm4 vsubps %xmm16, %xmm2, %xmm6 vsubps %xmm17, %xmm3, %xmm9 vsubps %xmm0, %xmm8, %xmm19 vsubps %xmm16, %xmm11, %xmm20 vsubps %xmm17, %xmm12, %xmm21 vsubps %xmm0, %xmm13, %xmm22 vsubps %xmm16, %xmm14, %xmm14 vsubps %xmm17, %xmm15, %xmm16 vsubps %xmm4, %xmm22, %xmm11 vsubps %xmm6, %xmm14, %xmm13 vsubps %xmm9, %xmm16, %xmm12 vsubps %xmm19, %xmm4, %xmm15 vsubps %xmm20, %xmm6, %xmm18 vsubps 
%xmm21, %xmm9, %xmm17 vsubps %xmm22, %xmm19, %xmm0 vsubps %xmm14, %xmm20, %xmm1 vsubps %xmm16, %xmm21, %xmm2 vaddps %xmm4, %xmm22, %xmm3 vaddps %xmm6, %xmm14, %xmm8 vaddps %xmm9, %xmm16, %xmm23 vmulps %xmm12, %xmm8, %xmm24 vfmsub231ps %xmm23, %xmm13, %xmm24 # xmm24 = (xmm13 * xmm23) - xmm24 vmulps %xmm11, %xmm23, %xmm23 vfmsub231ps %xmm3, %xmm12, %xmm23 # xmm23 = (xmm12 * xmm3) - xmm23 vmulps %xmm3, %xmm13, %xmm3 vfmsub231ps %xmm8, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm8) - xmm3 vmulps %xmm3, %xmm10, %xmm3 vfmadd231ps %xmm23, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm23) + xmm3 vfmadd231ps %xmm24, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm24) + xmm3 vaddps %xmm19, %xmm4, %xmm8 vaddps %xmm20, %xmm6, %xmm23 vaddps %xmm21, %xmm9, %xmm24 vmulps %xmm17, %xmm23, %xmm25 vfmsub231ps %xmm24, %xmm18, %xmm25 # xmm25 = (xmm18 * xmm24) - xmm25 vmulps %xmm15, %xmm24, %xmm24 vfmsub231ps %xmm8, %xmm17, %xmm24 # xmm24 = (xmm17 * xmm8) - xmm24 vmulps %xmm18, %xmm8, %xmm8 vfmsub231ps %xmm23, %xmm15, %xmm8 # xmm8 = (xmm15 * xmm23) - xmm8 vmulps %xmm8, %xmm10, %xmm8 vfmadd231ps %xmm24, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm24) + xmm8 vfmadd231ps %xmm25, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm25) + xmm8 vbroadcastss 0x282b9(%rip), %xmm25 # 0x1eeba20 vaddps %xmm22, %xmm19, %xmm19 vaddps %xmm14, %xmm20, %xmm14 vaddps %xmm16, %xmm21, %xmm16 vmulps %xmm2, %xmm14, %xmm20 vfmsub231ps %xmm16, %xmm1, %xmm20 # xmm20 = (xmm1 * xmm16) - xmm20 vmulps %xmm0, %xmm16, %xmm16 vfmsub231ps %xmm19, %xmm2, %xmm16 # xmm16 = (xmm2 * xmm19) - xmm16 vmulps %xmm1, %xmm19, %xmm19 vfmsub231ps %xmm14, %xmm0, %xmm19 # xmm19 = (xmm0 * xmm14) - xmm19 vmulps %xmm19, %xmm10, %xmm19 vfmadd231ps %xmm16, %xmm7, %xmm19 # xmm19 = (xmm7 * xmm16) + xmm19 vfmadd231ps %xmm20, %xmm5, %xmm19 # xmm19 = (xmm5 * xmm20) + xmm19 vaddps %xmm3, %xmm8, %xmm14 vaddps %xmm14, %xmm19, %xmm14 vandps 0x5d701(%rip){1to4}, %xmm14, %xmm16 # 0x1f20ec4 vmulps 0x5d6ff(%rip){1to4}, %xmm16, %xmm20 # 0x1f20ecc vminps %xmm8, %xmm3, %xmm21 vminps %xmm19, %xmm21, %xmm21 vbroadcastss 0x5d6dd(%rip), %xmm22 # 0x1f20ec0 vxorps %xmm22, %xmm20, %xmm22 vcmpnltps %xmm22, %xmm21, %k0 vmaxps %xmm8, %xmm3, %xmm21 vmaxps %xmm19, %xmm21, %xmm19 vcmpleps %xmm20, %xmm19, %k1 korw %k1, %k0, %k0 kshiftlb $0x4, %k0, %k0 kshiftrb $0x4, %k0, %k0 kortestb %k0, %k0 je 0x1ec3e13 vmulps %xmm18, %xmm12, %xmm19 vmulps %xmm17, %xmm11, %xmm20 vmulps %xmm15, %xmm13, %xmm21 vmulps %xmm1, %xmm17, %xmm22 vmulps %xmm2, %xmm15, %xmm23 vmulps %xmm0, %xmm18, %xmm24 vfmsub213ps %xmm19, %xmm17, %xmm13 # xmm13 = (xmm17 * xmm13) - xmm19 vfmsub213ps %xmm20, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm12) - xmm20 vfmsub213ps %xmm21, %xmm18, %xmm11 # xmm11 = (xmm18 * xmm11) - xmm21 vfmsub213ps %xmm22, %xmm18, %xmm2 # xmm2 = (xmm18 * xmm2) - xmm22 vfmsub213ps %xmm23, %xmm17, %xmm0 # xmm0 = (xmm17 * xmm0) - xmm23 vfmsub213ps %xmm24, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm1) - xmm24 vbroadcastss 0x5d655(%rip), %xmm18 # 0x1f20ec4 vandps %xmm18, %xmm19, %xmm15 vandps %xmm18, %xmm22, %xmm17 vcmpltps %xmm17, %xmm15, %k1 vandps %xmm18, %xmm20, %xmm15 vandps %xmm18, %xmm23, %xmm17 vcmpltps %xmm17, %xmm15, %k2 vandps %xmm18, %xmm21, %xmm15 vandps %xmm18, %xmm24, %xmm17 vcmpltps %xmm17, %xmm15, %k3 vmovaps %xmm13, %xmm2 {%k1} vmovaps %xmm12, %xmm0 {%k2} vmovaps %xmm11, %xmm1 {%k3} vmulps %xmm1, %xmm10, %xmm10 vfmadd213ps %xmm10, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm7) + xmm10 vfmadd213ps %xmm7, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm5) + xmm7 vaddps %xmm5, %xmm5, %xmm5 vmulps %xmm1, %xmm9, %xmm7 vfmadd213ps %xmm7, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm7 vfmadd213ps %xmm6, %xmm2, 
%xmm4 # xmm4 = (xmm2 * xmm4) + xmm6 vaddps %xmm4, %xmm4, %xmm4 vrcp14ps %xmm5, %xmm6 vmovaps %xmm6, %xmm7 vfnmadd213ps %xmm26, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm26 vfmadd132ps %xmm6, %xmm6, %xmm7 # xmm7 = (xmm7 * xmm6) + xmm6 vmulps %xmm7, %xmm4, %xmm6 vcmpgeps 0xc(%rsi){1to4}, %xmm6, %k1 vbroadcastss 0x5d5b8(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm5, %xmm4 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 {%k1} vcmpneqps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ec3e13 vmovaps %xmm3, 0x190(%rsp) vmovaps %xmm8, 0x1a0(%rsp) vmovaps %xmm14, 0x1b0(%rsp) movq 0x10(%rsp), %rax addq 0x48(%rsp), %rax movq %rax, 0x10(%rsp) leaq 0x7(%rsp), %rax movq %rax, 0x1c0(%rsp) kmovb %k1, 0x1c8(%rsp) vmovaps %xmm6, 0x1f0(%rsp) vmovaps %xmm2, 0x200(%rsp) vmovaps %xmm0, 0x210(%rsp) vmovaps %xmm1, 0x220(%rsp) vcmpnltps 0x2d650(%rip){1to4}, %xmm16, %k2 # 0x1ef0fe8 vrcp14ps %xmm14, %xmm0 vfnmadd213ps %xmm26, %xmm0, %xmm14 # xmm14 = -(xmm0 * xmm14) + xmm26 vfmadd132ps %xmm0, %xmm0, %xmm14 {%k2} {z} # xmm14 {%k2} {z} = (xmm14 * xmm0) + xmm0 vmulps %xmm3, %xmm14, %xmm0 vminps %xmm26, %xmm0, %xmm0 vmovaps %xmm0, 0x1d0(%rsp) vmulps %xmm8, %xmm14, %xmm0 vminps %xmm26, %xmm0, %xmm0 vmovaps %xmm0, 0x1e0(%rsp) movq (%rdx), %r13 kmovd %k1, %r15d vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax cmovel %r15d, %eax movzbl %al, %eax tzcntq %rax, %rbx movq 0x10(%rsp), %rax movl 0x120(%rax,%rbx,4), %r14d movq 0x1e8(%r13), %rax movq (%rax,%r14,8), %rbp movl 0x24(%rsi), %eax testl %eax, 0x34(%rbp) je 0x1ec3a46 movq 0x10(%rdx), %r12 cmpq $0x0, 0x10(%r12) jne 0x1ec3ab5 cmpq $0x0, 0x40(%rbp) jne 0x1ec3ab5 xorl %eax, %eax jmp 0x1ec3a66 movl $0x1, %eax shlxl %ebx, %eax, %eax kmovd %eax, %k0 movzbl %r15b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r15d movb $0x1, %al testb %al, %al je 0x1ec3d9d testb %r15b, %r15b je 0x1ec3e13 kmovd %r15d, %k1 vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %r15b, %al movzbl %al, %eax movzbl %r15b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %ebx jmp 0x1ec3a0f vmovss 0x1d0(%rsp,%rbx,4), %xmm0 vmovss 0x1e0(%rsp,%rbx,4), %xmm1 movq 0x8(%rdx), %rax movq 0x10(%rsp), %rcx movl 0x130(%rcx,%rbx,4), %ecx vmovss 0x200(%rsp,%rbx,4), %xmm2 vmovss 0x210(%rsp,%rbx,4), %xmm3 vmovss 0x220(%rsp,%rbx,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movl %r14d, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x40(%rsp) vmovss 0x1f0(%rsp,%rbx,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x44(%rsp) # imm = 0xFFFFFFFF leaq 0x44(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rbp), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %rsi, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq 0x40(%rbp), %rax testq %rax, %rax vmovaps %xmm6, 0xe0(%rsp) je 0x1ec3c56 leaq 0x80(%rsp), %rdi movq %r8, 0x50(%rsp) vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 vbroadcastss 0x27e3f(%rip), %xmm25 # 0x1eeba20 movq 0x8(%rsp), %rdi movq 0x18(%rsp), %r11 movq 
0x20(%rsp), %r10 vmovups 0xf0(%rsp), %ymm31 vmovups 0x110(%rsp), %ymm30 movq 0x28(%rsp), %r9 vmovups 0x130(%rsp), %ymm29 movq 0x50(%rsp), %r8 vmovups 0x150(%rsp), %ymm28 vmovups 0x170(%rsp), %ymm27 vbroadcastss 0x28ad9(%rip), %xmm26 # 0x1eec714 movq 0x30(%rsp), %rsi movq 0x38(%rsp), %rdx movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec3d67 movq 0x10(%r12), %rax testq %rax, %rax je 0x1ec3d07 testb $0x2, (%r12) jne 0x1ec3c75 testb $0x40, 0x3e(%rbp) je 0x1ec3cfa leaq 0x80(%rsp), %rdi movq %r8, %r12 vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 vbroadcastss 0x27d88(%rip), %xmm25 # 0x1eeba20 movq 0x8(%rsp), %rdi movq 0x18(%rsp), %r11 movq 0x20(%rsp), %r10 vmovups 0xf0(%rsp), %ymm31 vmovups 0x110(%rsp), %ymm30 movq 0x28(%rsp), %r9 vmovups 0x130(%rsp), %ymm29 movq %r12, %r8 vmovups 0x150(%rsp), %ymm28 vmovups 0x170(%rsp), %ymm27 vbroadcastss 0x28a24(%rip), %xmm26 # 0x1eec714 movq 0x30(%rsp), %rsi movq 0x38(%rsp), %rdx movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec3d67 movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edi movl %edi, 0x44(%rax) movl 0x18(%rcx), %edi movl %edi, 0x48(%rax) movl 0x1c(%rcx), %edi movl %edi, 0x4c(%rax) movq 0x8(%rsp), %rdi movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec3d72 vmovss 0x40(%rsp), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0x1, %eax shlxl %ebx, %eax, %eax kmovd %eax, %k0 movzbl %r15b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 kandb %k1, %k0, %k0 jmp 0x1ec3a60 vmovss 0x1d0(%rsp,%rbx,4), %xmm0 vmovss 0x1e0(%rsp,%rbx,4), %xmm1 vmovss 0x1f0(%rsp,%rbx,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x200(%rsp,%rbx,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x210(%rsp,%rbx,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x220(%rsp,%rbx,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movq 0x10(%rsp), %rax movl 0x130(%rax,%rbx,4), %eax movl %eax, 0x44(%rsi) movl %r14d, 0x48(%rsi) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) movq 0x78(%rsp), %rax incq %rax cmpq 0x70(%rsp), %rax movq 0x48(%rsp), %r13 jne 0x1ec3587 vbroadcastss 0x20(%rsi), %ymm0 vmovups 0x2b0(%rsp), %ymm8 vmovups 0x290(%rsp), %ymm9 vmovups 0x270(%rsp), %ymm10 vmovups 0x250(%rsp), %ymm11 movq 0x60(%rsp), %rbx movq 0x58(%rsp), %r14 vmovups 0x230(%rsp), %ymm12 vpmovsxbd 0x99bff(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x9d742(%rip), %ymm14 # 0x1f615bc leaq 0x2f0(%rsp), %r15 vmovdqu64 0x2d0(%rsp), %ymm16 movq 0x68(%rsp), %r12 jmp 0x1ec2fd1 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
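The intersect kernel above is dominated by two nested loops: an outer pop loop that pulls the next node off an explicit stack and rejects it if its recorded entry distance already exceeds ray.tfar, and an inner downtraversal loop that descends through inner nodes until it reaches a leaf. The following is a minimal scalar sketch of that pattern for a binary BVH; Node, AABB, Ray, and intersectAABB are illustrative stand-ins rather than Embree's types, degenerate ray directions are not handled, and stack-depth checking is omitted.

// Minimal scalar sketch of the stack-based closest-hit traversal above.
#include <cfloat>
#include <utility>

struct Ray  { float org[3], dir[3], tnear, tfar; };
struct AABB { float lower[3], upper[3]; };
struct Node {
  AABB  bounds;
  Node* child[2] = {nullptr, nullptr};   // inner node: two children
  int   primBegin = -1, primCount = 0;   // leaf: primitive range
  bool  isLeaf() const { return child[0] == nullptr; }
};

// Slab test: entry distance of the ray into the box, or FLT_MAX on a miss.
static float intersectAABB(const AABB& b, const Ray& ray) {
  float t0 = ray.tnear, t1 = ray.tfar;
  for (int a = 0; a < 3; ++a) {
    float inv = 1.0f / ray.dir[a];
    float tlo = (b.lower[a] - ray.org[a]) * inv;
    float thi = (b.upper[a] - ray.org[a]) * inv;
    if (inv < 0.0f) std::swap(tlo, thi);
    t0 = tlo > t0 ? tlo : t0;
    t1 = thi < t1 ? thi : t1;
    if (t0 > t1) return FLT_MAX;
  }
  return t0;
}

// Pop loop with distance culling, mirroring the kernel's structure
// (N = 2 instead of N = 8, scalar instead of AVX-512).
void intersect(Node* root, Ray& ray) {
  struct StackItem { Node* node; float dist; };
  StackItem stack[64];
  StackItem* sp = stack;
  *sp++ = { root, -FLT_MAX };

  while (sp != stack) {
    StackItem item = *--sp;               // pop next node
    if (item.dist > ray.tfar) continue;   // popped node is already too far
    Node* cur = item.node;

    if (cur->isLeaf()) {
      // a primitive intersector would run here and may shrink ray.tfar
      continue;
    }

    float d0 = intersectAABB(cur->child[0]->bounds, ray);
    float d1 = intersectAABB(cur->child[1]->bounds, ray);
    Node *n0 = cur->child[0], *n1 = cur->child[1];
    if (d1 < d0) { std::swap(d0, d1); std::swap(n0, n1); }
    if (d1 < FLT_MAX) *sp++ = { n1, d1 }; // farther child pushed first
    if (d0 < FLT_MAX) *sp++ = { n0, d0 }; // nearer child ends up on top
  }
}

Pushing the farther child first keeps the nearest subtree on top of the stack, which is what lets the ray.tfar test in the pop loop prune far subtrees once a close hit has shrunk the ray interval; the real kernel additionally keeps the nearest child in a register instead of round-tripping it through the stack.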
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMvMBIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;

    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec3ea9 xorl %eax, %eax jmp 0x1ec46a8 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r15 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec3ee9 vmovss 0x10(%r15), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ec3ef4 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 vbroadcastss (%r15), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r15), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r15), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x99b2b(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9d66e(%rip), %ymm13 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ec4693 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ec3fcc movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ec415c testb $0x8, %r12b jne 0x1ec40dd movq %r12, %rax andq $-0x10, %rax vbroadcastss 0xc(%r15), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %r12d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec42c9 kmovb %k0, %edi testb $0x8, %r12b jne 0x1ec42b5 testq %rdi, %rdi je 0x1ec42bf andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec42e2 testl %eax, %eax je 0x1ec3fe9 jmp 0x1ec4579 testb $0x8, %r12b jne 0x1ec40dd movq %r12, %rax andq $-0x10, %rax vbroadcastss 0xc(%r15), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %r12d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ec40dd vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1ec40dd movl $0x6, %eax jmp 0x1ec414f movl $0x4, %eax jmp 0x1ec414f vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ec40d9 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec4346 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec414f vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec43cf vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec414f movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec448b vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1ec414f valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x5ca25(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x9d17c(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x9d175(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9d168(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x4e226(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec44d1 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ec453b vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r12 vmovdqa %ymm9, %ymm7 jmp 0x1ec4483 cmpl $0x6, %eax jne 0x1ec3fcc movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ec3fcc movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %r12 addq $0x130, %r12 # imm = 0x130 xorl %ebp, %ebp xorl %eax, %eax movq %rax, 0x28(%rsp) movq $-0x4, %r14 xorl %r13d, %r13d movl (%r12,%r14,4), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ec4605 movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r15, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 jne 0x1ec45c8 movq 0x28(%rsp), %rax orb %r13b, %al incq %rbp addq $0x140, %r12 # imm = 0x140 cmpq 0x20(%rsp), %rbp jne 0x1ec45b9 testb $0x1, %al vmovaps 0x30(%rsp), %xmm10 movq 0x18(%rsp), %r8 vpmovsxbd 0x9943c(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9cf7f(%rip), %ymm13 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ec3fcc vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec467d vmovss 0x10(%r15), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ec4688 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec3f56 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
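The point query above culls entirely in squared distance: for a sphere query the cull radius is query->radius squared, for an AABB query it is dot(context->query_radius, context->query_radius), so no square root is taken anywhere in the traversal, and the radius is re-shrunk whenever a leaf reports a closer result. A minimal sketch of the underlying distance test, with Vec3 and the helper names as illustrative stand-ins:

// Squared-distance culling as used by the point query traversal above.
#include <algorithm>

struct Vec3 { float x, y, z; };

static float dot(const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Squared distance from point p to the box [lo,hi]; zero if p is inside.
static float sqrDistancePointAABB(const Vec3& p, const Vec3& lo, const Vec3& hi) {
  float dx = std::max({lo.x - p.x, 0.0f, p.x - hi.x});
  float dy = std::max({lo.y - p.y, 0.0f, p.y - hi.y});
  float dz = std::max({lo.z - p.z, 0.0f, p.z - hi.z});
  return dx*dx + dy*dy + dz*dz;
}

// A node can be skipped when its bounds lie entirely outside the cull radius.
static bool cullNode(const Vec3& p, const Vec3& lo, const Vec3& hi, float cullRadiusSqr) {
  return sqrDistancePointAABB(p, lo, hi) > cullRadiusSqr;
}

The assembly above computes the same quantity per child with clamp-subtract-square-sum sequences (vmaxps/vminps, vsubps, vmulps/vaddps) on eight boxes at a time.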
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiMBIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec46f7 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2658, %rsp # imm = 0x2658 movq 0x70(%rax), %rax movq %rax, 0x310(%rsp) movl $0x0, 0x318(%rsp) cmpq $0x8, %rax jne 0x1ec46fb addq $0x2658, %rsp # imm = 0x2658 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm27, %xmm27, %xmm27 vmaxss 0xc(%rsi), %xmm27, %xmm1 vmaxss 0x20(%rsi), %xmm27, %xmm2 vandps 0x5c7a6(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec4 vcmpltps 0x2c8bf(%rip){1to4}, %xmm3, %k1 # 0x1ef0fe8 vbroadcastss 0x27fe1(%rip), %xmm28 # 0x1eec714 vdivps %xmm0, %xmm28, %xmm0 vbroadcastss 0x5c81d(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x5b7c3(%rip){1to4}, %xmm0, %xmm3 # 0x1f1ff10 vmulps 0x5b7bd(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%rsi), %ymm29 leaq 0x320(%rsp), %r8 vbroadcastss 0x4(%rsi), %ymm30 vbroadcastss 0x8(%rsi), %ymm31 xorl %r9d, %r9d vucomiss %xmm27, %xmm3 setb %r9b vbroadcastss %xmm3, %ymm8 vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3] vbroadcastsd %xmm4, %ymm9 vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0] vbroadcastss 0x5c740(%rip), %ymm6 # 0x1f20edc vpermps %ymm3, %ymm6, %ymm10 vbroadcastss %xmm0, %ymm11 vbroadcastss 0x4df55(%rip), %ymm3 # 0x1f12704 vpermps %ymm0, %ymm3, %ymm12 vpermps %ymm0, %ymm6, %ymm13 shll $0x5, %r9d xorl %r10d, %r10d vucomiss %xmm27, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm27, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r9, %rbx xorq $0x20, %rbx movq %r10, %r14 xorq $0x20, %r14 movq %r11, %r15 xorq $0x20, %r15 vbroadcastss %xmm1, %ymm14 vbroadcastss %xmm2, %ymm0 vpmovsxbd 0x9925e(%rip), %ymm15 # 0x1f5da70 vpbroadcastd 0x9cda0(%rip), %ymm16 # 0x1f615bc leaq 0x310(%rsp), %r12 movq %rdx, 0x10(%rsp) movq %rsi, 0x8(%rsp) vmovups %ymm29, 0x140(%rsp) vmovups %ymm30, 0x120(%rsp) vmovups %ymm31, 0x100(%rsp) movq %r9, 0x68(%rsp) vmovups %ymm8, 0x2d0(%rsp) vmovups %ymm9, 0x2b0(%rsp) vmovups %ymm10, 0x290(%rsp) vmovups %ymm11, 0x270(%rsp) vmovups %ymm12, 0x250(%rsp) vmovups %ymm13, 0x230(%rsp) movq %r10, 0x60(%rsp) movq %r11, 0x58(%rsp) movq %rbx, 0x50(%rsp) movq %r14, 0x48(%rsp) movq %r15, 0x40(%rsp) vmovups %ymm14, 0x210(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r12, %r8 je 0x1ec46e6 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ec48a8 movq (%r8), %rdi testb $0x8, %dil jne 0x1ec49ab movq %rdi, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%r9), %ymm2 vfmadd213ps 0x40(%rax,%r9), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm8, %ymm2 vmovaps 0x100(%rax,%r10), %ymm3 vmaxps %ymm2, %ymm14, %ymm2 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm30, %ymm3, %ymm3 vmovaps 0x100(%rax,%r11), %ymm4 vmulps %ymm3, %ymm9, %ymm3 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm31, %ymm4, %ymm4 vmulps %ymm4, %ymm10, %ymm4 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm18 vmovaps 0x100(%rax,%rbx), %ymm2 vfmadd213ps 0x40(%rax,%rbx), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm11, %ymm2 vmovaps 0x100(%rax,%r14), %ymm3 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vsubps %ymm30, %ymm3, %ymm3 vmulps %ymm3, %ymm12, %ymm3 vmovaps 0x100(%rax,%r15), %ymm4 vfmadd213ps 0x40(%rax,%r15), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vsubps %ymm31, %ymm4, %ymm4 vmulps %ymm4, %ymm13, %ymm4 vminps %ymm4, %ymm3, %ymm3 vminps 
%ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %edi, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec4a1e vcmpleps %ymm2, %ymm18, %k0 kmovb %k0, %r13d testb $0x8, %dil jne 0x1ec4a17 testq %r13, %r13 je 0x1ec4a3a andq $-0x10, %rdi vmovdqu (%rdi), %ymm1 vmovdqu 0x20(%rdi), %ymm2 vmovdqa %ymm15, %ymm3 vpternlogd $0xf8, %ymm16, %ymm18, %ymm3 kmovd %r13d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) xorl %eax, %eax blsrq %r13, %rcx jne 0x1ec4a41 testl %eax, %eax je 0x1ec48c4 jmp 0x1ec4dd5 movl $0x6, %eax jmp 0x1ec4a0a vcmpleps %ymm2, %ymm18, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ec49a7 movl $0x4, %eax jmp 0x1ec4a0a vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec4aa6 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rdi vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec4a0a vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec4b31 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rdi vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec4a0a vmovdqa64 %ymm18, %ymm19 vmovdqa64 %ymm16, %ymm17 vmovdqa64 %ymm15, %ymm16 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec4c82 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rdi vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rsi vxorps %xmm27, %xmm27, %xmm27 vbroadcastss 0x27ade(%rip), %xmm28 # 0x1eec714 vmovups 0x140(%rsp), %ymm29 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovdqa64 %ymm16, %ymm15 vmovdqa64 %ymm17, %ymm16 jmp 0x1ec4a0a valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x5c22e(%rip), %xmm3 # 0x1f20ec0 
vpmovsxbd 0x9c984(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x9c97d(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x9c970(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x4da2e(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec4cc9 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec4d33 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rdi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rsi vxorps %xmm27, %xmm27, %xmm27 vbroadcastss 0x27991(%rip), %xmm28 # 0x1eec714 vmovups 0x140(%rsp), %ymm29 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovdqa64 %ymm16, %ymm15 vmovdqa64 %ymm17, %ymm16 vmovdqa64 %ymm19, %ymm18 jmp 0x1ec4a0a cmpl $0x6, %eax jne 0x1ec48a3 movq %r13, 0x70(%rsp) vmovdqu64 %ymm18, 0x2f0(%rsp) movq %r8, 0x78(%rsp) movl %edi, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x80(%rsp) je 0x1ec587a andq $-0x10, %rdi movq (%rdx), %r15 xorl %eax, %eax movq %rdi, 0x18(%rsp) movq %rax, 0x88(%rsp) leaq (%rax,%rax,4), %rax shlq $0x4, %rax vmovss 0x1c(%rsi), %xmm0 movl 0x30(%rdi,%rax), %ecx movq 0x1e8(%r15), %rdx movq (%rdx,%rcx,8), %rcx vmovss 0x28(%rcx), %xmm1 vmovss 0x2c(%rcx), %xmm2 vmovss 0x30(%rcx), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0x2bb63(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm1 vmaxss %xmm1, %xmm27, %xmm1 vcvttss2si %xmm1, %edx movslq %edx, %rdx movq 0xe0(%rcx), %rcx imulq $0x38, %rdx, %rdx movl (%rdi,%rax), %esi movq %rsi, 0x30(%rsp) movl 0x4(%rdi,%rax), %r9d movq %r9, 0x20(%rsp) movq (%rcx,%rdx), %rbp vmovups (%rbp,%rsi,4), %xmm5 movl 0x10(%rdi,%rax), %esi vmovups (%rbp,%rsi,4), %xmm3 movq 0x38(%rcx,%rdx), %rcx movl 0x20(%rdi,%rax), %r8d vmovups (%rbp,%r8,4), %xmm2 vmovups (%rbp,%r9,4), %xmm7 movl 0x14(%rdi,%rax), %r9d vmovups (%rbp,%r9,4), %xmm6 movl 0x24(%rdi,%rax), %r10d vmovups (%rbp,%r10,4), %xmm4 movl 0x8(%rdi,%rax), %r11d vmovups (%rbp,%r11,4), %xmm11 movl 0x18(%rdi,%rax), %ebx vmovups (%rbp,%rbx,4), %xmm9 movl 0x28(%rdi,%rax), %r14d vmovups (%rbp,%r14,4), %xmm8 movl 0xc(%rdi,%rax), %r12d vmovups (%rbp,%r12,4), %xmm15 movl 0x1c(%rdi,%rax), %r13d vmovups (%rbp,%r13,4), %xmm12 movq 0x18(%rsp), %rdx movl 0x2c(%rdx,%rax), %edi vmovups (%rbp,%rdi,4), %xmm10 movq 0x30(%rsp), %rdx vmovups (%rcx,%rdx,4), %xmm20 vmovups (%rcx,%rsi,4), %xmm16 vmovups (%rcx,%r8,4), %xmm13 movq 0x8(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x20(%rsp), %r8 vmovups (%rcx,%r8,4), %xmm22 vmovups (%rcx,%r9,4), %xmm18 vmovups (%rcx,%r10,4), %xmm14 vmovups (%rcx,%r11,4), %xmm24 vmovups (%rcx,%rbx,4), %xmm21 vmovups (%rcx,%r14,4), %xmm17 vmovups (%rcx,%r12,4), %xmm25 vmovups 
(%rcx,%r13,4), %xmm23 vmovups (%rcx,%rdi,4), %xmm19 movq 0x18(%rsp), %rdi vmovaps 0x30(%rdi,%rax), %xmm26 vmovaps %xmm26, 0x200(%rsp) vmovaps 0x40(%rdi,%rax), %xmm26 vsubss %xmm1, %xmm0, %xmm0 vunpcklps %xmm11, %xmm5, %xmm1 # xmm1 = xmm5[0],xmm11[0],xmm5[1],xmm11[1] vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3] vunpcklps %xmm15, %xmm7, %xmm11 # xmm11 = xmm7[0],xmm15[0],xmm7[1],xmm15[1] vunpckhps %xmm15, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm15[2],xmm7[3],xmm15[3] vunpcklps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] vunpcklps %xmm11, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm11[0],xmm1[1],xmm11[1] vunpckhps %xmm11, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3] vunpcklps %xmm9, %xmm3, %xmm11 # xmm11 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] vunpckhps %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3] vunpcklps %xmm12, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm12[0],xmm6[1],xmm12[1] vunpckhps %xmm12, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm12[2],xmm6[3],xmm12[3] vunpcklps %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1] vunpcklps %xmm9, %xmm11, %xmm6 # xmm6 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] vunpckhps %xmm9, %xmm11, %xmm9 # xmm9 = xmm11[2],xmm9[2],xmm11[3],xmm9[3] vunpcklps %xmm8, %xmm2, %xmm11 # xmm11 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] vunpckhps %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] vunpcklps %xmm10, %xmm4, %xmm8 # xmm8 = xmm4[0],xmm10[0],xmm4[1],xmm10[1] vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] vunpcklps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] vunpcklps %xmm8, %xmm11, %xmm4 # xmm4 = xmm11[0],xmm8[0],xmm11[1],xmm8[1] vunpckhps %xmm8, %xmm11, %xmm8 # xmm8 = xmm11[2],xmm8[2],xmm11[3],xmm8[3] vunpcklps %xmm24, %xmm20, %xmm10 # xmm10 = xmm20[0],xmm24[0],xmm20[1],xmm24[1] vunpckhps %xmm24, %xmm20, %xmm11 # xmm11 = xmm20[2],xmm24[2],xmm20[3],xmm24[3] vunpcklps %xmm25, %xmm22, %xmm12 # xmm12 = xmm22[0],xmm25[0],xmm22[1],xmm25[1] vunpckhps %xmm25, %xmm22, %xmm15 # xmm15 = xmm22[2],xmm25[2],xmm22[3],xmm25[3] vunpcklps %xmm15, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1] vunpcklps %xmm12, %xmm10, %xmm15 # xmm15 = xmm10[0],xmm12[0],xmm10[1],xmm12[1] vunpckhps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3] vunpcklps %xmm21, %xmm16, %xmm12 # xmm12 = xmm16[0],xmm21[0],xmm16[1],xmm21[1] vunpckhps %xmm21, %xmm16, %xmm16 # xmm16 = xmm16[2],xmm21[2],xmm16[3],xmm21[3] vunpcklps %xmm23, %xmm18, %xmm20 # xmm20 = xmm18[0],xmm23[0],xmm18[1],xmm23[1] vunpckhps %xmm23, %xmm18, %xmm18 # xmm18 = xmm18[2],xmm23[2],xmm18[3],xmm23[3] vunpcklps %xmm18, %xmm16, %xmm16 # xmm16 = xmm16[0],xmm18[0],xmm16[1],xmm18[1] vunpcklps %xmm20, %xmm12, %xmm18 # xmm18 = xmm12[0],xmm20[0],xmm12[1],xmm20[1] vunpckhps %xmm20, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm20[2],xmm12[3],xmm20[3] vunpcklps %xmm17, %xmm13, %xmm20 # xmm20 = xmm13[0],xmm17[0],xmm13[1],xmm17[1] vunpckhps %xmm17, %xmm13, %xmm13 # xmm13 = xmm13[2],xmm17[2],xmm13[3],xmm17[3] vunpcklps %xmm19, %xmm14, %xmm17 # xmm17 = xmm14[0],xmm19[0],xmm14[1],xmm19[1] vunpckhps %xmm19, %xmm14, %xmm14 # xmm14 = xmm14[2],xmm19[2],xmm14[3],xmm19[3] vunpcklps %xmm14, %xmm13, %xmm13 # xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1] vunpcklps %xmm17, %xmm20, %xmm14 # xmm14 = xmm20[0],xmm17[0],xmm20[1],xmm17[1] vunpckhps %xmm17, %xmm20, %xmm17 # xmm17 = xmm20[2],xmm17[2],xmm20[3],xmm17[3] vbroadcastss %xmm0, %xmm19 vmovss 0x27679(%rip), %xmm20 # 0x1eec714 vsubss %xmm0, %xmm20, %xmm0 vbroadcastss %xmm0, %xmm0 vmulps %xmm15, %xmm19, %xmm15 vmulps 
%xmm10, %xmm19, %xmm10 vmulps %xmm11, %xmm19, %xmm11 vfmadd231ps %xmm7, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm7) + xmm15 vfmadd231ps %xmm1, %xmm0, %xmm10 # xmm10 = (xmm0 * xmm1) + xmm10 vfmadd231ps %xmm5, %xmm0, %xmm11 # xmm11 = (xmm0 * xmm5) + xmm11 vmulps %xmm18, %xmm19, %xmm1 vmulps %xmm12, %xmm19, %xmm12 vmulps %xmm16, %xmm19, %xmm16 vfmadd231ps %xmm6, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm6) + xmm1 vfmadd231ps %xmm9, %xmm0, %xmm12 # xmm12 = (xmm0 * xmm9) + xmm12 vfmadd231ps %xmm3, %xmm0, %xmm16 # xmm16 = (xmm0 * xmm3) + xmm16 vmulps %xmm14, %xmm19, %xmm3 vmulps %xmm17, %xmm19, %xmm14 vmulps %xmm13, %xmm19, %xmm13 vfmadd231ps %xmm4, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm4) + xmm3 vfmadd231ps %xmm8, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm8) + xmm14 vmovaps %xmm26, 0xf0(%rsp) vfmadd231ps %xmm2, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm2) + xmm13 movb $0x0, 0x7(%rsp) vbroadcastss (%rsi), %xmm0 vbroadcastss 0x4(%rsi), %xmm2 vbroadcastss 0x8(%rsi), %xmm17 vbroadcastss 0x10(%rsi), %xmm5 vbroadcastss 0x14(%rsi), %xmm7 leaq 0x7(%rsp), %rax movq %rax, 0x190(%rsp) vbroadcastss 0x18(%rsi), %xmm9 vsubps %xmm0, %xmm15, %xmm4 vsubps %xmm2, %xmm10, %xmm6 vsubps %xmm17, %xmm11, %xmm8 vsubps %xmm0, %xmm1, %xmm19 vsubps %xmm2, %xmm12, %xmm20 vsubps %xmm17, %xmm16, %xmm21 vsubps %xmm0, %xmm3, %xmm22 vsubps %xmm2, %xmm14, %xmm14 vsubps %xmm17, %xmm13, %xmm17 vsubps %xmm4, %xmm22, %xmm11 vsubps %xmm6, %xmm14, %xmm13 vsubps %xmm8, %xmm17, %xmm12 vsubps %xmm19, %xmm4, %xmm15 vsubps %xmm20, %xmm6, %xmm18 vsubps %xmm21, %xmm8, %xmm16 vsubps %xmm22, %xmm19, %xmm0 vsubps %xmm14, %xmm20, %xmm1 vsubps %xmm17, %xmm21, %xmm2 vaddps %xmm4, %xmm22, %xmm3 vaddps %xmm6, %xmm14, %xmm10 vaddps %xmm8, %xmm17, %xmm23 vmulps %xmm12, %xmm10, %xmm24 vfmsub231ps %xmm23, %xmm13, %xmm24 # xmm24 = (xmm13 * xmm23) - xmm24 vmulps %xmm11, %xmm23, %xmm23 vfmsub231ps %xmm3, %xmm12, %xmm23 # xmm23 = (xmm12 * xmm3) - xmm23 vmulps %xmm3, %xmm13, %xmm3 vfmsub231ps %xmm10, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm10) - xmm3 vmulps %xmm3, %xmm9, %xmm3 vfmadd231ps %xmm23, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm23) + xmm3 vfmadd231ps %xmm24, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm24) + xmm3 vaddps %xmm19, %xmm4, %xmm10 vaddps %xmm20, %xmm6, %xmm23 vaddps %xmm21, %xmm8, %xmm24 vmulps %xmm16, %xmm23, %xmm25 vfmsub231ps %xmm24, %xmm18, %xmm25 # xmm25 = (xmm18 * xmm24) - xmm25 vmulps %xmm15, %xmm24, %xmm24 vfmsub231ps %xmm10, %xmm16, %xmm24 # xmm24 = (xmm16 * xmm10) - xmm24 vmulps %xmm18, %xmm10, %xmm10 vfmsub231ps %xmm23, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm23) - xmm10 vmulps %xmm10, %xmm9, %xmm10 vfmadd231ps %xmm24, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm24) + xmm10 vfmadd231ps %xmm25, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm25) + xmm10 vbroadcastss 0x267e2(%rip), %xmm25 # 0x1eeba20 vaddps %xmm22, %xmm19, %xmm19 vaddps %xmm14, %xmm20, %xmm14 vaddps %xmm17, %xmm21, %xmm17 vmulps %xmm2, %xmm14, %xmm20 vfmsub231ps %xmm17, %xmm1, %xmm20 # xmm20 = (xmm1 * xmm17) - xmm20 vmulps %xmm0, %xmm17, %xmm17 vfmsub231ps %xmm19, %xmm2, %xmm17 # xmm17 = (xmm2 * xmm19) - xmm17 vmulps %xmm1, %xmm19, %xmm19 vfmsub231ps %xmm14, %xmm0, %xmm19 # xmm19 = (xmm0 * xmm14) - xmm19 vmulps %xmm19, %xmm9, %xmm19 vfmadd231ps %xmm17, %xmm7, %xmm19 # xmm19 = (xmm7 * xmm17) + xmm19 vfmadd231ps %xmm20, %xmm5, %xmm19 # xmm19 = (xmm5 * xmm20) + xmm19 vaddps %xmm3, %xmm10, %xmm14 vaddps %xmm14, %xmm19, %xmm14 vandps 0x5bc2a(%rip){1to4}, %xmm14, %xmm17 # 0x1f20ec4 vmulps 0x5bc28(%rip){1to4}, %xmm17, %xmm20 # 0x1f20ecc vminps %xmm10, %xmm3, %xmm21 vminps %xmm19, %xmm21, %xmm21 vbroadcastss 0x5bc06(%rip), %xmm22 # 0x1f20ec0 vxorps 
%xmm22, %xmm20, %xmm22 vcmpnltps %xmm22, %xmm21, %k0 vmaxps %xmm10, %xmm3, %xmm21 vmaxps %xmm19, %xmm21, %xmm19 vcmpleps %xmm20, %xmm19, %k1 korw %k1, %k0, %k0 kshiftlb $0x4, %k0, %k0 kshiftrb $0x4, %k0, %k0 kortestb %k0, %k0 movl $0x1, %r8d je 0x1ec5861 vmulps %xmm18, %xmm12, %xmm19 vmulps %xmm16, %xmm11, %xmm20 vmulps %xmm15, %xmm13, %xmm21 vmulps %xmm1, %xmm16, %xmm22 vmulps %xmm2, %xmm15, %xmm23 vmulps %xmm0, %xmm18, %xmm24 vfmsub213ps %xmm19, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) - xmm19 vfmsub213ps %xmm20, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm12) - xmm20 vfmsub213ps %xmm21, %xmm18, %xmm11 # xmm11 = (xmm18 * xmm11) - xmm21 vfmsub213ps %xmm22, %xmm18, %xmm2 # xmm2 = (xmm18 * xmm2) - xmm22 vfmsub213ps %xmm23, %xmm16, %xmm0 # xmm0 = (xmm16 * xmm0) - xmm23 vfmsub213ps %xmm24, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm1) - xmm24 vbroadcastss 0x5bb78(%rip), %xmm18 # 0x1f20ec4 vandps %xmm18, %xmm19, %xmm15 vandps %xmm18, %xmm22, %xmm16 vcmpltps %xmm16, %xmm15, %k1 vandps %xmm18, %xmm20, %xmm15 vandps %xmm18, %xmm23, %xmm16 vcmpltps %xmm16, %xmm15, %k2 vandps %xmm18, %xmm21, %xmm15 vandps %xmm18, %xmm24, %xmm16 vcmpltps %xmm16, %xmm15, %k3 vmovaps %xmm13, %xmm2 {%k1} vmovaps %xmm12, %xmm0 {%k2} vmovaps %xmm11, %xmm1 {%k3} vmulps %xmm1, %xmm9, %xmm9 vfmadd213ps %xmm9, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm7) + xmm9 vfmadd213ps %xmm7, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm5) + xmm7 vaddps %xmm5, %xmm5, %xmm5 vmulps %xmm1, %xmm8, %xmm7 vfmadd213ps %xmm7, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm7 vfmadd213ps %xmm6, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + xmm6 vaddps %xmm4, %xmm4, %xmm4 vrcp14ps %xmm5, %xmm6 vmovaps %xmm6, %xmm7 vfnmadd213ps %xmm28, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm28 vfmadd132ps %xmm6, %xmm6, %xmm7 # xmm7 = (xmm7 * xmm6) + xmm6 vmulps %xmm7, %xmm4, %xmm6 vcmpgeps 0xc(%rsi){1to4}, %xmm6, %k1 vbroadcastss 0x5badb(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm5, %xmm4 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 {%k1} vcmpneqps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ec5861 vmovaps %xmm3, 0x160(%rsp) vmovaps %xmm10, 0x170(%rsp) vmovaps %xmm14, 0x180(%rsp) movq %rax, 0x190(%rsp) kmovb %k1, 0x198(%rsp) vmovaps %xmm6, 0x1c0(%rsp) vmovaps %xmm2, 0x1d0(%rsp) vmovaps %xmm0, 0x1e0(%rsp) vmovaps %xmm1, 0x1f0(%rsp) vcmpnltps 0x2bb87(%rip){1to4}, %xmm17, %k2 # 0x1ef0fe8 vrcp14ps %xmm14, %xmm0 vfnmadd213ps %xmm28, %xmm0, %xmm14 # xmm14 = -(xmm0 * xmm14) + xmm28 vfmadd132ps %xmm0, %xmm0, %xmm14 {%k2} {z} # xmm14 {%k2} {z} = (xmm14 * xmm0) + xmm0 vmulps %xmm3, %xmm14, %xmm0 vminps %xmm28, %xmm0, %xmm0 vmovaps %xmm0, 0x1a0(%rsp) vmulps %xmm10, %xmm14, %xmm0 vminps %xmm28, %xmm0, %xmm0 vmovaps %xmm0, 0x1b0(%rsp) kmovd %k1, %r13d vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax cmovel %r13d, %eax movzbl %al, %eax tzcntq %rax, %r12 movl 0x200(%rsp,%r12,4), %ebx movq 0x1e8(%r15), %rax movq (%rax,%rbx,8), %rbp movl 0x24(%rsi), %eax testl %eax, 0x34(%rbp) je 0x1ec5506 movq 0x10(%rdx), %r14 cmpq $0x0, 0x10(%r14) jne 0x1ec5571 cmpq $0x0, 0x40(%rbp) jne 0x1ec5571 xorl %eax, %eax jmp 0x1ec5521 shlxl %r12d, %r8d, %eax kmovd %eax, %k0 movzbl %r13b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r13d movb $0x1, %al testb %al, %al je 0x1ec57ea testb %r13b, %r13b je 0x1ec5861 kmovd %r13d, %k1 vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # 
xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %r13b, %al movzbl %al, %eax movzbl %r13b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %r12d jmp 0x1ec54d5 vmovss 0x1a0(%rsp,%r12,4), %xmm0 vmovss 0x1b0(%rsp,%r12,4), %xmm1 movq 0x8(%rdx), %rax movl 0xf0(%rsp,%r12,4), %ecx vmovss 0x1d0(%rsp,%r12,4), %xmm2 vmovss 0x1e0(%rsp,%r12,4), %xmm3 vmovss 0x1f0(%rsp,%r12,4), %xmm4 vmovss %xmm2, 0xc0(%rsp) vmovss %xmm3, 0xc4(%rsp) vmovss %xmm4, 0xc8(%rsp) vmovss %xmm0, 0xcc(%rsp) vmovss %xmm1, 0xd0(%rsp) movl %ecx, 0xd4(%rsp) movl %ebx, 0xd8(%rsp) movl (%rax), %ecx movl %ecx, 0xdc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xe0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x1c0(%rsp,%r12,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x2c(%rsp) # imm = 0xFFFFFFFF leaq 0x2c(%rsp), %rcx movq %rcx, 0x90(%rsp) movq 0x18(%rbp), %rcx movq %rcx, 0x98(%rsp) movq %rax, 0xa0(%rsp) movq %rsi, 0xa8(%rsp) leaq 0xc0(%rsp), %rax movq %rax, 0xb0(%rsp) movl $0x1, 0xb8(%rsp) movq 0x40(%rbp), %rax testq %rax, %rax vmovaps %xmm6, 0x30(%rsp) je 0x1ec56dd leaq 0x90(%rsp), %rdi vzeroupper callq *%rax vmovaps 0x30(%rsp), %xmm6 movq 0x18(%rsp), %rdi movl $0x1, %r8d vbroadcastss 0x26386(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 vmovups 0x140(%rsp), %ymm29 vbroadcastss 0x27058(%rip), %xmm28 # 0x1eec714 vxorps %xmm27, %xmm27, %xmm27 movq 0x8(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x90(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec57b9 movq 0x10(%r14), %rax testq %rax, %rax je 0x1ec5759 testb $0x2, (%r14) jne 0x1ec56f2 testb $0x40, 0x3e(%rbp) je 0x1ec574c leaq 0x90(%rsp), %rdi vzeroupper callq *%rax vmovaps 0x30(%rsp), %xmm6 movq 0x18(%rsp), %rdi movl $0x1, %r8d vbroadcastss 0x26306(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 vmovups 0x140(%rsp), %ymm29 vbroadcastss 0x26fd8(%rip), %xmm28 # 0x1eec714 vxorps %xmm27, %xmm27, %xmm27 movq 0x8(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x90(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec57b9 movq 0xa8(%rsp), %rax movq 0xb0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edi movl %edi, 0x44(%rax) movl 0x18(%rcx), %edi movl %edi, 0x48(%rax) movl 0x1c(%rcx), %edi movl %edi, 0x4c(%rax) movq 0x18(%rsp), %rdi movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec57c4 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%rsi) shlxl %r12d, %r8d, %eax kmovd %eax, %k0 movzbl %r13b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 kandb %k1, %k0, %k0 jmp 0x1ec551b vmovss 0x1a0(%rsp,%r12,4), %xmm0 vmovss 0x1b0(%rsp,%r12,4), %xmm1 vmovss 0x1c0(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x1d0(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x1e0(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x1f0(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movl 0xf0(%rsp,%r12,4), %eax movl %eax, 0x44(%rsi) movl %ebx, 0x48(%rsi) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) movq 0x88(%rsp), %rax incq %rax cmpq 0x80(%rsp), %rax jne 0x1ec4e18 vbroadcastss 0x20(%rsi), %ymm0 movq 0x78(%rsp), %r8 movq 0x68(%rsp), %r9 vmovups 0x2d0(%rsp), %ymm8 vmovups 0x2b0(%rsp), 
%ymm9 vmovups 0x290(%rsp), %ymm10 vmovups 0x270(%rsp), %ymm11 vmovups 0x250(%rsp), %ymm12 vmovups 0x230(%rsp), %ymm13 movq 0x60(%rsp), %r10 movq 0x58(%rsp), %r11 movq 0x50(%rsp), %rbx movq 0x48(%rsp), %r14 movq 0x40(%rsp), %r15 vmovups 0x210(%rsp), %ymm14 vpmovsxbd 0x98185(%rip), %ymm15 # 0x1f5da70 vpbroadcastd 0x9bcc7(%rip), %ymm16 # 0x1f615bc leaq 0x310(%rsp), %r12 vmovdqu64 0x2f0(%rsp), %ymm18 movq 0x70(%rsp), %r13 jmp 0x1ec48a3
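A pattern worth noting before the next record: wherever these listings need a divide, the compiler emits vrcp14ps followed by a single FMA pair (vfnmadd213ps, then vfmadd132ps) instead of a full-precision vdivps. Assuming the broadcast FMA constant is 1.0f, as this standard idiom requires, that pair is one Newton-Raphson refinement of the 14-bit reciprocal estimate. A minimal scalar sketch:

#include <cstdio>

// One Newton-Raphson step refining a coarse reciprocal estimate r0 ~ 1/a:
//   r1 = r0 + r0 * (1 - a*r0)
// This mirrors the vrcp14ps / vfnmadd213ps / vfmadd132ps triple in the listing
// (the broadcast constant in the FMAs is assumed to be 1.0f).
static inline float refined_rcp(float a, float r0)
{
  float e = 1.0f - a * r0;  // vfnmadd213ps: -(a * r0) + 1.0f
  return r0 + r0 * e;       // vfmadd132ps:  (e * r0) + r0
}

int main()
{
  float a  = 3.0f;
  float r0 = 0.333f; // stand-in for the 14-bit vrcp14ps estimate
  printf("%.9f\n", refined_rcp(a, r0)); // ~0.333333, much closer to 1/3
  return 0;
}

One step roughly doubles the number of correct bits, which is why a single refinement of the ~14-bit estimate suffices for single precision.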
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiMBIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec5923 xorl %eax, %eax jmp 0x1ec610c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r13 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec5963 vmovss 0x10(%r13), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ec596e vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r13), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r13), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r13), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x980a8(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9bbeb(%rip), %ymm13 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ec60f7 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ec5a47 movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ec5bcc testb $0x8, %bpl jne 0x1ec5b57 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ec5d34 kmovb %k0, %edi testb $0x8, %bpl jne 0x1ec5d20 testq %rdi, %rdi je 0x1ec5d2a andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec5d4d testl %eax, %eax je 0x1ec5a64 jmp 0x1ec5fe4 testb $0x8, %bpl jne 0x1ec5b57 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ec5b57 vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1ec5b57 movl $0x6, %eax jmp 0x1ec5bbf movl $0x4, %eax jmp 0x1ec5bbf vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ec5b53 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec5db1 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec5bbf vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec5e3a vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec5bbf movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec5ef6 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1ec5bbf valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x5afba(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x9b711(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x9b70a(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9b6fd(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x4c7bb(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec5f3c popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ec5fa6 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ec5eee cmpl $0x6, %eax jne 0x1ec5a47 movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ec5a47 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x40, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r12d, %r12d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ec606c movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r13, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r12b incq %r14 cmpq $0x4, %r14 jne 0x1ec602c movq 0x28(%rsp), %rax orb %r12b, %al incq %r15 addq $0x50, %rbp cmpq 0x20(%rsp), %r15 jne 0x1ec6021 testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x979d0(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x9b513(%rip), %ymm13 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ec5a47 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec60e1 vmovss 0x10(%r13), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ec60ec vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec59d1 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
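The node test in the listing above clamps the query point into each child box (vmaxps/vminps), subtracts, squares and sums per axis, then compares against the squared radius (vcmpleps), for all children at once. A scalar sketch of that point-to-AABB distance test, with hypothetical type and function names:

#include <algorithm>
#include <cstdio>

// Squared distance from a point to an axis-aligned box: clamp the point into
// the box, then take the squared length of the offset. This is the scalar form
// of the vmaxps/vminps/vsubps/vmulps/vaddps/vcmpleps sequence the node test
// runs on all child boxes simultaneously.
struct Vec3 { float x, y, z; };

static inline float sqr_dist_point_aabb(const Vec3& p, const Vec3& lo, const Vec3& hi)
{
  float dx = std::min(std::max(p.x, lo.x), hi.x) - p.x;
  float dy = std::min(std::max(p.y, lo.y), hi.y) - p.y;
  float dz = std::min(std::max(p.z, lo.z), hi.z) - p.z;
  return dx*dx + dy*dy + dz*dz;
}

int main()
{
  Vec3 p  {2.0f, 0.5f, 0.0f};
  Vec3 lo {0.0f, 0.0f, 0.0f};
  Vec3 hi {1.0f, 1.0f, 1.0f};
  float r = 1.5f;
  // A child is visited when its box intersects the query sphere:
  printf("%d\n", sqr_dist_point_aabb(p, lo, hi) <= r*r); // prints 1 (dist^2 = 1.0)
  return 0;
}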
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::QuadMvIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec615f pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x2700, %rsp # imm = 0x2700 movq 0x70(%rax), %rax movq %rax, 0x3a0(%rsp) movl $0x0, 0x3a8(%rsp) cmpq $0x8, %rax jne 0x1ec6163 leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r14 leaq 0x3b0(%rsp), %r8 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x5ad39(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x2ae54(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x26563(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm1, %xmm0 setb %dil vbroadcastss %xmm0, %ymm19 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm20 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x5acfe(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm21 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x4c50d(%rip), %ymm22 # 0x1f12704 vpermps %ymm0, %ymm22, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm1, %xmm4 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm1, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %rsi xorq $0x20, %rsi movq %r10, %rbx xorq $0x20, %rbx vbroadcastss %xmm3, %ymm0 vbroadcastss 0x5ac6f(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm7, %ymm23 vxorps %ymm1, %ymm8, %ymm24 vxorps %ymm1, %ymm6, %ymm25 vbroadcastss %xmm2, %ymm26 vpmovsxbd 0x977fd(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x9b33f(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x5ac39(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x9b38f(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x9b38d(%rip), %ymm31 # 0x1f61628 movq %rbx, 0x78(%rsp) vmovss 0x20(%r14), %xmm1 leaq 0x3a0(%rsp), %rax cmpq %rax, %r8 je 0x1ec6151 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ec62a6 movq (%r8), %r13 testb $0x8, %r13b jne 0x1ec6348 vmovaps 0x40(%r13,%rdi), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%r13,%r9), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%r13,%r10), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpmaxsd %ymm26, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm8 vmovaps 0x40(%r13,%r11), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%r13,%rsi), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%r13,%rbx), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm8, %k0 kmovb %k0, %r12d testb $0x8, %r13b jne 0x1ec63ba testq %r12, %r12 je 0x1ec63c1 andq $-0x10, %r13 vmovdqu (%r13), %ymm1 vmovdqu 0x20(%r13), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm8, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ec63c8 testl %eax, %eax je 0x1ec62ca jmp 0x1ec667a movl $0x6, %eax jmp 0x1ec63ad movl $0x4, %eax jmp 0x1ec63ad movq %rsi, %r15 vpshufd 
$0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec6432 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r13 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 movq %r15, %rsi jmp 0x1ec63ad vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec64bb vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ec642a vmovdqa %ymm8, %ymm9 movq %r11, %rsi movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec6591 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa %ymm9, %ymm8 vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %rsi, %r11 jmp 0x1ec642a valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm6, %ymm31, %ymm3 vpmovsxbd 0x9b077(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm22, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec65c2 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec6629 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r13 movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %rsi, %r11 movq %r15, %rsi vmovdqa %ymm9, %ymm8 jmp 0x1ec63ad cmpl $0x6, %eax jne 0x1ec62a0 vmovdqa %ymm8, 0x360(%rsp) movl %r13d, %r15d andl $0xf, %r15d addq $-0x8, %r15 je 0x1ec6e14 andq $-0x10, %r13 xorl %ebx, %ebx imulq $0xe0, %rbx, %rax vbroadcasti128 0xd0(%r13,%rax), %ymm0 
# ymm0 = mem[0,1,0,1] vbroadcasti128 0xc0(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1] vmovdqa %ymm1, 0x380(%rsp) vmovdqa %ymm0, 0xe0(%rsp) vmovaps (%r13,%rax), %xmm0 vmovaps 0x10(%r13,%rax), %xmm1 vmovaps 0x20(%r13,%rax), %xmm2 vinsertf128 $0x1, 0x60(%r13,%rax), %ymm0, %ymm3 vinsertf128 $0x1, 0x70(%r13,%rax), %ymm1, %ymm5 vinsertf128 $0x1, 0x80(%r13,%rax), %ymm2, %ymm6 vbroadcastf128 0x30(%r13,%rax), %ymm0 # ymm0 = mem[0,1,0,1] vbroadcastf128 0x40(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1] vbroadcastf128 0x50(%r13,%rax), %ymm2 # ymm2 = mem[0,1,0,1] vbroadcastf128 0x90(%r13,%rax), %ymm4 # ymm4 = mem[0,1,0,1] vbroadcastf128 0xa0(%r13,%rax), %ymm7 # ymm7 = mem[0,1,0,1] vbroadcastf128 0xb0(%r13,%rax), %ymm8 # ymm8 = mem[0,1,0,1] vsubps %ymm0, %ymm3, %ymm10 vsubps %ymm1, %ymm5, %ymm11 vsubps %ymm2, %ymm6, %ymm12 vsubps %ymm3, %ymm4, %ymm9 vsubps %ymm5, %ymm7, %ymm7 vsubps %ymm6, %ymm8, %ymm8 vmulps %ymm8, %ymm11, %ymm0 vfmsub231ps %ymm12, %ymm7, %ymm0 # ymm0 = (ymm7 * ymm12) - ymm0 vmulps %ymm9, %ymm12, %ymm1 vfmsub231ps %ymm10, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm10) - ymm1 vmulps %ymm7, %ymm10, %ymm2 vbroadcastss 0x10(%r14), %ymm13 vbroadcastss 0x14(%r14), %ymm14 vbroadcastss 0x18(%r14), %ymm15 vsubps (%r14){1to8}, %ymm3, %ymm4 vfmsub231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) - ymm2 vsubps 0x4(%r14){1to8}, %ymm5, %ymm5 vsubps 0x8(%r14){1to8}, %ymm6, %ymm6 vmulps %ymm6, %ymm14, %ymm16 vfmsub231ps %ymm15, %ymm5, %ymm16 # ymm16 = (ymm5 * ymm15) - ymm16 vmulps %ymm4, %ymm15, %ymm17 vfmsub231ps %ymm13, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm13) - ymm17 vmulps %ymm5, %ymm13, %ymm18 vfmsub231ps %ymm14, %ymm4, %ymm18 # ymm18 = (ymm4 * ymm14) - ymm18 vmulps %ymm2, %ymm15, %ymm15 vfmadd231ps %ymm14, %ymm1, %ymm15 # ymm15 = (ymm1 * ymm14) + ymm15 vfmadd231ps %ymm13, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm13) + ymm15 vandps 0x5a6fa(%rip){1to8}, %ymm15, %ymm3 # 0x1f20ec4 vmulps %ymm18, %ymm8, %ymm8 vfmadd231ps %ymm7, %ymm17, %ymm8 # ymm8 = (ymm17 * ymm7) + ymm8 vfmadd231ps %ymm9, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm9) + ymm8 vandpd 0x5a782(%rip){1to4}, %ymm15, %ymm9 # 0x1f20f68 vxorps %ymm8, %ymm9, %ymm7 vmulps %ymm18, %ymm12, %ymm8 vfmadd231ps %ymm17, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm17) + ymm8 vfmadd231ps %ymm16, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm16) + ymm8 vxorps %ymm8, %ymm9, %ymm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %ymm10, %ymm7, %k1 vcmpnltps %ymm10, %ymm8, %k1 {%k1} vcmpneqps %ymm10, %ymm15, %k1 {%k1} vaddps %ymm7, %ymm8, %ymm10 vcmpleps %ymm3, %ymm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1ec683e incq %rbx cmpq %r15, %rbx jne 0x1ec66a3 jmp 0x1ec6e14 vmulps %ymm6, %ymm2, %ymm6 vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6 vfmadd213ps %ymm5, %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + ymm5 vxorps %ymm4, %ymm9, %ymm4 vmulps 0xc(%r14){1to8}, %ymm3, %ymm5 vcmpltps %ymm4, %ymm5, %k1 vmulps 0x20(%r14){1to8}, %ymm3, %ymm5 vcmpleps %ymm5, %ymm4, %k1 {%k1} kandb %k0, %k1, %k0 kortestb %k0, %k0 je 0x1ec682d vmovaps %ymm7, 0x100(%rsp) vmovaps %ymm8, 0x120(%rsp) vmovaps %ymm4, 0x140(%rsp) vmovaps %ymm3, 0x160(%rsp) kmovb %k0, 0x181(%rsp) vmovaps 0x100(%rsp), %ymm4 vmovaps 0x120(%rsp), %ymm5 vsubps %ymm5, %ymm3, %ymm6 vblendps $0xf0, %ymm6, %ymm4, %ymm6 # ymm6 = ymm4[0,1,2,3],ymm6[4,5,6,7] vmovaps %ymm6, 0x100(%rsp) vsubps %ymm4, %ymm3, %ymm4 vblendps $0xf0, %ymm4, %ymm5, %ymm4 # ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] vmovaps %ymm4, 0x120(%rsp) vmovaps 0x9a05d(%rip), %ymm5 # 0x1f60940 vmulps %ymm5, %ymm0, %ymm0 vmovaps %ymm0, 0x200(%rsp) vmulps %ymm5, %ymm1, %ymm0 vmovaps %ymm0, 0x220(%rsp) vmulps %ymm5, %ymm2, 
%ymm0 vmovaps %ymm0, 0x240(%rsp) movq %rdx, 0x10(%rsp) movq (%rdx), %rax movq %rax, 0x70(%rsp) movzbl 0x181(%rsp), %ecx vrcp14ps %ymm3, %ymm0 vfnmadd213ps 0x25de5(%rip){1to8}, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm3) + mem vfmadd132ps %ymm0, %ymm0, %ymm3 # ymm3 = (ymm3 * ymm0) + ymm0 vmulps 0x140(%rsp), %ymm3, %ymm5 vmovaps %ymm5, 0x1e0(%rsp) vmulps %ymm3, %ymm6, %ymm0 vmovaps %ymm0, 0x1a0(%rsp) vmulps %ymm3, %ymm4, %ymm0 vmovaps %ymm0, 0x1c0(%rsp) kmovd %ecx, %k1 vbroadcastss 0x250b3(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0xc(%rsp) cmovel %ecx, %eax movzbl %al, %eax tzcntq %rax, %rdx movl 0x380(%rsp,%rdx,4), %ecx movq 0x70(%rsp), %rax movq 0x1e8(%rax), %rax movq %rcx, 0x30(%rsp) movq (%rax,%rcx,8), %rcx movl 0x24(%r14), %eax testl %eax, 0x34(%rcx) movq %rdx, 0x18(%rsp) je 0x1ec6a07 movq %rcx, %rax movq 0x10(%rsp), %rdx movq 0x10(%rdx), %rcx movq %rcx, 0x28(%rsp) cmpq $0x0, 0x10(%rcx) movl 0xc(%rsp), %ecx jne 0x1ec6a8f cmpq $0x0, 0x40(%rax) jne 0x1ec6a8f xorl %eax, %eax jmp 0x1ec6a2d movl $0x1, %eax shlxl %edx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx testb %al, %al je 0x1ec6d8b testb %cl, %cl je 0x1ec682d kmovd %ecx, %k1 vbroadcastss 0x24fd6(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0xc(%rsp) movzbl %cl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %edx jmp 0x1ec69ae vmovaps %ymm5, 0x260(%rsp) vmovaps %ymm26, 0x280(%rsp) vmovaps %ymm25, 0x2a0(%rsp) vmovaps %ymm24, 0x2c0(%rsp) vmovaps %ymm23, 0x2e0(%rsp) movq %rsi, 0x40(%rsp) movq %r11, 0x48(%rsp) movq %r10, 0x50(%rsp) movq %r9, 0x58(%rsp) vmovaps %ymm21, 0x300(%rsp) vmovaps %ymm20, 0x320(%rsp) vmovaps %ymm19, 0x340(%rsp) movq %rdi, 0x60(%rsp) movq %r8, 0x68(%rsp) movq 0x18(%rsp), %rsi vmovss 0x1a0(%rsp,%rsi,4), %xmm0 vmovss 0x1c0(%rsp,%rsi,4), %xmm1 movq %rax, %rdi movq 0x8(%rdx), %rax movl 0xe0(%rsp,%rsi,4), %ecx vmovss 0x200(%rsp,%rsi,4), %xmm2 vmovss 0x220(%rsp,%rsi,4), %xmm3 vmovss 0x240(%rsp,%rsi,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movq 0x30(%rsp), %rcx movl %ecx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%r14), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x1e0(%rsp,%rsi,4), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rdi), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %r14, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq %rdi, 0x38(%rsp) movq 0x40(%rdi), %rax testq %rax, %rax je 0x1ec6c14 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec6cac movq 0x28(%rsp), %rax movq 0x10(%rax), %rax testq 
%rax, %rax je 0x1ec6c51 movq 0x28(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ec6c37 movq 0x38(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1ec6c44 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec6cac movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec6cb8 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0x1, %eax movq 0x18(%rsp), %rcx shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k1 vmovaps 0x260(%rsp), %ymm5 vcmpleps 0x20(%r14){1to8}, %ymm5, %k0 {%k1} kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx movq 0x68(%rsp), %r8 movq 0x60(%rsp), %rdi vmovaps 0x340(%rsp), %ymm19 vmovaps 0x320(%rsp), %ymm20 vmovaps 0x300(%rsp), %ymm21 vbroadcastss 0x4b9e4(%rip), %ymm22 # 0x1f12704 movq 0x58(%rsp), %r9 movq 0x50(%rsp), %r10 movq 0x48(%rsp), %r11 movq 0x40(%rsp), %rsi vmovaps 0x2e0(%rsp), %ymm23 vmovaps 0x2c0(%rsp), %ymm24 vmovaps 0x2a0(%rsp), %ymm25 vmovaps 0x280(%rsp), %ymm26 vpmovsxbd 0x96d12(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x9a854(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x5a14e(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x9a8a4(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x9a8a2(%rip), %ymm31 # 0x1f61628 jmp 0x1ec6a2d movq 0x18(%rsp), %rax vmovss 0x1a0(%rsp,%rax,4), %xmm0 vmovss 0x1c0(%rsp,%rax,4), %xmm1 vmovss 0x1e0(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x20(%r14) vmovss 0x200(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x30(%r14) vmovss 0x220(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x34(%r14) vmovss 0x240(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x38(%r14) vmovss %xmm0, 0x3c(%r14) vmovss %xmm1, 0x40(%r14) movl 0xe0(%rsp,%rax,4), %eax movl %eax, 0x44(%r14) movq 0x30(%rsp), %rax movl %eax, 0x48(%r14) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r14) movl 0x4(%rax), %eax movl %eax, 0x50(%r14) jmp 0x1ec682d vbroadcastss 0x20(%r14), %ymm0 movq 0x78(%rsp), %rbx vmovdqa 0x360(%rsp), %ymm8 jmp 0x1ec62a0 nop
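The leaf test in this record's listing (the vsubps/vfmsub231ps cross products feeding the vcmpnltps/vcmpleps mask logic) is a vectorized variant of the Moeller-Trumbore ray/triangle test, applied to the triangles of four quads at a time, as the QuadMvIntersector1Moeller name indicates. A minimal scalar sketch of the classic formulation, not Embree's exact edge and sign conventions:

#include <cmath>
#include <cstdio>

// Classic Moeller-Trumbore ray/triangle intersection. Returns true and sets
// t/u/v on a hit with t in [tnear, tfar]. Helper types and names are
// illustrative, not Embree's.
struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

static bool intersect_tri(Vec3 org, Vec3 dir, float tnear, float tfar,
                          Vec3 v0, Vec3 v1, Vec3 v2, float& t, float& u, float& v)
{
  Vec3 e1 = sub(v1, v0), e2 = sub(v2, v0);
  Vec3 p  = cross(dir, e2);
  float det = dot(e1, p);
  if (fabsf(det) < 1e-8f) return false;   // ray parallel to triangle plane
  float inv = 1.0f / det;
  Vec3 s = sub(org, v0);
  u = dot(s, p) * inv;   if (u < 0.0f || u > 1.0f)       return false;
  Vec3 q = cross(s, e1);
  v = dot(dir, q) * inv; if (v < 0.0f || u + v > 1.0f)   return false;
  t = dot(e2, q) * inv;
  return t >= tnear && t <= tfar;
}

int main()
{
  float t, u, v;
  bool hit = intersect_tri({0,0,-1}, {0,0,1}, 0.0f, 100.0f,
                           {-1,-1,0}, {1,-1,0}, {0,1,0}, t, u, v);
  printf("hit=%d t=%f u=%f v=%f\n", hit, t, u, v); // hit=1 t=1.0
  return 0;
}

Embree's kernels defer the division by det: they keep u, v and t as numerators plus a shared denominator and only multiply by the refined reciprocal once a hit survives all mask tests, which is visible in the listing as the late vrcp14ps.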
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::QuadMvIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec6e3f xorl %eax, %eax jmp 0x1ec75a9 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %rbp movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec6e7e vmovss 0x10(%rbp), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ec6e89 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 vbroadcastss (%rbp), %ymm6 vbroadcastss 0x4(%rbp), %ymm5 vbroadcastss 0x8(%rbp), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x96ba1(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x9a6e4(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ec7594 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ec6f56 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ec707b testb $0x8, %r12b jne 0x1ec6ffc vmovaps 0x40(%r12), %ymm0 vmovaps 0x60(%r12), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%r12), %ymm5, %ymm3 vminps 0xa0(%r12), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%r12), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%r12), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %r12b jne 0x1ec718b testq %rdi, %rdi je 0x1ec7195 andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec719f testl %eax, %eax je 0x1ec6f73 jmp 0x1ec7464 testb $0x8, %r12b jne 0x1ec6ffc vmovaps 0xc0(%r12), %ymm0 vmovaps 0x40(%r12), %ymm1 vmovaps 0x60(%r12), %ymm2 vmovaps 0x80(%r12), %ymm3 vmovaps 0xa0(%r12), %ymm4 vmovaps 0xe0(%r12), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, 
%k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ec6ffc movl $0x6, %eax jmp 0x1ec706e movl $0x4, %eax jmp 0x1ec706e vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec7203 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec706e vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec7298 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ec706e movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec736a vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ec706e valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x59b46(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x9a29d(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x9a296(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9a289(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x4b347(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec73b0 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ec741a vpermt2q %ymm1, %ymm3, %ymm0 vmovq 
%xmm0, %r12 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ec735d cmpl $0x6, %eax jne 0x1ec6f56 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ec6f56 movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %r12 addq $0xd0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) movq $-0x10, %r14 xorl %r13d, %r13d movl (%r12,%r14), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ec74f2 movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14), %eax movl %eax, 0x40(%rbx) movq %rbp, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b addq $0x4, %r14 jne 0x1ec74b4 movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0xe0, %r12 cmpq 0x60(%rsp), %r15 jne 0x1ec74a5 testb $0x1, %al vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 movq 0x58(%rsp), %r8 vpmovsxbd 0x96543(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x9a086(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ec6f56 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ec757e vmovss 0x10(%rbp), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ec7589 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec6eec movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq nop
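The vpminsd/vpmaxsd chains in the traverser code above implement small compare-exchange networks: when two, three, or four children are hit, their (distance, child) pairs are ordered by entry distance so the nearest child is continued with and the rest are pushed farthest-first. A scalar sketch of the 3-element network; the struct and its packing are assumptions, since the listing packs distance and child id into single integer lanes:

#include <cstdio>
#include <utility>

// Compare-exchange sorting network for up to 3 traversal stack items,
// ordering by entry distance (nearest first). Mirrors the fixed
// vpminsd/vpmaxsd sequences in the listing, which sort without branches.
struct StackItem { float dist; unsigned node; };

static inline void cmp_xchg(StackItem& a, StackItem& b)
{
  if (b.dist < a.dist) std::swap(a, b); // keep the nearer item in 'a'
}

static void sort3(StackItem& s0, StackItem& s1, StackItem& s2)
{
  cmp_xchg(s1, s2);
  cmp_xchg(s0, s2);
  cmp_xchg(s0, s1); // s0 <= s1 <= s2 afterwards
}

int main()
{
  StackItem a{3.0f, 1}, b{1.0f, 2}, c{2.0f, 3};
  sort3(a, b, c);
  printf("%u %u %u\n", a.node, b.node, c.node); // prints: 2 3 1
  return 0;
}

Ordering the pushes this way makes the distance-based culling in the pop loop effective: far children often get culled by a shrunken tfar or cull radius before they are ever visited.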
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec75fd pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x2700, %rsp # imm = 0x2700 movq 0x70(%rax), %rax movq %rax, 0x3a0(%rsp) movl $0x0, 0x3a8(%rsp) cmpq $0x8, %rax jne 0x1ec7601 leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rdx, %r8 movq %rsi, %r14 leaq 0x3b0(%rsp), %rdi vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x59898(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x299b3(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x250c2(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r9d, %r9d vucomiss %xmm1, %xmm0 setb %r9b vbroadcastss %xmm0, %ymm19 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm20 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x5985c(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm21 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x4b06b(%rip), %ymm22 # 0x1f12704 vpermps %ymm0, %ymm22, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %r9d xorl %r10d, %r10d vucomiss %xmm1, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm1, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r9, %rsi xorq $0x20, %rsi movq %r10, %r15 xorq $0x20, %r15 movq %r11, %r12 xorq $0x20, %r12 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x597cc(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm7, %ymm23 vxorps %ymm1, %ymm8, %ymm24 vxorps %ymm1, %ymm6, %ymm25 vbroadcastss %xmm2, %ymm26 vpmovsxbd 0x9635a(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x99e9c(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x59796(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x99eec(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x99eea(%rip), %ymm31 # 0x1f61628 movq %rsi, 0x70(%rsp) movq %r15, 0x68(%rsp) movq %r12, 0x8(%rsp) vmovss 0x20(%r14), %xmm1 leaq 0x3a0(%rsp), %rax cmpq %rax, %rdi je 0x1ec75ef vmovss -0x8(%rdi), %xmm2 addq $-0x10, %rdi vucomiss %xmm1, %xmm2 ja 0x1ec7753 movq (%rdi), %r13 testb $0x8, %r13b jne 0x1ec77f4 vmovaps 0x40(%r13,%r9), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%r13,%r10), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%r13,%r11), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpmaxsd %ymm26, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm8 vmovaps 0x40(%r13,%rsi), %ymm1 vfmadd132ps %ymm19, %ymm23, %ymm1 # ymm1 = (ymm1 * ymm19) + ymm23 vmovaps 0x40(%r13,%r15), %ymm2 vfmadd132ps %ymm20, %ymm24, %ymm2 # ymm2 = (ymm2 * ymm20) + ymm24 vmovaps 0x40(%r13,%r12), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm21, %ymm25, %ymm3 # ymm3 = (ymm3 * ymm21) + ymm25 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm8, %k0 kmovb %k0, %ebx testb $0x8, %r13b jne 0x1ec7865 testq %rbx, %rbx je 0x1ec786c andq $-0x10, %r13 vmovdqu (%r13), %ymm1 vmovdqu 0x20(%r13), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm8, %ymm3 kmovd %ebx, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %rbx, %rcx jne 0x1ec7873 testl %eax, %eax je 0x1ec7776 jmp 0x1ec7af5 movl $0x6, %eax 
jmp 0x1ec7858 movl $0x4, %eax jmp 0x1ec7858 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec78d2 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r13 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%rdi) vpermd %ymm8, %ymm4, %ymm1 vmovd %xmm1, 0x8(%rdi) addq $0x10, %rdi jmp 0x1ec7858 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec7957 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm8, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rdi) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%rdi) vpermd %ymm8, %ymm5, %ymm1 vmovd %xmm1, 0x18(%rdi) addq $0x20, %rdi jmp 0x1ec7858 vmovdqa %ymm8, %ymm9 movq %r15, %r12 movq %rsi, %r15 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec7a1a vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r13 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%rdi) vmovdqa %ymm9, %ymm8 vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rdi) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%rdi) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x18(%rdi) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%rdi) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%rdi) addq $0x30, %rdi movq %r15, %rsi movq %r12, %r15 movq 0x8(%rsp), %r12 jmp 0x1ec7858 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm6, %ymm31, %ymm3 vpmovsxbd 0x99bee(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm5, %ymm22, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec7a4b popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%rdi) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%rdi) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec7aad vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r13 movq %r15, %rsi movq %r12, %r15 movq 0x8(%rsp), %r12 vmovdqa %ymm9, %ymm8 jmp 0x1ec7858 cmpl $0x6, %eax jne 0x1ec774d vmovdqa %ymm8, 0x360(%rsp) movl %r13d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x78(%rsp) je 0x1ec835d andq $-0x10, %r13 xorl %r12d, %r12d leaq (%r12,%r12,2), %rax shlq $0x5, %rax movq (%r8), %r15 prefetcht0 (%r13,%rax) prefetcht0 
0x40(%r13,%rax) movq 0x228(%r15), %rcx movl 0x48(%r13,%rax), %edx movq (%rcx,%rdx,8), %rdx movl 0x8(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm2 movl 0x18(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm1 movl 0x38(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm0 movl 0x28(%r13,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm2, %ymm2 movl 0x40(%r13,%rax), %edx movq (%rcx,%rdx,8), %rdx movl (%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm3 movl 0x10(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm4 movl 0x30(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm5 movl 0x20(%r13,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm3, %ymm3 movl 0x4c(%r13,%rax), %edx movq (%rcx,%rdx,8), %rdx movl 0xc(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm6 movl 0x1c(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm7 movl 0x3c(%r13,%rax), %esi vmovups (%rdx,%rsi,4), %xmm8 movl 0x2c(%r13,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm6, %ymm6 movl 0x44(%r13,%rax), %edx movq (%rcx,%rdx,8), %rcx movl 0x4(%r13,%rax), %edx vmovups (%rcx,%rdx,4), %xmm9 movl 0x14(%r13,%rax), %edx vmovups (%rcx,%rdx,4), %xmm10 movl 0x34(%r13,%rax), %edx vmovups (%rcx,%rdx,4), %xmm11 movl 0x24(%r13,%rax), %edx vinsertf128 $0x1, (%rcx,%rdx,4), %ymm9, %ymm9 vbroadcasti128 0x40(%r13,%rax), %ymm12 # ymm12 = mem[0,1,0,1] vmovdqa %ymm12, 0x380(%rsp) vbroadcasti128 0x50(%r13,%rax), %ymm12 # ymm12 = mem[0,1,0,1] vunpcklps %xmm1, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] vunpckhps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] vunpcklps %xmm7, %xmm10, %xmm4 # xmm4 = xmm10[0],xmm7[0],xmm10[1],xmm7[1] vunpckhps %xmm7, %xmm10, %xmm7 # xmm7 = xmm10[2],xmm7[2],xmm10[3],xmm7[3] vunpcklps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] vunpcklps %xmm4, %xmm13, %xmm7 # xmm7 = xmm13[0],xmm4[0],xmm13[1],xmm4[1] vunpckhps %xmm4, %xmm13, %xmm4 # xmm4 = xmm13[2],xmm4[2],xmm13[3],xmm4[3] vunpcklps %xmm0, %xmm5, %xmm10 # xmm10 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] vunpckhps %xmm0, %xmm5, %xmm0 # xmm0 = xmm5[2],xmm0[2],xmm5[3],xmm0[3] vunpcklps %xmm8, %xmm11, %xmm5 # xmm5 = xmm11[0],xmm8[0],xmm11[1],xmm8[1] vunpckhps %xmm8, %xmm11, %xmm8 # xmm8 = xmm11[2],xmm8[2],xmm11[3],xmm8[3] vunpcklps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1] vunpcklps %xmm5, %xmm10, %xmm8 # xmm8 = xmm10[0],xmm5[0],xmm10[1],xmm5[1] vunpckhps %xmm5, %xmm10, %xmm5 # xmm5 = xmm10[2],xmm5[2],xmm10[3],xmm5[3] vmovdqa %ymm12, 0xe0(%rsp) vunpcklps %ymm6, %ymm9, %ymm10 # ymm10 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5] vunpcklps %ymm2, %ymm3, %ymm11 # ymm11 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5] vunpcklps %ymm10, %ymm11, %ymm12 # ymm12 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5] vunpckhps %ymm10, %ymm11, %ymm10 # ymm10 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7] vunpckhps %ymm6, %ymm9, %ymm6 # ymm6 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7] vunpckhps %ymm2, %ymm3, %ymm2 # ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7] vunpcklps %ymm6, %ymm2, %ymm3 # ymm3 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[4],ymm6[4],ymm2[5],ymm6[5] vinsertf128 $0x1, %xmm7, %ymm7, %ymm2 vinsertf128 $0x1, %xmm4, %ymm4, %ymm4 vinsertf128 $0x1, %xmm1, %ymm1, %ymm1 vinsertf128 $0x1, %xmm8, %ymm8, %ymm6 vinsertf128 $0x1, %xmm5, %ymm5, %ymm5 vinsertf128 $0x1, %xmm0, %ymm0, %ymm0 vsubps %ymm2, %ymm12, %ymm8 vsubps %ymm4, %ymm10, %ymm11 vsubps %ymm1, %ymm3, %ymm13 vsubps %ymm12, %ymm6, %ymm7 vsubps %ymm10, %ymm5, %ymm9 vsubps %ymm3, %ymm0, %ymm14 
vmulps %ymm14, %ymm11, %ymm0 vfmsub231ps %ymm13, %ymm9, %ymm0 # ymm0 = (ymm9 * ymm13) - ymm0 vmulps %ymm7, %ymm13, %ymm1 vfmsub231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) - ymm1 vmulps %ymm9, %ymm8, %ymm2 vbroadcastss 0x10(%r14), %ymm15 vbroadcastss 0x14(%r14), %ymm16 vbroadcastss 0x18(%r14), %ymm17 vsubps (%r14){1to8}, %ymm12, %ymm4 vfmsub231ps %ymm11, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm11) - ymm2 vsubps 0x4(%r14){1to8}, %ymm10, %ymm5 vsubps 0x8(%r14){1to8}, %ymm3, %ymm6 vmulps %ymm6, %ymm16, %ymm10 vfmsub231ps %ymm17, %ymm5, %ymm10 # ymm10 = (ymm5 * ymm17) - ymm10 vmulps %ymm4, %ymm17, %ymm12 vfmsub231ps %ymm15, %ymm6, %ymm12 # ymm12 = (ymm6 * ymm15) - ymm12 vmulps %ymm5, %ymm15, %ymm18 vfmsub231ps %ymm16, %ymm4, %ymm18 # ymm18 = (ymm4 * ymm16) - ymm18 vmulps %ymm17, %ymm2, %ymm17 vfmadd231ps %ymm16, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm16) + ymm17 vfmadd231ps %ymm15, %ymm0, %ymm17 # ymm17 = (ymm0 * ymm15) + ymm17 vandps 0x5917c(%rip){1to8}, %ymm17, %ymm3 # 0x1f20ec4 vmulps %ymm18, %ymm14, %ymm14 vfmadd231ps %ymm9, %ymm12, %ymm14 # ymm14 = (ymm12 * ymm9) + ymm14 vfmadd231ps %ymm7, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm7) + ymm14 vandpd 0x59206(%rip){1to4}, %ymm17, %ymm9 # 0x1f20f68 vxorps %ymm14, %ymm9, %ymm7 vmulps %ymm18, %ymm13, %ymm13 vfmadd231ps %ymm12, %ymm11, %ymm13 # ymm13 = (ymm11 * ymm12) + ymm13 vfmadd231ps %ymm10, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm10) + ymm13 vxorps %ymm13, %ymm9, %ymm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %ymm10, %ymm7, %k1 vcmpnltps %ymm10, %ymm8, %k1 {%k1} vcmpneqps %ymm10, %ymm17, %k1 {%k1} vaddps %ymm7, %ymm8, %ymm10 vcmpleps %ymm3, %ymm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1ec7dba incq %r12 cmpq 0x78(%rsp), %r12 jne 0x1ec7b23 jmp 0x1ec835d vmulps %ymm6, %ymm2, %ymm6 vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6 vfmadd213ps %ymm5, %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + ymm5 vxorps %ymm4, %ymm9, %ymm4 vmulps 0xc(%r14){1to8}, %ymm3, %ymm5 vcmpltps %ymm4, %ymm5, %k1 vmulps 0x20(%r14){1to8}, %ymm3, %ymm5 vcmpleps %ymm5, %ymm4, %k1 {%k1} kandb %k0, %k1, %k0 kortestb %k0, %k0 je 0x1ec7da7 vmovaps %ymm7, 0x100(%rsp) vmovaps %ymm8, 0x120(%rsp) vmovaps %ymm4, 0x140(%rsp) vmovaps %ymm3, 0x160(%rsp) kmovb %k0, 0x181(%rsp) vmovaps 0x100(%rsp), %ymm4 vmovaps 0x120(%rsp), %ymm5 vsubps %ymm5, %ymm3, %ymm6 vblendps $0xf0, %ymm6, %ymm4, %ymm6 # ymm6 = ymm4[0,1,2,3],ymm6[4,5,6,7] vmovaps %ymm6, 0x100(%rsp) vsubps %ymm4, %ymm3, %ymm4 vblendps $0xf0, %ymm4, %ymm5, %ymm4 # ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] vmovaps %ymm4, 0x120(%rsp) vmovaps 0x98ae1(%rip), %ymm5 # 0x1f60940 vmulps %ymm5, %ymm0, %ymm0 vmovaps %ymm0, 0x200(%rsp) vmulps %ymm5, %ymm1, %ymm0 vmovaps %ymm0, 0x220(%rsp) vmulps %ymm5, %ymm2, %ymm0 vmovaps %ymm0, 0x240(%rsp) movzbl 0x181(%rsp), %edx vrcp14ps %ymm3, %ymm0 vfnmadd213ps 0x24876(%rip){1to8}, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm3) + mem vfmadd132ps %ymm0, %ymm0, %ymm3 # ymm3 = (ymm3 * ymm0) + ymm0 vmulps 0x140(%rsp), %ymm3, %ymm5 vmovaps %ymm5, 0x1e0(%rsp) vmulps %ymm3, %ymm6, %ymm0 vmovaps %ymm0, 0x1a0(%rsp) vmulps %ymm3, %ymm4, %ymm0 vmovaps %ymm0, 0x1c0(%rsp) kmovd %edx, %k1 vbroadcastss 0x23b44(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %dl, %al movzbl %al, %eax cmovel %edx, %eax movzbl %al, %eax tzcntq %rax, %rcx 
movl 0x380(%rsp,%rcx,4), %esi movq 0x1e8(%r15), %rax movq %rsi, 0x28(%rsp) movq (%rax,%rsi,8), %rsi movl 0x24(%r14), %eax testl %eax, 0x34(%rsi) je 0x1ec7f5d movq %rsi, %rax movq %rcx, %rsi movq 0x10(%r8), %rcx cmpq $0x0, 0x10(%rcx) jne 0x1ec7fdd cmpq $0x0, 0x40(%rax) jne 0x1ec7fdd xorl %eax, %eax jmp 0x1ec7f7f movl $0x1, %eax movq %rcx, %rsi shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl %dl, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %edx movb $0x1, %al testb %al, %al je 0x1ec82d9 testb %dl, %dl je 0x1ec7da7 kmovd %edx, %k1 vbroadcastss 0x23a84(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %dl, %al movzbl %al, %eax movzbl %dl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %ecx jmp 0x1ec7f19 movq %rcx, 0x20(%rsp) movl %edx, 0x18(%rsp) vmovaps %ymm5, 0x260(%rsp) vmovaps %ymm26, 0x280(%rsp) vmovaps %ymm25, 0x2a0(%rsp) vmovaps %ymm24, 0x2c0(%rsp) vmovaps %ymm23, 0x2e0(%rsp) movq %r11, 0x40(%rsp) movq %r10, 0x48(%rsp) vmovaps %ymm21, 0x300(%rsp) vmovaps %ymm20, 0x320(%rsp) vmovaps %ymm19, 0x340(%rsp) movq %r9, 0x50(%rsp) movq %rdi, 0x58(%rsp) movq %rsi, %rdi vmovss 0x1a0(%rsp,%rsi,4), %xmm0 vmovss 0x1c0(%rsp,%rsi,4), %xmm1 movq %r8, 0x60(%rsp) movq %rax, %rsi movq 0x8(%r8), %rax movl 0xe0(%rsp,%rdi,4), %ecx vmovss 0x200(%rsp,%rdi,4), %xmm2 vmovss 0x220(%rsp,%rdi,4), %xmm3 vmovss 0x240(%rsp,%rdi,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movq 0x28(%rsp), %rcx movl %ecx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%r14), %xmm0 vmovss %xmm0, 0x14(%rsp) movq %rdi, 0x38(%rsp) vmovss 0x1e0(%rsp,%rdi,4), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0xffffffff, 0x1c(%rsp) # imm = 0xFFFFFFFF leaq 0x1c(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rsi), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %r14, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq %rsi, 0x30(%rsp) movq 0x40(%rsi), %rax testq %rax, %rax je 0x1ec8169 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec8201 movq 0x20(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1ec81a6 movq 0x20(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ec818c movq 0x30(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1ec8199 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec8201 movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ec820d vmovss 0x14(%rsp), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0x1, %eax movq 0x38(%rsp), %rcx movq %rcx, %rsi shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0x18(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k1 vmovaps 0x260(%rsp), %ymm5 vcmpleps 0x20(%r14){1to8}, %ymm5, %k0 {%k1} kmovd %k0, %edx movb $0x1, %al movq 0x60(%rsp), %r8 
movq 0x58(%rsp), %rdi movq 0x50(%rsp), %r9 vmovaps 0x340(%rsp), %ymm19 vmovaps 0x320(%rsp), %ymm20 vmovaps 0x300(%rsp), %ymm21 vbroadcastss 0x4a48c(%rip), %ymm22 # 0x1f12704 movq 0x48(%rsp), %r10 movq 0x40(%rsp), %r11 vmovaps 0x2e0(%rsp), %ymm23 vmovaps 0x2c0(%rsp), %ymm24 vmovaps 0x2a0(%rsp), %ymm25 vmovaps 0x280(%rsp), %ymm26 vpmovsxbd 0x957c4(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x99306(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x58c00(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x99356(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x99354(%rip), %ymm31 # 0x1f61628 jmp 0x1ec7f7f vmovss 0x1a0(%rsp,%rsi,4), %xmm0 vmovss 0x1c0(%rsp,%rsi,4), %xmm1 vmovss 0x1e0(%rsp,%rsi,4), %xmm2 vmovss %xmm2, 0x20(%r14) vmovss 0x200(%rsp,%rsi,4), %xmm2 vmovss %xmm2, 0x30(%r14) vmovss 0x220(%rsp,%rsi,4), %xmm2 vmovss %xmm2, 0x34(%r14) vmovss 0x240(%rsp,%rsi,4), %xmm2 vmovss %xmm2, 0x38(%r14) vmovss %xmm0, 0x3c(%r14) vmovss %xmm1, 0x40(%r14) movl 0xe0(%rsp,%rsi,4), %eax movl %eax, 0x44(%r14) movq 0x28(%rsp), %rax movl %eax, 0x48(%r14) movq 0x8(%r8), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r14) movl 0x4(%rax), %eax movl %eax, 0x50(%r14) jmp 0x1ec7da7 vbroadcastss 0x20(%r14), %ymm0 movq 0x70(%rsp), %rsi movq 0x68(%rsp), %r15 movq 0x8(%rsp), %r12 vmovdqa 0x360(%rsp), %ymm8 jmp 0x1ec774d
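Note on the traversal blocks above: hit children are ordered by distance before being pushed. The vpternlogd with immediate 0xf8 (which computes dst | (src2 & src3)) appears to merge each child's id into the low bits of its tNear bit pattern, and the vpminsd/vpmaxsd pairs then act as compare-exchanges on the combined keys; for non-negative floats, IEEE-754 bit patterns order the same way as signed integers, so integer min/max sorts by distance while the id rides along. A hypothetical scalar recreation (the 3-bit id width is an assumption for illustration, not Embree's actual encoding):

#include <cstdint>
#include <cstring>

// Pack a non-negative float distance and a small child id into one
// 32-bit key (assumed 3-bit id field, for illustration only).
inline uint32_t packKey(float dist, uint32_t childID) {
  uint32_t bits;
  std::memcpy(&bits, &dist, sizeof bits);  // bit-cast, no int conversion
  return (bits & ~7u) | (childID & 7u);    // id rides in the low bits
}

// One compare-exchange of the sorting network (a vpminsd/vpmaxsd pair);
// sorting the keys sorts by distance and carries the child ids along.
inline void compareExchange(uint32_t& a, uint32_t& b) {
  const uint32_t lo = a < b ? a : b;  // vpminsd
  const uint32_t hi = a < b ? b : a;  // vpmaxsd
  a = lo;
  b = hi;
}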
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::QuadMvIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode)
    return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
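The disassembly below inlines everything, but the control flow is still the two-level loop of this source: an outer pop loop over a small explicit stack and an inner downtraversal loop that always descends into the nearest hit child. A minimal scalar sketch of that structure follows; NodeRef and the three callbacks are hypothetical stand-ins for the SIMD node test and the primitive intersector, not Embree's API, and stack-bounds checks are omitted.

#include <limits>

template <typename NodeRef, typename IsLeaf, typename IntersectNode, typename IntersectLeaf>
void traverseClosest(NodeRef root, float& tfar,
                     IsLeaf isLeaf,                // NodeRef -> bool
                     IntersectNode intersectNode,  // fills children/dists near-to-far, returns hit count
                     IntersectLeaf intersectLeaf)  // intersects primitives, may shrink tfar
{
  constexpr float neg_inf = -std::numeric_limits<float>::infinity();
  struct StackItem { NodeRef ref; float dist; };
  StackItem stack[64];               // stack of nodes
  StackItem* stackPtr = stack + 1;   // current stack pointer
  stack[0] = { root, neg_inf };

  while (stackPtr != stack) {        // pop loop
    StackItem cur = *--stackPtr;     // pop next node
    if (cur.dist > tfar) continue;   // popped node is already behind the closest hit

    bool leaf = isLeaf(cur.ref);
    while (!leaf) {                  // downtraversal loop
      NodeRef child[8]; float dist[8];
      int hits = intersectNode(cur.ref, child, dist);
      if (hits == 0) break;          // no child hit: pop next node
      cur = { child[0], dist[0] };   // descend into the nearest child ...
      for (int i = hits - 1; i >= 1; i--)
        *stackPtr++ = { child[i], dist[i] };  // ... and push the rest, farthest first
      leaf = isLeaf(cur.ref);
    }
    if (leaf)
      intersectLeaf(cur.ref, tfar);  // leaf: run the primitive intersector
  }
}

Pushing the farther children first means the nearest one is popped first later, which is what lets the `dist > tfar` test prune whole subtrees once a close hit is found.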
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x27c0, %rsp # imm = 0x27C0 movq %rdx, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ec8b1c movq 0x70(%rax), %rax movq %rax, 0x460(%rsp) movl $0x0, 0x468(%rsp) cmpq $0x8, %rax jne 0x1ec8b2e leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r12 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x58376(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vcmpltps 0x2848f(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 leaq 0x470(%rsp), %rax vbroadcastss 0x23baa(%rip), %xmm4 # 0x1eec714 vdivps %xmm0, %xmm4, %xmm0 vbroadcastss 0x583e8(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x5738e(%rip){1to4}, %xmm0, %xmm4 # 0x1f1ff10 vmulps 0x57388(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%rsi), %ymm8 vbroadcastss 0x4(%rsi), %ymm9 vbroadcastss 0x8(%rsi), %ymm10 xorl %ecx, %ecx vucomiss %xmm1, %xmm4 setb %cl vbroadcastss %xmm4, %ymm11 vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vbroadcastsd %xmm5, %ymm12 vbroadcastss 0x5831f(%rip), %ymm6 # 0x1f20edc vshufpd $0x1, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,0] vpermps %ymm4, %ymm6, %ymm13 vbroadcastss %xmm0, %ymm14 vbroadcastss 0x49b2f(%rip), %ymm4 # 0x1f12704 vpermps %ymm0, %ymm4, %ymm15 vpermps %ymm0, %ymm6, %ymm16 shll $0x5, %ecx xorl %esi, %esi vucomiss %xmm1, %xmm5 setb %sil shll $0x5, %esi orq $0x40, %rsi xorl %edi, %edi vucomiss %xmm1, %xmm7 setb %dil shll $0x5, %edi orq $0x80, %rdi movq %rcx, 0x98(%rsp) xorq $0x20, %rcx movq %rcx, 0x80(%rsp) movq %rsi, 0x90(%rsp) xorq $0x20, %rsi movq %rsi, 0x78(%rsp) movq %rdi, 0x88(%rsp) xorq $0x20, %rdi movq %rdi, 0x70(%rsp) vbroadcastss %xmm2, %ymm17 vbroadcastss %xmm3, %ymm0 vpmovsxbd 0x94e1d(%rip), %ymm18 # 0x1f5da70 vpbroadcastd 0x9895f(%rip), %ymm19 # 0x1f615bc movq %r12, 0x10(%rsp) vmovaps %ymm8, 0x240(%rsp) vmovaps %ymm9, 0x220(%rsp) vmovaps %ymm10, 0x200(%rsp) vmovaps %ymm11, 0x1e0(%rsp) vmovaps %ymm12, 0x1c0(%rsp) vmovaps %ymm13, 0x1a0(%rsp) vmovaps %ymm14, 0x180(%rsp) vmovaps %ymm15, 0x160(%rsp) vmovaps %ymm16, 0x140(%rsp) vmovaps %ymm17, 0x120(%rsp) vmovss 0x20(%r12), %xmm1 leaq 0x460(%rsp), %rcx cmpq %rcx, %rax je 0x1ec8b1c vmovss -0x8(%rax), %xmm2 addq $-0x10, %rax vucomiss %xmm1, %xmm2 ja 0x1ec8cc1 movq %rax, 0x8(%rsp) movq (%rax), %rdx testb $0x8, %dl jne 0x1ec8d9f movq 0x98(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm1 vsubps %ymm8, %ymm1, %ymm1 vmulps %ymm1, %ymm11, %ymm1 movq 0x90(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm2 vsubps %ymm9, %ymm2, %ymm2 vmulps %ymm2, %ymm12, %ymm2 vmaxps %ymm2, %ymm1, %ymm1 movq 0x88(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm2 vsubps %ymm10, %ymm2, %ymm2 vmulps %ymm2, %ymm13, %ymm2 vmaxps %ymm17, %ymm2, %ymm2 vmaxps %ymm2, %ymm1, %ymm21 movq 0x80(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm1 vsubps %ymm8, %ymm1, %ymm1 movq 0x78(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm2 vmulps %ymm1, %ymm14, %ymm1 vsubps %ymm9, %ymm2, %ymm2 vmulps %ymm2, %ymm15, %ymm2 vminps %ymm2, %ymm1, %ymm1 movq 0x70(%rsp), %rax vmovaps 0x40(%rdx,%rax), %ymm2 vsubps %ymm10, %ymm2, %ymm2 vmulps %ymm2, %ymm16, %ymm2 vminps %ymm0, %ymm2, %ymm2 vminps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm21, %k0 kmovb %k0, %esi testb $0x8, %dl jne 0x1ec8e07 testq %rsi, %rsi je 0x1ec8e0e andq $-0x10, %rdx vmovdqu (%rdx), %ymm1 vmovdqu 0x20(%rdx), %ymm2 vmovdqa64 %ymm18, %ymm3 vpternlogd $0xf8, %ymm19, %ymm21, %ymm3 kmovd %esi, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, 
%ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) xorl %eax, %eax blsrq %rsi, %rcx jne 0x1ec8e15 testl %eax, %eax je 0x1ec8ce9 jmp 0x1ec91a0 movl $0x6, %eax jmp 0x1ec8dfa movl $0x4, %eax jmp 0x1ec8dfa vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ec8e82 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rdx vpermt2q %ymm2, %ymm4, %ymm1 movq 0x8(%rsp), %rcx vmovq %xmm1, (%rcx) vpermd %ymm21, %ymm4, %ymm1 vmovd %xmm1, 0x8(%rcx) addq $0x10, %rcx movq %rcx, 0x8(%rsp) jmp 0x1ec8dfa vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ec8f0e vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rdx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 movq 0x8(%rsp), %rcx vmovq %xmm3, (%rcx) vpermd %ymm21, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rcx) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%rcx) vpermd %ymm21, %ymm5, %ymm1 vmovd %xmm1, 0x18(%rcx) addq $0x20, %rcx jmp 0x1ec8e78 movq %rsi, %rdi vmovdqa64 %ymm21, %ymm22 vmovdqa64 %ymm19, %ymm20 vmovdqa64 %ymm18, %ymm19 vmovaps %ymm17, %ymm18 vmovaps %ymm16, %ymm17 vmovaps %ymm15, %ymm16 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ec9060 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rdx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 movq 0x8(%rsp), %rcx vmovq %xmm3, (%rcx) vmovdqa64 %ymm22, %ymm21 vpermd %ymm22, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rcx) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%rcx) vpermd %ymm22, %ymm6, %ymm3 vmovd %xmm3, 0x18(%rcx) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%rcx) vpermd %ymm22, %ymm4, %ymm1 vmovd %xmm1, 0x28(%rcx) addq $0x30, %rcx movq %rcx, 0x8(%rsp) movq 0x10(%rsp), %r12 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovaps %ymm16, %ymm15 vmovaps %ymm17, %ymm16 vmovaps %ymm18, %ymm17 vmovdqa64 %ymm19, %ymm18 vmovdqa64 %ymm20, %ymm19 movq %rdi, %rsi jmp 0x1ec8dfa valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x57e50(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x985a6(%rip), %ymm21 # 0x1f61620 vpermt2d %ymm8, %ymm21, %ymm3 vpmovsxbd 0x9859f(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x98592(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x49650(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, 
%ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ec90a7 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 movq 0x8(%rsp), %rdx vmovq %xmm4, (%rdx) vpermd %ymm22, %ymm3, %ymm4 vmovd %xmm4, 0x8(%rdx) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %rdx movq %rdx, 0x8(%rsp) vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ec9111 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rdx movq 0x10(%rsp), %r12 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovaps %ymm16, %ymm15 vmovaps %ymm17, %ymm16 vmovaps %ymm18, %ymm17 vmovdqa64 %ymm19, %ymm18 vmovdqa64 %ymm20, %ymm19 vmovdqa64 %ymm22, %ymm21 jmp 0x1ec9058 cmpl $0x6, %eax jne 0x1ec9a31 vmovaps %ymm21, 0x260(%rsp) movl %edx, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x68(%rsp) je 0x1ec99ac andq $-0x10, %rdx xorl %ecx, %ecx movq %rsi, 0x60(%rsp) movq %rdx, 0x58(%rsp) imulq $0xe0, %rcx, %rax vbroadcasti128 0xd0(%rdx,%rax), %ymm0 # ymm0 = mem[0,1,0,1] vbroadcasti128 0xc0(%rdx,%rax), %ymm1 # ymm1 = mem[0,1,0,1] vmovdqa %ymm1, 0x280(%rsp) vmovdqa %ymm0, 0x100(%rsp) vmovaps (%rdx,%rax), %xmm0 vmovaps 0x10(%rdx,%rax), %xmm1 vmovaps 0x20(%rdx,%rax), %xmm2 vinsertf128 $0x1, 0x60(%rdx,%rax), %ymm0, %ymm0 vinsertf128 $0x1, 0x70(%rdx,%rax), %ymm1, %ymm1 vinsertf128 $0x1, 0x80(%rdx,%rax), %ymm2, %ymm2 vbroadcastf128 0x30(%rdx,%rax), %ymm3 # ymm3 = mem[0,1,0,1] vbroadcastf128 0x40(%rdx,%rax), %ymm10 # ymm10 = mem[0,1,0,1] vbroadcastf128 0x50(%rdx,%rax), %ymm11 # ymm11 = mem[0,1,0,1] vbroadcastf128 0x90(%rdx,%rax), %ymm12 # ymm12 = mem[0,1,0,1] vbroadcastf128 0xa0(%rdx,%rax), %ymm13 # ymm13 = mem[0,1,0,1] vbroadcastf128 0xb0(%rdx,%rax), %ymm14 # ymm14 = mem[0,1,0,1] vbroadcastss (%r12), %ymm15 vbroadcastss 0x4(%r12), %ymm16 vbroadcastss 0x8(%r12), %ymm17 vbroadcastss 0x10(%r12), %ymm6 vbroadcastss 0x14(%r12), %ymm7 vbroadcastss 0x18(%r12), %ymm9 vsubps %ymm15, %ymm0, %ymm4 vsubps %ymm16, %ymm1, %ymm5 vsubps %ymm17, %ymm2, %ymm8 vsubps %ymm15, %ymm3, %ymm19 vsubps %ymm16, %ymm10, %ymm20 vsubps %ymm17, %ymm11, %ymm21 vsubps %ymm15, %ymm12, %ymm22 vsubps %ymm16, %ymm13, %ymm16 vsubps %ymm17, %ymm14, %ymm14 vsubps %ymm4, %ymm22, %ymm11 vsubps %ymm5, %ymm16, %ymm13 vsubps %ymm8, %ymm14, %ymm12 vsubps %ymm19, %ymm4, %ymm15 vsubps %ymm20, %ymm5, %ymm18 vsubps %ymm21, %ymm8, %ymm17 vsubps %ymm22, %ymm19, %ymm1 vsubps %ymm16, %ymm20, %ymm0 vsubps %ymm14, %ymm21, %ymm2 vaddps %ymm4, %ymm22, %ymm3 vaddps %ymm5, %ymm16, %ymm10 vaddps %ymm8, %ymm14, %ymm23 vmulps %ymm12, %ymm10, %ymm24 vfmsub231ps %ymm23, %ymm13, %ymm24 # ymm24 = (ymm13 * ymm23) - ymm24 vmulps %ymm11, %ymm23, %ymm23 vfmsub231ps %ymm3, %ymm12, %ymm23 # ymm23 = (ymm12 * ymm3) - ymm23 vmulps %ymm3, %ymm13, %ymm3 vfmsub231ps %ymm10, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm10) - ymm3 vmulps %ymm3, %ymm9, %ymm3 vfmadd231ps %ymm23, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm23) + ymm3 vfmadd231ps %ymm24, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm24) + ymm3 vaddps %ymm19, %ymm4, %ymm10 vaddps %ymm20, %ymm5, %ymm23 vaddps %ymm21, %ymm8, %ymm24 vmulps %ymm17, %ymm23, %ymm25 vfmsub231ps %ymm24, %ymm18, %ymm25 # ymm25 = (ymm18 * ymm24) - ymm25 vmulps %ymm15, %ymm24, %ymm24 vfmsub231ps %ymm10, %ymm17, %ymm24 # ymm24 = 
(ymm17 * ymm10) - ymm24 vmulps %ymm18, %ymm10, %ymm10 vfmsub231ps %ymm23, %ymm15, %ymm10 # ymm10 = (ymm15 * ymm23) - ymm10 vmulps %ymm10, %ymm9, %ymm10 vfmadd231ps %ymm24, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm24) + ymm10 vfmadd231ps %ymm25, %ymm6, %ymm10 # ymm10 = (ymm6 * ymm25) + ymm10 vaddps %ymm22, %ymm19, %ymm19 vaddps %ymm16, %ymm20, %ymm16 vaddps %ymm14, %ymm21, %ymm14 vmulps %ymm2, %ymm16, %ymm20 vfmsub231ps %ymm14, %ymm0, %ymm20 # ymm20 = (ymm0 * ymm14) - ymm20 vmulps %ymm1, %ymm14, %ymm14 vfmsub231ps %ymm19, %ymm2, %ymm14 # ymm14 = (ymm2 * ymm19) - ymm14 vmulps %ymm0, %ymm19, %ymm19 vfmsub231ps %ymm16, %ymm1, %ymm19 # ymm19 = (ymm1 * ymm16) - ymm19 vmulps %ymm19, %ymm9, %ymm19 vfmadd231ps %ymm14, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm14) + ymm19 vfmadd231ps %ymm20, %ymm6, %ymm19 # ymm19 = (ymm6 * ymm20) + ymm19 vaddps %ymm3, %ymm10, %ymm14 vaddps %ymm14, %ymm19, %ymm14 vandps 0x57aea(%rip){1to8}, %ymm14, %ymm16 # 0x1f20ec4 vmulps 0x57ae8(%rip){1to8}, %ymm16, %ymm20 # 0x1f20ecc vminps %ymm10, %ymm3, %ymm21 vminps %ymm19, %ymm21, %ymm21 vxorps 0x57ac6(%rip){1to8}, %ymm20, %ymm22 # 0x1f20ec0 vcmpnltps %ymm22, %ymm21, %k0 vmaxps %ymm10, %ymm3, %ymm21 vmaxps %ymm19, %ymm21, %ymm19 vcmpleps %ymm20, %ymm19, %k1 korb %k1, %k0, %k0 kortestb %k0, %k0 je 0x1ec999e vmulps %ymm18, %ymm12, %ymm19 vmulps %ymm17, %ymm11, %ymm20 vmulps %ymm15, %ymm13, %ymm21 vmulps %ymm0, %ymm17, %ymm22 vmulps %ymm2, %ymm15, %ymm23 vmulps %ymm1, %ymm18, %ymm24 vfmsub213ps %ymm19, %ymm17, %ymm13 # ymm13 = (ymm17 * ymm13) - ymm19 vfmsub213ps %ymm20, %ymm15, %ymm12 # ymm12 = (ymm15 * ymm12) - ymm20 vfmsub213ps %ymm21, %ymm18, %ymm11 # ymm11 = (ymm18 * ymm11) - ymm21 vfmsub213ps %ymm22, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) - ymm22 vfmsub213ps %ymm23, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm1) - ymm23 vfmsub213ps %ymm24, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm0) - ymm24 vbroadcastss 0x57a50(%rip), %ymm18 # 0x1f20ec4 vandps %ymm18, %ymm19, %ymm15 vandps %ymm18, %ymm22, %ymm17 vcmpltps %ymm17, %ymm15, %k1 vandps %ymm18, %ymm20, %ymm15 vandps %ymm18, %ymm23, %ymm17 vcmpltps %ymm17, %ymm15, %k2 vandps %ymm18, %ymm21, %ymm15 vandps %ymm18, %ymm24, %ymm17 vcmpltps %ymm17, %ymm15, %k3 vmovaps %ymm13, %ymm2 {%k1} vmovaps %ymm12, %ymm1 {%k2} vmovaps %ymm11, %ymm0 {%k3} vmulps %ymm0, %ymm9, %ymm9 vfmadd213ps %ymm9, %ymm1, %ymm7 # ymm7 = (ymm1 * ymm7) + ymm9 vfmadd213ps %ymm7, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm6) + ymm7 vaddps %ymm6, %ymm6, %ymm6 vmulps %ymm0, %ymm8, %ymm7 vfmadd213ps %ymm7, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm7 vfmadd213ps %ymm5, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm5 vaddps %ymm4, %ymm4, %ymm4 vrcp14ps %ymm6, %ymm5 vmovaps %ymm5, %ymm7 vfnmadd213ps 0x2321d(%rip){1to8}, %ymm6, %ymm7 # ymm7 = -(ymm6 * ymm7) + mem vfmadd132ps %ymm5, %ymm5, %ymm7 # ymm7 = (ymm7 * ymm5) + ymm5 vmulps %ymm7, %ymm4, %ymm7 vcmpgeps 0xc(%r12){1to8}, %ymm7, %k1 vxorps 0x579ad(%rip){1to8}, %ymm6, %ymm4 # 0x1f20ec0 vcmpleps 0x20(%r12){1to8}, %ymm7, %k1 {%k1} vcmpneqps %ymm4, %ymm6, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ec999e movq %rcx, 0xa0(%rsp) kmovd %k1, %r15d vmovaps %ymm3, 0x2a0(%rsp) vmovaps %ymm10, 0x2c0(%rsp) vmovaps %ymm14, 0x2e0(%rsp) vmovaps %ymm2, 0x300(%rsp) vmovaps %ymm1, 0x320(%rsp) vmovaps %ymm0, 0x340(%rsp) kmovb %k1, 0x360(%rsp) vmovaps %ymm7, 0x3c0(%rsp) movb $-0x10, 0x440(%rsp) movq 0x18(%rsp), %rax movq (%rax), %rax movq %rax, 0xa8(%rsp) vcmpnltps 0x27a40(%rip){1to8}, %ymm16, %k2 # 0x1ef0fe8 vrcp14ps %ymm14, %ymm4 vbroadcastss 0x2315d(%rip), %ymm6 # 0x1eec714 vfnmadd213ps %ymm6, %ymm4, %ymm14 # ymm14 = 
-(ymm4 * ymm14) + ymm6 vfmadd132ps %ymm4, %ymm4, %ymm14 {%k2} {z} # ymm14 {%k2} {z} = (ymm14 * ymm4) + ymm4 vmulps %ymm3, %ymm14, %ymm3 vminps %ymm6, %ymm3, %ymm3 vmulps %ymm14, %ymm10, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm3, %ymm6, %ymm5 vsubps %ymm4, %ymm6, %ymm6 vblendps $0xf0, %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7] vmovaps %ymm4, 0x3a0(%rsp) vblendps $0xf0, %ymm6, %ymm3, %ymm3 # ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7] vmovaps %ymm3, 0x380(%rsp) vmovaps 0x9733f(%rip), %ymm3 # 0x1f60940 vmulps %ymm3, %ymm2, %ymm2 vmulps %ymm3, %ymm1, %ymm1 vmovaps %ymm2, 0x3e0(%rsp) vmulps %ymm3, %ymm0, %ymm0 vmovaps %ymm1, 0x400(%rsp) vmovaps %ymm0, 0x420(%rsp) vbroadcastss 0x223ef(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm7, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax movzbl %r15b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %eax vmovaps %ymm7, 0xe0(%rsp) movzbl %al, %r14d movl 0x280(%rsp,%r14,4), %ebx movq 0xa8(%rsp), %rax movq 0x1e8(%rax), %rax movq (%rax,%rbx,8), %r13 movl 0x24(%r12), %eax testl %eax, 0x34(%r13) je 0x1ec96c5 movq 0x18(%rsp), %rax movq 0x10(%rax), %r12 cmpq $0x0, 0x10(%r12) jne 0x1ec971a cmpq $0x0, 0x40(%r13) jne 0x1ec971a xorl %eax, %eax movq 0x10(%rsp), %r12 jmp 0x1ec96e5 movl $0x1, %eax shlxl %r14d, %eax, %eax movzbl %r15b, %ecx kmovd %eax, %k0 kmovd %ecx, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r15d movb $0x1, %al testb %al, %al je 0x1ec98f8 testb %r15b, %r15b je 0x1ec998c movzbl %r15b, %edi vmovaps %ymm7, %ymm0 leaq 0x28(%rsp), %rsi callq 0x631859 vmovaps 0xe0(%rsp), %ymm7 movb 0x28(%rsp), %al jmp 0x1ec967a leal (,%r14,4), %eax vmovss 0x380(%rsp,%rax), %xmm0 vmovss 0x3a0(%rsp,%rax), %xmm1 movq 0x18(%rsp), %rcx movq 0x8(%rcx), %rcx movl 0x100(%rsp,%rax), %edx vmovss 0x3e0(%rsp,%rax), %xmm2 vmovss 0x400(%rsp,%rax), %xmm3 vmovss 0x420(%rsp,%rax), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %edx, 0xc4(%rsp) movl %ebx, 0xc8(%rsp) movl (%rcx), %edx movl %edx, 0xcc(%rsp) movl 0x4(%rcx), %edx movl %edx, 0xd0(%rsp) movq 0x10(%rsp), %rdx vmovss 0x20(%rdx), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x3c0(%rsp,%rax), %xmm0 vmovss %xmm0, 0x20(%rdx) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rax movq %rax, 0x28(%rsp) movq 0x18(%r13), %rax movq %rax, 0x30(%rsp) movq %rcx, 0x38(%rsp) movq %rdx, 0x40(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0x48(%rsp) movl $0x1, 0x50(%rsp) movq 0x40(%r13), %rax testq %rax, %rax je 0x1ec982f leaq 0x28(%rsp), %rdi vzeroupper callq *%rax vmovaps 0xe0(%rsp), %ymm7 movq 0x28(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec98be movq 0x10(%r12), %rax testq %rax, %rax je 0x1ec9864 testb $0x2, (%r12) jne 0x1ec9847 testb $0x40, 0x3e(%r13) je 0x1ec985a leaq 0x28(%rsp), %rdi vzeroupper callq *%rax vmovaps 0xe0(%rsp), %ymm7 movq 0x28(%rsp), %rax cmpl $0x0, (%rax) je 0x1ec98be movq 0x40(%rsp), %rax movq 0x48(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx 
movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) movq 0x10(%rsp), %r12 jmp 0x1ec98d0 movq 0x10(%rsp), %r12 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r12) movl $0x1, %eax shlxl %r14d, %eax, %eax movzbl %r15b, %ecx kmovd %eax, %k0 kmovd %ecx, %k1 kandnb %k1, %k0, %k1 vcmpleps 0x20(%r12){1to8}, %ymm7, %k0 {%k1} jmp 0x1ec96df shll $0x2, %r14d vmovss 0x380(%rsp,%r14), %xmm0 vmovss 0x3a0(%rsp,%r14), %xmm1 vmovss 0x3c0(%rsp,%r14), %xmm2 vmovss %xmm2, 0x20(%r12) vmovss 0x3e0(%rsp,%r14), %xmm2 vmovss %xmm2, 0x30(%r12) vmovss 0x400(%rsp,%r14), %xmm2 vmovss %xmm2, 0x34(%r12) vmovss 0x420(%rsp,%r14), %xmm2 vmovss %xmm2, 0x38(%r12) vmovss %xmm0, 0x3c(%r12) vmovss %xmm1, 0x40(%r12) movl 0x100(%rsp,%r14), %eax movl %eax, 0x44(%r12) movl %ebx, 0x48(%r12) movq 0x18(%rsp), %rax movq 0x8(%rax), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r12) movl 0x4(%rax), %eax movl %eax, 0x50(%r12) movq 0x60(%rsp), %rsi movq 0x58(%rsp), %rdx movq 0xa0(%rsp), %rcx incq %rcx cmpq 0x68(%rsp), %rcx jne 0x1ec91d5 vbroadcastss 0x20(%r12), %ymm0 movq 0x8(%rsp), %rax vmovaps 0x240(%rsp), %ymm8 vmovaps 0x220(%rsp), %ymm9 vmovaps 0x200(%rsp), %ymm10 vmovaps 0x1e0(%rsp), %ymm11 vmovaps 0x1c0(%rsp), %ymm12 vmovaps 0x1a0(%rsp), %ymm13 vmovaps 0x180(%rsp), %ymm14 vmovaps 0x160(%rsp), %ymm15 vmovaps 0x140(%rsp), %ymm16 vmovaps 0x120(%rsp), %ymm17 vpmovsxbd 0x94056(%rip), %ymm18 # 0x1f5da70 vpbroadcastd 0x97b98(%rip), %ymm19 # 0x1f615bc vmovaps 0x260(%rsp), %ymm21 jmp 0x1ec8cba movq 0x8(%rsp), %rax jmp 0x1ec8cba nop
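The primitive test in the leaf loop above is the Plücker-style edge test the intersector's name advertises: vsubps/vaddps build edge and sum vectors, vmulps/vfmsub231ps form the cross products, vfmadd231ps dots them with the ray direction, and the k-mask compares accept when the three edge volumes U, V, W agree in sign. A scalar sketch for one triangle of the quad (Vec3 and the helpers are local stand-ins, not Embree's types; the real kernel tests two triangles per quad, compares against a tolerance scaled by |U+V+W| rather than exact zero, and goes on to compute t and the UVs):

struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  add(Vec3 a, Vec3 b)   { return {a.x+b.x, a.y+b.y, a.z+b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Ray (origin O, direction D) vs triangle (a, b, c): a hit requires the
// three signed edge volumes to share a sign. Vertices are taken relative
// to the ray origin first, exactly as the vsubps at the top of the loop does.
static bool plueckerHit(Vec3 O, Vec3 D, Vec3 a, Vec3 b, Vec3 c)
{
  const Vec3 v0 = sub(a, O), v1 = sub(b, O), v2 = sub(c, O);
  const float U = dot(cross(sub(v2, v0), add(v2, v0)), D);
  const float V = dot(cross(sub(v0, v1), add(v0, v1)), D);
  const float W = dot(cross(sub(v1, v2), add(v1, v2)), D);
  return (U >= 0 && V >= 0 && W >= 0) || (U <= 0 && V <= 0 && W <= 0);
}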
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::QuadMvIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
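The cull_radius trick keeps everything in squared distances so no square root is ever taken: a node is visited iff the squared distance from the query point to its box is at most cull_radius, which is already a squared quantity (radius² for sphere queries, dot(query_radius, query_radius) otherwise). A scalar sketch of that node test, with Vec3 as a local stand-in; the kernel below runs the same clamp/subtract/square/sum on eight boxes at once with vmaxps/vminps/vmulps/vaddps:

#include <algorithm>

struct Vec3 { float x, y, z; };

// squared distance from point p to the axis-aligned box [lo, hi]:
// clamp p into the box, then measure to the clamped point
inline float sqrDistPointAABB(const Vec3& p, const Vec3& lo, const Vec3& hi)
{
  const float dx = std::min(std::max(p.x, lo.x), hi.x) - p.x;
  const float dy = std::min(std::max(p.y, lo.y), hi.y) - p.y;
  const float dz = std::min(std::max(p.z, lo.z), hi.z) - p.z;
  return dx*dx + dy*dy + dz*dz;
}

inline bool nodeOverlapsQuery(const Vec3& p, const Vec3& lo, const Vec3& hi, float cull_radius)
{
  return sqrDistPointAABB(p, lo, hi) <= cull_radius;  // cull_radius is pre-squared
}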
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ec9a4d xorl %eax, %eax jmp 0x1eca1b6 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %rbp movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ec9a8c vmovss 0x10(%rbp), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ec9a97 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 vbroadcastss (%rbp), %ymm6 vbroadcastss 0x4(%rbp), %ymm5 vbroadcastss 0x8(%rbp), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x93f93(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x97ad6(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1eca1a1 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ec9b64 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ec9c89 testb $0x8, %r12b jne 0x1ec9c0a vmovaps 0x40(%r12), %ymm0 vmovaps 0x60(%r12), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%r12), %ymm5, %ymm3 vminps 0xa0(%r12), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%r12), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%r12), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %r12b jne 0x1ec9d99 testq %rdi, %rdi je 0x1ec9da3 andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ec9dad testl %eax, %eax je 0x1ec9b81 jmp 0x1eca072 testb $0x8, %r12b jne 0x1ec9c0a vmovaps 0xc0(%r12), %ymm0 vmovaps 0x40(%r12), %ymm1 vmovaps 0x60(%r12), %ymm2 vmovaps 0x80(%r12), %ymm3 vmovaps 0xa0(%r12), %ymm4 vmovaps 0xe0(%r12), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, 
%k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ec9c0a movl $0x6, %eax jmp 0x1ec9c7c movl $0x4, %eax jmp 0x1ec9c7c vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ec9e11 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ec9c7c vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ec9ea6 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ec9c7c movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ec9f78 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ec9c7c valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x56f38(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x9768f(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x97688(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9767b(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x48739(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ec9fbe popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1eca028 vpermt2q %ymm1, %ymm3, %ymm0 vmovq 
%xmm0, %r12 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ec9f6b cmpl $0x6, %eax jne 0x1ec9b64 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ec9b64 movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %r12 addq $0xd0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) movq $-0x4, %r14 xorl %r13d, %r13d movl (%r12,%r14,4), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1eca0ff movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14,4), %eax movl %eax, 0x40(%rbx) movq %rbp, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 jne 0x1eca0c2 movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0xe0, %r12 cmpq 0x60(%rsp), %r15 jne 0x1eca0b3 testb $0x1, %al vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 movq 0x58(%rsp), %r8 vpmovsxbd 0x93936(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x97479(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ec9b64 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1eca18b vmovss 0x10(%rbp), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1eca196 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ec9afa movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode)
    return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
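The traversal source here is identical to the QuadMv record above; only the leaf primitive differs. Judging by the two leaf loops in the disassemblies (a 0xe0-byte stride with direct vmovaps vertex loads for QuadMv, versus a 0x60-byte stride with scalar movl index loads and per-geometry vertex-buffer gathers for QuadMi), the layouts look roughly like the hypothetical sketches below; field names and ordering are illustrative guesses, not Embree's declarations.

#include <cstdint>

// QuadMv: four quads with vertices stored in the leaf itself, in
// struct-of-arrays form, so the intersect loop can stream them directly.
// Sketch size: 4*4*3*4 + 2*4*4 = 224 = 0xe0 bytes, matching the stride.
struct QuadMv4 {
  float v0x[4], v0y[4], v0z[4];
  float v1x[4], v1y[4], v1z[4];
  float v2x[4], v2y[4], v2z[4];
  float v3x[4], v3y[4], v3z[4];
  uint32_t geomID[4], primID[4];
};

// QuadMi: four quads stored as vertex indices; the intersect loop first
// resolves each geometry's vertex buffer, then gathers 16 vertices and
// transposes them into SoA registers before the same Pluecker test.
// Sketch size: 4*4*4 + 2*4*4 = 96 = 0x60 bytes, matching the stride.
struct QuadMi4 {
  uint32_t v0[4], v1[4], v2[4], v3[4];
  uint32_t geomID[4], primID[4];
};

The indexed variant trades the gather cost in the leaf for a much smaller BVH footprint and no vertex duplication.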
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x27c0, %rsp # imm = 0x27C0 movq %rdx, 0x18(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eca200 movq 0x70(%rax), %rax movq %rax, 0x460(%rsp) movl $0x0, 0x468(%rsp) cmpq $0x8, %rax jne 0x1eca212 leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r13 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x56c92(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vcmpltps 0x26dab(%rip){1to4}, %xmm4, %k1 # 0x1ef0fe8 leaq 0x470(%rsp), %rsi vbroadcastss 0x224c6(%rip), %xmm4 # 0x1eec714 vdivps %xmm0, %xmm4, %xmm0 vbroadcastss 0x56d04(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x55caa(%rip){1to4}, %xmm0, %xmm4 # 0x1f1ff10 vmulps 0x55ca4(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%r13), %ymm8 vbroadcastss 0x4(%r13), %ymm9 vbroadcastss 0x8(%r13), %ymm10 xorl %edx, %edx vucomiss %xmm1, %xmm4 setb %dl vbroadcastss %xmm4, %ymm11 vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3] vbroadcastsd %xmm5, %ymm12 vbroadcastss 0x56c3a(%rip), %ymm6 # 0x1f20edc vshufpd $0x1, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,0] vpermps %ymm4, %ymm6, %ymm13 vbroadcastss %xmm0, %ymm14 vbroadcastss 0x4844a(%rip), %ymm4 # 0x1f12704 vpermps %ymm0, %ymm4, %ymm15 vpermps %ymm0, %ymm6, %ymm16 shll $0x5, %edx xorl %ecx, %ecx vucomiss %xmm1, %xmm5 setb %cl shll $0x5, %ecx orq $0x40, %rcx xorl %edi, %edi vucomiss %xmm1, %xmm7 setb %dil shll $0x5, %edi orq $0x80, %rdi movq %rdx, %rax xorq $0x20, %rax movq %rax, 0x90(%rsp) movq %rcx, 0xa0(%rsp) xorq $0x20, %rcx movq %rcx, 0x88(%rsp) movq %rdi, 0x98(%rsp) xorq $0x20, %rdi movq %rdi, 0x80(%rsp) vbroadcastss %xmm2, %ymm17 vbroadcastss %xmm3, %ymm0 vpmovsxbd 0x93738(%rip), %ymm18 # 0x1f5da70 vpbroadcastd 0x9727a(%rip), %ymm19 # 0x1f615bc movq %r13, 0x8(%rsp) movq %rdx, 0x10(%rsp) vmovaps %ymm8, 0x240(%rsp) vmovaps %ymm9, 0x220(%rsp) vmovaps %ymm10, 0x200(%rsp) vmovaps %ymm11, 0x1e0(%rsp) vmovaps %ymm12, 0x1c0(%rsp) vmovaps %ymm13, 0x1a0(%rsp) vmovaps %ymm14, 0x180(%rsp) vmovaps %ymm15, 0x160(%rsp) vmovaps %ymm16, 0x140(%rsp) vmovaps %ymm17, 0x120(%rsp) vmovss 0x20(%r13), %xmm1 leaq 0x460(%rsp), %rax cmpq %rax, %rsi je 0x1eca200 vmovss -0x8(%rsi), %xmm2 addq $-0x10, %rsi vucomiss %xmm1, %xmm2 ja 0x1eca3aa movq (%rsi), %r9 testb $0x8, %r9b jne 0x1eca488 vmovaps 0x40(%r9,%rdx), %ymm1 vsubps %ymm8, %ymm1, %ymm1 vmulps %ymm1, %ymm11, %ymm1 movq 0xa0(%rsp), %rax vmovaps 0x40(%r9,%rax), %ymm2 vsubps %ymm9, %ymm2, %ymm2 vmulps %ymm2, %ymm12, %ymm2 vmaxps %ymm2, %ymm1, %ymm1 movq 0x98(%rsp), %rax vmovaps 0x40(%r9,%rax), %ymm2 vsubps %ymm10, %ymm2, %ymm2 vmulps %ymm2, %ymm13, %ymm2 vmaxps %ymm17, %ymm2, %ymm2 vmaxps %ymm2, %ymm1, %ymm21 movq 0x90(%rsp), %rax vmovaps 0x40(%r9,%rax), %ymm1 vsubps %ymm8, %ymm1, %ymm1 movq 0x88(%rsp), %rax vmovaps 0x40(%r9,%rax), %ymm2 vmulps %ymm1, %ymm14, %ymm1 vsubps %ymm9, %ymm2, %ymm2 vmulps %ymm2, %ymm15, %ymm2 vminps %ymm2, %ymm1, %ymm1 movq 0x80(%rsp), %rax vmovaps 0x40(%r9,%rax), %ymm2 vsubps %ymm10, %ymm2, %ymm2 vmulps %ymm2, %ymm16, %ymm2 vminps %ymm0, %ymm2, %ymm2 vminps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm21, %k0 kmovb %k0, %edi testb $0x8, %r9b jne 0x1eca4f7 testq %rdi, %rdi je 0x1eca4fe andq $-0x10, %r9 vmovdqu (%r9), %ymm1 vmovdqu 0x20(%r9), %ymm2 vmovdqa64 %ymm18, %ymm3 vpternlogd $0xf8, %ymm19, %ymm21, %ymm3 kmovd %edi, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r9 prefetcht0 
(%r9) prefetcht0 0x40(%r9) prefetcht0 0x80(%r9) prefetcht0 0xc0(%r9) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1eca505 testl %eax, %eax je 0x1eca3cd jmp 0x1eca882 movl $0x6, %eax jmp 0x1eca4ea movl $0x4, %eax jmp 0x1eca4ea vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1eca56a vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r9 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%rsi) vpermd %ymm21, %ymm4, %ymm1 vmovd %xmm1, 0x8(%rsi) addq $0x10, %rsi movq 0x10(%rsp), %rdx jmp 0x1eca4ea vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1eca5f1 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r9 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%rsi) vpermd %ymm21, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rsi) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%rsi) vpermd %ymm21, %ymm5, %ymm1 vmovd %xmm1, 0x18(%rsi) addq $0x20, %rsi jmp 0x1eca563 movq %rdi, %r8 vmovdqa64 %ymm21, %ymm22 vmovdqa64 %ymm19, %ymm20 vmovdqa64 %ymm18, %ymm19 vmovaps %ymm17, %ymm18 vmovaps %ymm16, %ymm17 vmovaps %ymm15, %ymm16 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm12, %ymm13 vmovaps %ymm11, %ymm12 vmovaps %ymm10, %ymm11 vmovaps %ymm9, %ymm10 vmovaps %ymm8, %ymm9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1eca744 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r9 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 movq %rdi, %rsi vmovq %xmm3, (%rdi) vmovdqa64 %ymm22, %ymm21 vpermd %ymm22, %ymm7, %ymm3 vmovd %xmm3, 0x8(%rdi) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%rdi) vpermd %ymm22, %ymm6, %ymm3 vmovd %xmm3, 0x18(%rdi) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%rdi) vpermd %ymm22, %ymm4, %ymm1 vmovd %xmm1, 0x28(%rdi) addq $0x30, %rsi movq 0x8(%rsp), %r13 vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 movq 0x10(%rsp), %rdx vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovaps %ymm16, %ymm15 vmovaps %ymm17, %ymm16 vmovaps %ymm18, %ymm17 vmovdqa64 %ymm19, %ymm18 vmovdqa64 %ymm20, %ymm19 movq %r8, %rdi jmp 0x1eca4ea valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x5676c(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x96ec2(%rip), %ymm21 # 0x1f61620 vpermt2d %ymm8, %ymm21, %ymm3 vpmovsxbd 0x96ebb(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x96eae(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x47f6c(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 
0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1eca78b popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%rdi) vpermd %ymm22, %ymm3, %ymm4 vmovd %xmm4, 0x8(%rdi) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1eca7f5 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r9 movq 0x8(%rsp), %r13 movq %rdi, %rsi vmovaps %ymm9, %ymm8 vmovaps %ymm10, %ymm9 vmovaps %ymm11, %ymm10 movq 0x10(%rsp), %rdx vmovaps %ymm12, %ymm11 vmovaps %ymm13, %ymm12 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovaps %ymm16, %ymm15 vmovaps %ymm17, %ymm16 vmovaps %ymm18, %ymm17 vmovdqa64 %ymm19, %ymm18 vmovdqa64 %ymm20, %ymm19 vmovdqa64 %ymm22, %ymm21 jmp 0x1eca73c cmpl $0x6, %eax jne 0x1eca3a4 vmovaps %ymm21, 0x260(%rsp) movq %rsi, 0x60(%rsp) movl %r9d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x78(%rsp) je 0x1ecb178 andq $-0x10, %r9 movq 0x18(%rsp), %rax movq (%rax), %rax movq %rax, 0x28(%rsp) xorl %r8d, %r8d movq %rdi, 0x70(%rsp) movq %r9, 0x68(%rsp) leaq (%r8,%r8,2), %rax shlq $0x5, %rax prefetcht0 (%r9,%rax) prefetcht0 0x40(%r9,%rax) movq 0x28(%rsp), %rcx movq 0x228(%rcx), %rcx movl 0x48(%r9,%rax), %edx movq (%rcx,%rdx,8), %rdx movl 0x8(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm2 movl 0x18(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm1 movl 0x38(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm0 movl 0x28(%r9,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm2, %ymm2 movl 0x40(%r9,%rax), %edx movq (%rcx,%rdx,8), %rdx movl (%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm3 movl 0x10(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm4 movl 0x30(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm5 movl 0x20(%r9,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm3, %ymm3 movl 0x4c(%r9,%rax), %edx movq (%rcx,%rdx,8), %rdx movl 0xc(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm6 movl 0x1c(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm7 movl 0x3c(%r9,%rax), %esi vmovups (%rdx,%rsi,4), %xmm8 movl 0x2c(%r9,%rax), %esi vinsertf128 $0x1, (%rdx,%rsi,4), %ymm6, %ymm6 movl 0x44(%r9,%rax), %edx movq (%rcx,%rdx,8), %rcx movl 0x4(%r9,%rax), %edx vmovups (%rcx,%rdx,4), %xmm9 movl 0x14(%r9,%rax), %edx vmovups (%rcx,%rdx,4), %xmm10 movl 0x34(%r9,%rax), %edx vmovups (%rcx,%rdx,4), %xmm11 movl 0x24(%r9,%rax), %edx vinsertf128 $0x1, (%rcx,%rdx,4), %ymm9, %ymm9 vbroadcasti128 0x40(%r9,%rax), %ymm12 # ymm12 = mem[0,1,0,1] vmovdqa %ymm12, 0x280(%rsp) vbroadcasti128 0x50(%r9,%rax), %ymm12 # ymm12 = mem[0,1,0,1] vunpcklps %xmm1, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] vunpckhps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] vunpcklps %xmm7, %xmm10, %xmm4 # xmm4 = xmm10[0],xmm7[0],xmm10[1],xmm7[1] vunpckhps %xmm7, %xmm10, %xmm7 # xmm7 = xmm10[2],xmm7[2],xmm10[3],xmm7[3] vunpcklps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] vunpcklps %xmm4, %xmm13, %xmm7 # xmm7 = xmm13[0],xmm4[0],xmm13[1],xmm4[1] vunpckhps %xmm4, %xmm13, %xmm4 # xmm4 = xmm13[2],xmm4[2],xmm13[3],xmm4[3] vunpcklps %xmm0, %xmm5, %xmm10 # xmm10 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] vunpckhps %xmm0, %xmm5, %xmm0 # xmm0 = xmm5[2],xmm0[2],xmm5[3],xmm0[3] vunpcklps %xmm8, %xmm11, %xmm5 # xmm5 = xmm11[0],xmm8[0],xmm11[1],xmm8[1] vunpckhps %xmm8, %xmm11, %xmm8 # xmm8 = xmm11[2],xmm8[2],xmm11[3],xmm8[3] vunpcklps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1] 
vunpcklps %xmm5, %xmm10, %xmm8 # xmm8 = xmm10[0],xmm5[0],xmm10[1],xmm5[1] vunpckhps %xmm5, %xmm10, %xmm5 # xmm5 = xmm10[2],xmm5[2],xmm10[3],xmm5[3] vmovdqa %ymm12, 0x100(%rsp) vunpcklps %ymm6, %ymm9, %ymm10 # ymm10 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5] vunpcklps %ymm2, %ymm3, %ymm11 # ymm11 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5] vunpcklps %ymm10, %ymm11, %ymm12 # ymm12 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5] vunpckhps %ymm10, %ymm11, %ymm10 # ymm10 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7] vunpckhps %ymm6, %ymm9, %ymm6 # ymm6 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7] vunpckhps %ymm2, %ymm3, %ymm2 # ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7] vunpcklps %ymm6, %ymm2, %ymm2 # ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[4],ymm6[4],ymm2[5],ymm6[5] vinsertf128 $0x1, %xmm7, %ymm7, %ymm3 vinsertf128 $0x1, %xmm4, %ymm4, %ymm11 vinsertf128 $0x1, %xmm1, %ymm1, %ymm1 vinsertf128 $0x1, %xmm8, %ymm8, %ymm13 vinsertf128 $0x1, %xmm5, %ymm5, %ymm14 vbroadcastss (%r13), %ymm15 vbroadcastss 0x4(%r13), %ymm16 vbroadcastss 0x8(%r13), %ymm17 vbroadcastss 0x10(%r13), %ymm5 vinsertf128 $0x1, %xmm0, %ymm0, %ymm0 vbroadcastss 0x14(%r13), %ymm7 vbroadcastss 0x18(%r13), %ymm9 vsubps %ymm15, %ymm12, %ymm4 vsubps %ymm16, %ymm10, %ymm6 vsubps %ymm17, %ymm2, %ymm8 vsubps %ymm15, %ymm3, %ymm19 vsubps %ymm16, %ymm11, %ymm20 vsubps %ymm17, %ymm1, %ymm21 vsubps %ymm15, %ymm13, %ymm22 vsubps %ymm16, %ymm14, %ymm14 vsubps %ymm17, %ymm0, %ymm17 vsubps %ymm4, %ymm22, %ymm11 vsubps %ymm6, %ymm14, %ymm13 vsubps %ymm8, %ymm17, %ymm12 vsubps %ymm19, %ymm4, %ymm15 vsubps %ymm20, %ymm6, %ymm18 vsubps %ymm21, %ymm8, %ymm16 vsubps %ymm22, %ymm19, %ymm0 vsubps %ymm14, %ymm20, %ymm1 vsubps %ymm17, %ymm21, %ymm2 vaddps %ymm4, %ymm22, %ymm3 vaddps %ymm6, %ymm14, %ymm10 vaddps %ymm8, %ymm17, %ymm23 vmulps %ymm12, %ymm10, %ymm24 vfmsub231ps %ymm23, %ymm13, %ymm24 # ymm24 = (ymm13 * ymm23) - ymm24 vmulps %ymm11, %ymm23, %ymm23 vfmsub231ps %ymm3, %ymm12, %ymm23 # ymm23 = (ymm12 * ymm3) - ymm23 vmulps %ymm3, %ymm13, %ymm3 vfmsub231ps %ymm10, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm10) - ymm3 vmulps %ymm3, %ymm9, %ymm3 vfmadd231ps %ymm23, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm23) + ymm3 vfmadd231ps %ymm24, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm24) + ymm3 vaddps %ymm19, %ymm4, %ymm10 vaddps %ymm20, %ymm6, %ymm23 vaddps %ymm21, %ymm8, %ymm24 vmulps %ymm16, %ymm23, %ymm25 vfmsub231ps %ymm24, %ymm18, %ymm25 # ymm25 = (ymm18 * ymm24) - ymm25 vmulps %ymm15, %ymm24, %ymm24 vfmsub231ps %ymm10, %ymm16, %ymm24 # ymm24 = (ymm16 * ymm10) - ymm24 vmulps %ymm18, %ymm10, %ymm10 vfmsub231ps %ymm23, %ymm15, %ymm10 # ymm10 = (ymm15 * ymm23) - ymm10 vmulps %ymm10, %ymm9, %ymm10 vfmadd231ps %ymm24, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm24) + ymm10 vfmadd231ps %ymm25, %ymm5, %ymm10 # ymm10 = (ymm5 * ymm25) + ymm10 vaddps %ymm22, %ymm19, %ymm19 vaddps %ymm14, %ymm20, %ymm14 vaddps %ymm17, %ymm21, %ymm17 vmulps %ymm2, %ymm14, %ymm20 vfmsub231ps %ymm17, %ymm1, %ymm20 # ymm20 = (ymm1 * ymm17) - ymm20 vmulps %ymm0, %ymm17, %ymm17 vfmsub231ps %ymm19, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm19) - ymm17 vmulps %ymm1, %ymm19, %ymm19 vfmsub231ps %ymm14, %ymm0, %ymm19 # ymm19 = (ymm0 * ymm14) - ymm19 vmulps %ymm19, %ymm9, %ymm19 vfmadd231ps %ymm17, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm17) + ymm19 vfmadd231ps %ymm20, %ymm5, %ymm19 # ymm19 = (ymm5 * ymm20) + ymm19 vaddps %ymm3, %ymm10, %ymm14 vaddps %ymm14, %ymm19, %ymm14 vandps 
0x562ff(%rip){1to8}, %ymm14, %ymm17 # 0x1f20ec4 vmulps 0x562fd(%rip){1to8}, %ymm17, %ymm20 # 0x1f20ecc vminps %ymm10, %ymm3, %ymm21 vminps %ymm19, %ymm21, %ymm21 vxorps 0x562db(%rip){1to8}, %ymm20, %ymm22 # 0x1f20ec0 vcmpnltps %ymm22, %ymm21, %k0 vmaxps %ymm10, %ymm3, %ymm21 vmaxps %ymm19, %ymm21, %ymm19 vcmpleps %ymm20, %ymm19, %k1 korb %k1, %k0, %k0 kortestb %k0, %k0 je 0x1ecb16a vmulps %ymm18, %ymm12, %ymm19 vmulps %ymm16, %ymm11, %ymm20 vmulps %ymm15, %ymm13, %ymm21 vmulps %ymm1, %ymm16, %ymm22 vmulps %ymm2, %ymm15, %ymm23 vmulps %ymm0, %ymm18, %ymm24 vfmsub213ps %ymm19, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm13) - ymm19 vfmsub213ps %ymm20, %ymm15, %ymm12 # ymm12 = (ymm15 * ymm12) - ymm20 vfmsub213ps %ymm21, %ymm18, %ymm11 # ymm11 = (ymm18 * ymm11) - ymm21 vfmsub213ps %ymm22, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) - ymm22 vfmsub213ps %ymm23, %ymm16, %ymm0 # ymm0 = (ymm16 * ymm0) - ymm23 vfmsub213ps %ymm24, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm1) - ymm24 vbroadcastss 0x56265(%rip), %ymm18 # 0x1f20ec4 vandps %ymm18, %ymm19, %ymm15 vandps %ymm18, %ymm22, %ymm16 vcmpltps %ymm16, %ymm15, %k1 vandps %ymm18, %ymm20, %ymm15 vandps %ymm18, %ymm23, %ymm16 vcmpltps %ymm16, %ymm15, %k2 vandps %ymm18, %ymm21, %ymm15 vandps %ymm18, %ymm24, %ymm16 vcmpltps %ymm16, %ymm15, %k3 vmovaps %ymm13, %ymm2 {%k1} vmovaps %ymm12, %ymm0 {%k2} vmovaps %ymm11, %ymm1 {%k3} vmulps %ymm1, %ymm9, %ymm9 vfmadd213ps %ymm9, %ymm0, %ymm7 # ymm7 = (ymm0 * ymm7) + ymm9 vfmadd213ps %ymm7, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm7 vaddps %ymm5, %ymm5, %ymm5 vmulps %ymm1, %ymm8, %ymm7 vfmadd213ps %ymm7, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + ymm7 vfmadd213ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm6 vaddps %ymm4, %ymm4, %ymm4 vrcp14ps %ymm5, %ymm6 vmovaps %ymm6, %ymm7 vfnmadd213ps 0x21a32(%rip){1to8}, %ymm5, %ymm7 # ymm7 = -(ymm5 * ymm7) + mem vfmadd132ps %ymm6, %ymm6, %ymm7 # ymm7 = (ymm7 * ymm6) + ymm6 vmulps %ymm7, %ymm4, %ymm7 vcmpgeps 0xc(%r13){1to8}, %ymm7, %k1 vxorps 0x561c3(%rip){1to8}, %ymm5, %ymm4 # 0x1f20ec0 vcmpleps 0x20(%r13){1to8}, %ymm7, %k1 {%k1} vcmpneqps %ymm4, %ymm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ecb16a movq %r8, 0xa8(%rsp) kmovd %k1, %r15d vmovaps %ymm3, 0x2a0(%rsp) vmovaps %ymm10, 0x2c0(%rsp) vmovaps %ymm14, 0x2e0(%rsp) vmovaps %ymm2, 0x300(%rsp) vmovaps %ymm0, 0x320(%rsp) vmovaps %ymm1, 0x340(%rsp) kmovb %k1, 0x360(%rsp) vmovaps %ymm7, 0x3c0(%rsp) movb $-0x10, 0x440(%rsp) vcmpnltps 0x26267(%rip){1to8}, %ymm17, %k2 # 0x1ef0fe8 vrcp14ps %ymm14, %ymm4 vbroadcastss 0x21984(%rip), %ymm6 # 0x1eec714 vfnmadd213ps %ymm6, %ymm4, %ymm14 # ymm14 = -(ymm4 * ymm14) + ymm6 vfmadd132ps %ymm4, %ymm4, %ymm14 {%k2} {z} # ymm14 {%k2} {z} = (ymm14 * ymm4) + ymm4 vmulps %ymm3, %ymm14, %ymm3 vminps %ymm6, %ymm3, %ymm3 vmulps %ymm14, %ymm10, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm3, %ymm6, %ymm5 vsubps %ymm4, %ymm6, %ymm6 vblendps $0xf0, %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7] vmovaps %ymm4, 0x3a0(%rsp) vblendps $0xf0, %ymm6, %ymm3, %ymm3 # ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7] vmovaps %ymm3, 0x380(%rsp) vmovaps 0x95b66(%rip), %ymm3 # 0x1f60940 vmulps %ymm3, %ymm2, %ymm2 vmulps %ymm3, %ymm0, %ymm0 vmulps %ymm3, %ymm1, %ymm1 vmovaps %ymm2, 0x3e0(%rsp) vmovaps %ymm0, 0x400(%rsp) vmovaps %ymm1, 0x420(%rsp) vbroadcastss 0x20c16(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm7, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, 
%ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %ecx movzbl %r15b, %eax cmovnel %ecx, %eax vmovaps %ymm7, 0xe0(%rsp) movzbl %al, %eax tzcntl %eax, %r14d movl 0x280(%rsp,%r14,4), %ebx movq 0x28(%rsp), %rax movq 0x1e8(%rax), %rax movq (%rax,%rbx,8), %r12 movl 0x24(%r13), %eax testl %eax, 0x34(%r12) je 0x1ecae9b movq 0x18(%rsp), %rax movq 0x10(%rax), %r13 cmpq $0x0, 0x10(%r13) jne 0x1ecaef0 cmpq $0x0, 0x40(%r12) jne 0x1ecaef0 xorl %eax, %eax movq 0x8(%rsp), %r13 jmp 0x1ecaebb movl $0x1, %eax shlxl %r14d, %eax, %eax movzbl %r15b, %ecx kmovd %eax, %k0 kmovd %ecx, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r15d movb $0x1, %al testb %al, %al je 0x1ecb0ce testb %r15b, %r15b je 0x1ecb158 movzbl %r15b, %edi vmovaps %ymm7, %ymm0 leaq 0x30(%rsp), %rsi callq 0x6318aa vmovaps 0xe0(%rsp), %ymm7 movb 0x30(%rsp), %al jmp 0x1ecae4f leal (,%r14,4), %eax vmovss 0x380(%rsp,%rax), %xmm0 vmovss 0x3a0(%rsp,%rax), %xmm1 movq 0x18(%rsp), %rcx movq 0x8(%rcx), %rcx movl 0x100(%rsp,%rax), %edx vmovss 0x3e0(%rsp,%rax), %xmm2 vmovss 0x400(%rsp,%rax), %xmm3 vmovss 0x420(%rsp,%rax), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %edx, 0xc4(%rsp) movl %ebx, 0xc8(%rsp) movl (%rcx), %edx movl %edx, 0xcc(%rsp) movl 0x4(%rcx), %edx movl %edx, 0xd0(%rsp) movq 0x8(%rsp), %rdx vmovss 0x20(%rdx), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x3c0(%rsp,%rax), %xmm0 vmovss %xmm0, 0x20(%rdx) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rax movq %rax, 0x30(%rsp) movq 0x18(%r12), %rax movq %rax, 0x38(%rsp) movq %rcx, 0x40(%rsp) movq %rdx, 0x48(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0x50(%rsp) movl $0x1, 0x58(%rsp) movq 0x40(%r12), %rax testq %rax, %rax je 0x1ecb007 leaq 0x30(%rsp), %rdi vzeroupper callq *%rax vmovaps 0xe0(%rsp), %ymm7 movq 0x30(%rsp), %rax cmpl $0x0, (%rax) je 0x1ecb096 movq 0x10(%r13), %rax testq %rax, %rax je 0x1ecb03c testb $0x2, (%r13) jne 0x1ecb01f testb $0x40, 0x3e(%r12) je 0x1ecb032 leaq 0x30(%rsp), %rdi vzeroupper callq *%rax vmovaps 0xe0(%rsp), %ymm7 movq 0x30(%rsp), %rax cmpl $0x0, (%rax) je 0x1ecb096 movq 0x48(%rsp), %rax movq 0x50(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) movq 0x8(%rsp), %r13 jmp 0x1ecb0a7 movq 0x8(%rsp), %r13 vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r13) movl $0x1, %eax shlxl %r14d, %eax, %eax movzbl %r15b, %ecx kmovd %eax, %k0 kmovd %ecx, %k1 kandnb %k1, %k0, %k1 vcmpleps 0x20(%r13){1to8}, %ymm7, %k0 {%k1} jmp 0x1ecaeb5 shll $0x2, %r14d vmovss 0x380(%rsp,%r14), %xmm0 vmovss 0x3a0(%rsp,%r14), %xmm1 vmovss 0x3c0(%rsp,%r14), %xmm2 vmovss %xmm2, 0x20(%r13) vmovss 0x3e0(%rsp,%r14), %xmm2 vmovss %xmm2, 0x30(%r13) vmovss 0x400(%rsp,%r14), %xmm2 vmovss %xmm2, 0x34(%r13) vmovss 0x420(%rsp,%r14), %xmm2 vmovss %xmm2, 0x38(%r13) vmovss %xmm0, 0x3c(%r13) vmovss %xmm1, 0x40(%r13) movl 0x100(%rsp,%r14), %eax movl %eax, 0x44(%r13) movl %ebx, 0x48(%r13) movq 0x18(%rsp), %rax movq 0x8(%rax), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r13) movl 0x4(%rax), %eax movl %eax, 0x50(%r13) movq 0x70(%rsp), %rdi movq 0x68(%rsp), %r9 movq 0xa8(%rsp), %r8 incq 
%r8 cmpq 0x78(%rsp), %r8 jne 0x1eca8cb vbroadcastss 0x20(%r13), %ymm0 movq 0x60(%rsp), %rsi vmovaps 0x240(%rsp), %ymm8 vmovaps 0x220(%rsp), %ymm9 vmovaps 0x200(%rsp), %ymm10 movq 0x10(%rsp), %rdx vmovaps 0x1e0(%rsp), %ymm11 vmovaps 0x1c0(%rsp), %ymm12 vmovaps 0x1a0(%rsp), %ymm13 vmovaps 0x180(%rsp), %ymm14 vmovaps 0x160(%rsp), %ymm15 vmovaps 0x140(%rsp), %ymm16 vmovaps 0x120(%rsp), %ymm17 vpmovsxbd 0x92886(%rip), %ymm18 # 0x1f5da70 vpbroadcastd 0x963c8(%rip), %ymm19 # 0x1f615bc vmovaps 0x260(%rsp), %ymm21 jmp 0x1eca3a4 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, true, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
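The kernel above is heavily templated and compiles to the AVX-512 listing that follows. As a reading aid, here is a minimal scalar sketch of the same pop-loop shape: pop a node, skip it if its recorded distance already exceeds the (shrinking) cull radius, descend through interior nodes, and tighten the radius at leaves. Everything in the sketch (Vec3, AABB, Node, the flat node array) is a hypothetical stand-in, not Embree's API, and it pushes children unsorted where Embree's node traverser pushes them distance-sorted.

// Minimal scalar sketch of the pop-loop structure; all types are illustrative.
#include <algorithm>
#include <cmath>
#include <vector>

struct Vec3 { float x, y, z; };

struct AABB {
  Vec3 lo, hi;
  // squared distance from point p to this box (0 if p is inside)
  float distSqr(const Vec3& p) const {
    float dx = std::max({lo.x - p.x, 0.0f, p.x - hi.x});
    float dy = std::max({lo.y - p.y, 0.0f, p.y - hi.y});
    float dz = std::max({lo.z - p.z, 0.0f, p.z - hi.z});
    return dx*dx + dy*dy + dz*dz;
  }
};

struct Node {
  AABB bounds;
  int child[2] = {-1, -1}; // interior: child indices; leaf: child[0] == -1
  std::vector<Vec3> prims; // leaf payload (plain points, for simplicity)
};

// Returns true and shrinks 'radius' if any primitive lies within it,
// mirroring how the real kernel recomputes cull_radius after a leaf hit.
bool pointQuery(const std::vector<Node>& nodes, const Vec3& p, float& radius)
{
  struct StackItem { int node; float distSqr; };
  std::vector<StackItem> stack{{0, 0.0f}}; // root (Embree seeds dist = -inf)
  float cullSqr = radius * radius;
  bool changed = false;

  while (!stack.empty()) {
    StackItem top = stack.back(); stack.pop_back();
    if (top.distSqr > cullSqr) continue;  // node became too far since push

    const Node& n = nodes[top.node];
    if (n.child[0] < 0) {                 // leaf: test primitives, shrink radius
      for (const Vec3& q : n.prims) {
        float d2 = (q.x-p.x)*(q.x-p.x) + (q.y-p.y)*(q.y-p.y) + (q.z-p.z)*(q.z-p.z);
        if (d2 < cullSqr) { cullSqr = d2; changed = true; }
      }
    } else {                              // interior: push overlapping children
      for (int c : n.child) {
        float d2 = nodes[c].bounds.distSqr(p);
        if (d2 <= cullSqr) stack.push_back({c, d2});
      }
    }
  }
  if (changed) radius = std::sqrt(cullSqr);
  return changed;
}

Re-checking the stored distance after the pop matters because the cull radius may have shrunk between push and pop; that is exactly what the *(float*)&stackPtr->dist > cull_radius test in the real kernel does.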
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ecb213 xorl %eax, %eax jmp 0x1ecb954 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r12 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ecb254 vmovss 0x10(%r12), %xmm0 vmulss %xmm0, %xmm0, %xmm9 jmp 0x1ecb25f vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm9 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r12), %ymm6 vbroadcastss 0x4(%r12), %ymm5 vbroadcastss 0x8(%r12), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x927c1(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x96304(%rip), %ymm12 # 0x1f615bc vmovups %ymm6, 0x30(%rsp) vmovups %ymm5, 0x10(%rsp) vsubps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x140(%rsp) vaddps %ymm0, %ymm6, %ymm3 vmovups %ymm3, 0x120(%rsp) vsubps %ymm1, %ymm5, %ymm3 vmovups %ymm3, 0x100(%rsp) vaddps %ymm1, %ymm5, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x80(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm10 vmovaps %xmm9, 0x70(%rsp) vmovups %ymm10, 0x160(%rsp) cmpq %r9, %r8 je 0x1ecb93f vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm9, %xmm0 ja 0x1ecb32e movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ecb43d testb $0x8, %bpl jne 0x1ecb3c8 vmovaps 0x40(%rbp), %ymm0 vmovaps 0x60(%rbp), %ymm1 vmaxps %ymm0, %ymm6, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm6, %ymm2, %ymm2 vmaxps 0x80(%rbp), %ymm5, %ymm3 vminps 0xa0(%rbp), %ymm3, %ymm3 vmovups 0x80(%rsp), %ymm7 vmaxps 0xc0(%rbp), %ymm7, %ymm4 vsubps %ymm5, %ymm3, %ymm3 vminps 0xe0(%rbp), %ymm4, %ymm4 vsubps %ymm7, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm10, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %edi testb $0x8, %bpl jne 0x1ecb53d testq %rdi, %rdi je 0x1ecb547 andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ecb551 testl %eax, %eax je 0x1ecb34b jmp 0x1ecb816 testb $0x8, %bpl jne 0x1ecb3c8 vmovaps 0xc0(%rbp), %ymm0 vmovaps 0x40(%rbp), %ymm1 vmovaps 0x60(%rbp), %ymm2 vmovaps 0x80(%rbp), %ymm3 vmovaps 0xa0(%rbp), %ymm4 vmovaps 0xe0(%rbp), %ymm5 vmovups 0x30(%rsp), %ymm6 vmaxps %ymm1, %ymm6, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps 0x30(%rsp), %ymm6, %ymm6 vmovups 0x10(%rsp), %ymm7 vmaxps %ymm3, %ymm7, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps 0x10(%rsp), %ymm7, %ymm7 vmovaps %ymm10, %ymm13 vmovups 0x80(%rsp), %ymm10 vmaxps %ymm0, %ymm10, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm10, %ymm8, %ymm8 vmovaps %ymm13, %ymm10 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vmovups 0x30(%rsp), %ymm6 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm5, %k3 vmovups 0x10(%rsp), %ymm5 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm0, %k3 korb %k0, %k3, 
%k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ecb3c8 movl $0x6, %eax jmp 0x1ecb430 movl $0x4, %eax jmp 0x1ecb430 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ecb5b5 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ecb430 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ecb64a vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 jmp 0x1ecb430 movq %rdi, %r10 vmovaps %ymm10, %ymm13 vmovdqa %ymm7, %ymm10 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ecb71c vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm10, %ymm7 vpermd %ymm10, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps %ymm13, %ymm10 movq %r10, %rdi jmp 0x1ecb430 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x55794(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x95eeb(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x95ee4(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x95ed7(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x46f95(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ecb762 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ecb7cc vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 
%rbp vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovdqa %ymm10, %ymm7 jmp 0x1ecb70f cmpl $0x6, %eax jne 0x1ecb32e movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x60(%rsp) je 0x1ecb32e movq %rdi, 0x50(%rsp) vmovups %ymm7, 0x180(%rsp) movq %r8, 0x58(%rsp) andq $-0x10, %rbp addq $0x50, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x68(%rsp) xorl %r13d, %r13d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ecb89e movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r12, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 cmpq $0x4, %r14 jne 0x1ecb85e movq 0x68(%rsp), %rax orb %r13b, %al incq %r15 addq $0x60, %rbp cmpq 0x60(%rsp), %r15 jne 0x1ecb853 testb $0x1, %al movq 0x58(%rsp), %r8 vmovups 0x30(%rsp), %ymm6 vmovups 0x10(%rsp), %ymm5 vmovaps 0x70(%rsp), %xmm9 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x92192(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x95cd5(%rip), %ymm12 # 0x1f615bc vmovups 0x180(%rsp), %ymm7 vmovups 0x160(%rsp), %ymm10 movq 0x50(%rsp), %rdi je 0x1ecb32e vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ecb929 vmovss 0x10(%r12), %xmm3 vmulss %xmm3, %xmm3, %xmm9 jmp 0x1ecb934 vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm9 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ecb2c4 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiMBIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
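The source is the same generic pointQuery kernel, but this instantiation's types mask (16777232) includes the motion-blur bit, and the disassembly below shows the extra work: the query's time value (loaded via vbroadcastss from offset 0xc) is folded into each node's bounds with one FMA per bounding plane before the distance test. A rough scalar sketch of what those FMAs appear to compute, with hypothetical field names since the actual multi-segment node layout is Embree-internal:

// Hypothetical sketch of the time interpolation visible in the listing:
// motion-blur nodes effectively store bounds at t=0 plus per-plane deltas,
// and the kernel forms bounds(t) = b0 + t * db before testing distance.
struct MBBounds {
  float lower0[3], upper0[3]; // bounds at time 0
  float dlower[3], dupper[3]; // change of each plane over the time range
};

inline void interpolateBounds(const MBBounds& n, float time,
                              float lower[3], float upper[3])
{
  for (int i = 0; i < 3; ++i) {
    lower[i] = n.dlower[i] * time + n.lower0[i]; // one fused multiply-add
    upper[i] = n.dupper[i] * time + n.upper0[i]; // per plane, as in the asm
  }
}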
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1eccae1 xorl %eax, %eax jmp 0x1ecd2ca pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r13 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1eccb21 vmovss 0x10(%r13), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1eccb2c vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r13), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r13), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r13), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x90eea(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x94a2d(%rip), %ymm13 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ecd2b5 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1eccc05 movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1eccd8a testb $0x8, %bpl jne 0x1eccd15 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1eccef2 kmovb %k0, %edi testb $0x8, %bpl jne 0x1eccede testq %rdi, %rdi je 0x1eccee8 andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1eccf0b testl %eax, %eax je 0x1eccc22 jmp 0x1ecd1a2 testb $0x8, %bpl jne 0x1eccd15 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1eccd15 vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1eccd15 movl $0x6, %eax jmp 0x1eccd7d movl $0x4, %eax jmp 0x1eccd7d vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1eccd11 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1eccf6f vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1eccd7d vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1eccff8 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1eccd7d movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ecd0b4 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1eccd7d valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x53dfc(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x94553(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x9454c(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x9453f(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x455fd(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ecd0fa popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ecd164 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ecd0ac cmpl $0x6, %eax jne 0x1eccc05 movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1eccc05 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x50, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r12d, %r12d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ecd22a movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r13, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r12b incq %r14 cmpq $0x4, %r14 jne 0x1ecd1ea movq 0x28(%rsp), %rax orb %r12b, %al incq %r15 addq $0x60, %rbp cmpq 0x20(%rsp), %r15 jne 0x1ecd1df testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x90812(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x94355(%rip), %ymm13 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1eccc05 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ecd29f vmovss 0x10(%r13), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ecd2aa vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1eccb8f movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiMBIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ece673 xorl %eax, %eax jmp 0x1ecee5c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r13 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ece6b3 vmovss 0x10(%r13), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ece6be vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r13), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r13), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r13), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8f358(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x92e9b(%rip), %ymm13 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ecee47 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ece797 movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ece91c testb $0x8, %bpl jne 0x1ece8a7 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm8, %ymm3, %ymm3 vmovups 0x40(%rsp), %ymm8 vmaxps %ymm5, %ymm8, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm8, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps 0x180(%rsp), %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ecea84 kmovb %k0, %edi testb $0x8, %bpl jne 0x1ecea70 testq %rdi, %rdi je 0x1ecea7a andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm12, %ymm2 vpternlogd $0xf8, %ymm13, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ecea9d testl %eax, %eax je 0x1ece7b4 jmp 0x1eced34 testb $0x8, %bpl jne 0x1ece8a7 movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%r13), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * 
ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmovups 0x80(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x60(%rsp), %ymm9 vmaxps %ymm3, %ymm9, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmovups 0x40(%rsp), %ymm11 vmaxps %ymm5, %ymm11, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm11, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0x140(%rsp), %ymm2, %k0 vcmpnleps 0x120(%rsp), %ymm1, %k1 vcmpltps 0x100(%rsp), %ymm4, %k2 vcmpnleps 0xe0(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0xc0(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0xa0(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %edi movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ece8a7 vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %dil, %al movzbl %al, %edi jmp 0x1ece8a7 movl $0x6, %eax jmp 0x1ece90f movl $0x4, %eax jmp 0x1ece90f vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ece8a3 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1eceb01 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ece90f vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1eceb8a vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ece90f movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ecec46 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq 
%r10, %rdi jmp 0x1ece90f valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x5226a(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x929c1(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x929ba(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x929ad(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x43a6b(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ecec8c popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ececf6 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ecec3e cmpl $0x6, %eax jne 0x1ece797 movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ece797 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x50, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r12d, %r12d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ecedbc movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r13, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r12b incq %r14 cmpq $0x4, %r14 jne 0x1eced7c movq 0x28(%rsp), %rax orb %r12b, %al incq %r15 addq $0x60, %rbp cmpq 0x20(%rsp), %r15 jne 0x1eced71 testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8ec80(%rip), %ymm12 # 0x1f5da70 vpbroadcastd 0x927c3(%rip), %ymm13 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ece797 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ecee31 vmovss 0x10(%r13), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ecee3c vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ece721 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1048576, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
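Before the pop loop, TravRay precomputes per-ray reciprocal directions; in the listing below this shows up as vrcp14ps followed by one Newton-Raphson refinement (vfnmadd213ps / vfmadd132ps), with tiny direction components clamped beforehand, and the node test then costs one FMA per bounding plane. Here is a scalar sketch of that precomputation and the resulting slab test, under the assumption that the node test is the standard min/max slab method (Ray, Box, and the helper names are illustrative, not Embree's):

// Scalar sketch of per-ray precomputation plus the slab test it enables.
#include <algorithm>
#include <cmath>

struct Ray { float org[3], dir[3], rdir[3], org_rdir[3], tnear, tfar; };
struct Box { float lo[3], hi[3]; };

// Precompute reciprocal direction once per ray (the assembly uses a fast
// reciprocal plus one Newton step instead of a full divide).
inline void precompute(Ray& r) {
  for (int i = 0; i < 3; ++i) {
    float d = r.dir[i];
    // nudge near-zero components so 1/d stays finite (mirrors the clamp
    // of tiny |dir| values visible before vrcp14ps in the listing)
    if (std::fabs(d) < 1e-18f) d = std::copysign(1e-18f, d);
    r.rdir[i]     = 1.0f / d;
    r.org_rdir[i] = r.org[i] * r.rdir[i]; // lets each plane test be one FMA
  }
}

// Standard slab test: per-axis entry/exit distances, then interval overlap.
inline bool hitSlab(const Ray& r, const Box& b) {
  float t0 = r.tnear, t1 = r.tfar;
  for (int i = 0; i < 3; ++i) {
    float tlo = b.lo[i] * r.rdir[i] - r.org_rdir[i]; // == (lo - org) / dir
    float thi = b.hi[i] * r.rdir[i] - r.org_rdir[i];
    t0 = std::max(t0, std::min(tlo, thi));
    t1 = std::min(t1, std::max(tlo, thi));
  }
  return t0 <= t1;
}

Storing org*rdir (negated in the actual registers) is what turns each plane test into bounds*rdir - org_rdir, a single fused multiply-add, which is the vfmadd213ps pattern repeated per axis in the disassembly.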
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eceeab pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x25f8, %rsp # imm = 0x25F8 movq 0x70(%rax), %rax movq %rax, 0x2b0(%rsp) movl $0x0, 0x2b8(%rsp) cmpq $0x8, %rax jne 0x1eceeaf addq $0x25f8, %rsp # imm = 0x25F8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rdx, %r8 vmovaps 0x10(%rsi), %xmm0 leaq 0x2c0(%rsp), %r10 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x51fed(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x22107(%rip), %xmm26 # 0x1ef0fe8 vcmpltps %xmm26, %xmm4, %k1 vmovaps %xmm26, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x1d816(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r11d, %r11d vucomiss %xmm1, %xmm0 setb %r11b vbroadcastss %xmm0, %ymm27 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm28 vbroadcastss 0x51fb5(%rip), %ymm5 # 0x1f20edc vshufpd $0x1, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[1,0] vpermps %ymm0, %ymm5, %ymm29 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss 0x437c5(%rip), %ymm8 # 0x1f12704 vbroadcastss %xmm0, %ymm7 vpermps %ymm0, %ymm8, %ymm8 vpermps %ymm0, %ymm5, %ymm5 xorl %eax, %eax vucomiss %xmm1, %xmm4 setb %al xorl %ecx, %ecx vucomiss %xmm1, %xmm6 setb %cl vbroadcastss %xmm3, %ymm0 shll $0x3, %r11d movq %r11, %rdi xorq $0x8, %rdi leal 0x10(,%rax,8), %r9d movq %r9, %rbx xorq $0x8, %rbx leal 0x20(,%rcx,8), %r14d movq %r14, %r15 xorq $0x8, %r15 vbroadcastss 0x51f29(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm7, %ymm30 vxorps %ymm1, %ymm8, %ymm31 vxorps %ymm1, %ymm5, %ymm9 vbroadcastss %xmm2, %ymm10 vpmovsxbd 0x8eabb(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x925fe(%rip), %ymm12 # 0x1f615bc leaq 0x2b0(%rsp), %r12 movq %rdx, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %r11, 0x18(%rsp) vmovups %ymm27, 0x180(%rsp) vmovups %ymm28, 0x160(%rsp) vmovups %ymm29, 0x140(%rsp) vmovups %ymm30, 0x120(%rsp) vmovups %ymm31, 0x100(%rsp) movq %rdi, 0x60(%rsp) movq %r9, 0x58(%rsp) movq %rbx, 0x50(%rsp) movq %r14, 0x48(%rsp) movq %r15, 0x40(%rsp) vmovups %ymm9, 0x270(%rsp) vmovups %ymm10, 0x250(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %r12, %r10 je 0x1ecee9a vmovss -0x8(%r10), %xmm2 addq $-0x10, %r10 vucomiss %xmm1, %xmm2 ja 0x1ecf02d movq (%r10), %rdx testb $0x8, %dl jne 0x1ecf140 movq %rdx, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vmovq (%rax), %xmm1 vmovq 0x8(%rax), %xmm2 vpcmpleub %xmm2, %xmm1, %k1 vbroadcastss 0x30(%rax), %ymm1 vbroadcastss 0x3c(%rax), %ymm2 vpmovzxbd (%rax,%r11), %ymm3 vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm1, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm1 vpmovzxbd (%rax,%rdi), %ymm4 vcvtdq2ps %ymm4, %ymm4 vfmadd213ps %ymm1, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm1 vbroadcastss 0x34(%rax), %ymm1 vbroadcastss 0x40(%rax), %ymm2 vpmovzxbd (%rax,%r9), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm1, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm1 vpmovzxbd (%rax,%rbx), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm1, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm6) + ymm1 vbroadcastss 0x38(%rax), %ymm1 vbroadcastss 0x44(%rax), %ymm2 vpmovzxbd (%rax,%r14), %ymm7 vcvtdq2ps %ymm7, %ymm7 vfmadd213ps %ymm1, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm7) + ymm1 vpmovzxbd (%rax,%r15), %ymm8 vcvtdq2ps %ymm8, %ymm8 vfmadd213ps %ymm1, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm1 vfmadd213ps %ymm30, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm3) + ymm30 vfmadd213ps %ymm31, %ymm28, %ymm5 # ymm5 = (ymm28 * ymm5) + ymm31 vpmaxsd %ymm5, %ymm3, 
%ymm1 vfmadd213ps %ymm9, %ymm29, %ymm7 # ymm7 = (ymm29 * ymm7) + ymm9 vpmaxsd %ymm10, %ymm7, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm13 vfmadd213ps %ymm30, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm4) + ymm30 vfmadd213ps %ymm31, %ymm28, %ymm6 # ymm6 = (ymm28 * ymm6) + ymm31 vpminsd %ymm6, %ymm4, %ymm1 vfmadd213ps %ymm9, %ymm29, %ymm8 # ymm8 = (ymm29 * ymm8) + ymm9 vpminsd %ymm0, %ymm8, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm13, %k0 {%k1} kmovb %k0, %r13d testb $0x8, %dl jne 0x1ecf1a0 testq %r13, %r13 je 0x1ecf1a7 andq $-0x10, %rdx vmovdqu (%rdx), %ymm1 vmovdqu 0x20(%rdx), %ymm2 vmovdqa %ymm11, %ymm3 vpternlogd $0xf8, %ymm12, %ymm13, %ymm3 kmovd %r13d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) xorl %eax, %eax blsrq %r13, %rcx jne 0x1ecf1ae testl %eax, %eax je 0x1ecf049 jmp 0x1ecf4a9 movl $0x6, %eax jmp 0x1ecf193 movl $0x4, %eax jmp 0x1ecf193 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ecf208 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rdx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r10) vpermd %ymm13, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r10) addq $0x10, %r10 jmp 0x1ecf193 vpshufd $0xaa, %ymm3, %ymm6 # ymm6 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm7 vpermt2q %ymm2, %ymm6, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm6, %ymm5, %ymm7 vpmaxsd %ymm6, %ymm5, %ymm6 vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 blsrq %rcx, %rcx jne 0x1ecf28a vpermi2q %ymm2, %ymm1, %ymm7 vmovq %xmm7, %rdx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, (%r10) vpermd %ymm13, %ymm6, %ymm3 vmovd %xmm3, 0x8(%r10) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r10) vpermd %ymm13, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r10) addq $0x20, %r10 jmp 0x1ecf193 vmovdqa %ymm13, %ymm14 vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm4, %ymm7, %ymm8 vpmaxsd %ymm4, %ymm7, %ymm7 vpminsd %ymm7, %ymm5, %ymm4 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm6, %ymm5 vpmaxsd %ymm7, %ymm6, %ymm7 blsrq %rcx, %rcx jne 0x1ecf380 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rdx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r10) vmovdqa %ymm14, %ymm13 vpermd %ymm14, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r10) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm5, %ymm3 vmovq %xmm3, 0x10(%r10) vpermd %ymm14, %ymm5, %ymm3 vmovd %xmm3, 0x18(%r10) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r10) vpermd %ymm14, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r10) addq $0x30, %r10 movq 0x28(%rsp), %r8 movq 0x20(%rsp), %rsi vbroadcastss 0x21c9a(%rip), %xmm26 # 0x1ef0fe8 movq 0x18(%rsp), %r11 vmovups 0x180(%rsp), %ymm27 vmovups 0x160(%rsp), %ymm28 vmovups 0x140(%rsp), %ymm29 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 jmp 0x1ecf193 valignd $0x3, %ymm3, %ymm3, %ymm6 # ymm6 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x51b30(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x92287(%rip), %ymm13 # 0x1f61620 vpermt2d %ymm8, %ymm13, %ymm3 vpmovsxbd 0x92280(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm5, %ymm8, %ymm3 vpmovsxbd 0x92273(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, 
%rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x43331(%rip), %ymm3 # 0x1f12704 vpermd %ymm6, %ymm3, %ymm3 valignd $0x1, %ymm6, %ymm6, %ymm6 # ymm6 = ymm6[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm6, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ecf3c6 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r10) vpermd %ymm14, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r10) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r10 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ecf429 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rdx movq 0x28(%rsp), %r8 movq 0x20(%rsp), %rsi vbroadcastss 0x21b76(%rip), %xmm26 # 0x1ef0fe8 movq 0x18(%rsp), %r11 vmovups 0x180(%rsp), %ymm27 vmovups 0x160(%rsp), %ymm28 vmovups 0x140(%rsp), %ymm29 vmovups 0x120(%rsp), %ymm30 vmovups 0x100(%rsp), %ymm31 vmovdqa %ymm14, %ymm13 jmp 0x1ecf193 cmpl $0x6, %eax jne 0x1ecf028 movq %r13, 0x68(%rsp) vmovdqu %ymm13, 0x290(%rsp) movl %edx, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x70(%rsp) je 0x1ecfdec andq $-0x10, %rdx movq (%r8), %r13 xorl %eax, %eax movq %rdx, 0x10(%rsp) movq %rax, 0x78(%rsp) leaq (%rax,%rax,4), %rax shlq $0x4, %rax movq 0x10(%rsp), %rcx movl 0x30(%rcx,%rax), %edx movq 0x228(%r13), %rcx movq (%rcx,%rdx,8), %rdx movq 0x10(%rsp), %rdi movl (%rdi,%rax), %r9d movq 0x10(%rsp), %rdi movl 0x4(%rdi,%rax), %edi vmovups (%rdx,%r9,4), %xmm0 movq 0x10(%rsp), %r9 movl 0x10(%r9,%rax), %r9d vmovups (%rdx,%r9,4), %xmm1 movq 0x10(%rsp), %r9 movl 0x20(%r9,%rax), %r9d vmovups (%rdx,%r9,4), %xmm2 movq 0x10(%rsp), %rdx movl 0x34(%rdx,%rax), %edx movq (%rcx,%rdx,8), %rdx vmovups (%rdx,%rdi,4), %xmm3 movq 0x10(%rsp), %rdi movl 0x14(%rdi,%rax), %edi vmovups (%rdx,%rdi,4), %xmm4 movq 0x10(%rsp), %rdi movl 0x24(%rdi,%rax), %edi vmovups (%rdx,%rdi,4), %xmm5 movq 0x10(%rsp), %rdx movl 0x38(%rdx,%rax), %edx movq (%rcx,%rdx,8), %rdx movq 0x10(%rsp), %rdi movl 0x8(%rdi,%rax), %edi vmovups (%rdx,%rdi,4), %xmm6 movq 0x10(%rsp), %rdi movl 0x18(%rdi,%rax), %edi vmovups (%rdx,%rdi,4), %xmm7 movq 0x10(%rsp), %rdi movl 0x28(%rdi,%rax), %edi vmovups (%rdx,%rdi,4), %xmm8 movq 0x10(%rsp), %rdx movl 0x3c(%rdx,%rax), %edx movq (%rcx,%rdx,8), %rcx movq 0x10(%rsp), %rdx movl 0xc(%rdx,%rax), %edx vmovups (%rcx,%rdx,4), %xmm9 movq 0x10(%rsp), %rdx movl 0x1c(%rdx,%rax), %edx vmovups (%rcx,%rdx,4), %xmm10 movq 0x10(%rsp), %rdx movl 0x2c(%rdx,%rax), %edx vmovups (%rcx,%rdx,4), %xmm11 movq 0x10(%rsp), %rdx vunpcklps %xmm6, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] vunpckhps %xmm6, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3] vunpcklps %xmm9, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] vunpckhps %xmm9, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3] vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] vunpcklps %xmm6, %xmm12, %xmm3 # xmm3 = xmm12[0],xmm6[0],xmm12[1],xmm6[1] vunpckhps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm6[2],xmm12[3],xmm6[3] vunpcklps %xmm7, %xmm1, %xmm9 # xmm9 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3] vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1] vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] vunpcklps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] vunpcklps %xmm7, 
%xmm9, %xmm10 # xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1] vunpckhps %xmm7, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm7[2],xmm9[3],xmm7[3] vunpcklps %xmm8, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] vunpckhps %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3] vunpcklps %xmm11, %xmm5, %xmm7 # xmm7 = xmm5[0],xmm11[0],xmm5[1],xmm11[1] vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3] vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] vunpcklps %xmm7, %xmm4, %xmm11 # xmm11 = xmm4[0],xmm7[0],xmm4[1],xmm7[1] vmovaps 0x30(%rdx,%rax), %xmm5 vmovaps %xmm5, 0x240(%rsp) vmovaps 0x40(%rdx,%rax), %xmm5 vmovaps %xmm5, 0xf0(%rsp) vunpckhps %xmm7, %xmm4, %xmm13 # xmm13 = xmm4[2],xmm7[2],xmm4[3],xmm7[3] movb $0x0, 0xf(%rsp) vbroadcastss (%rsi), %xmm14 vbroadcastss 0x4(%rsi), %xmm15 vbroadcastss 0x8(%rsi), %xmm16 vbroadcastss 0x10(%rsi), %xmm5 vbroadcastss 0x14(%rsi), %xmm7 leaq 0xf(%rsp), %rax movq %rax, 0x1d0(%rsp) vbroadcastss 0x18(%rsi), %xmm9 vsubps %xmm14, %xmm3, %xmm4 vsubps %xmm15, %xmm6, %xmm6 vsubps %xmm16, %xmm0, %xmm8 vsubps %xmm14, %xmm10, %xmm17 vsubps %xmm15, %xmm12, %xmm19 vsubps %xmm16, %xmm1, %xmm20 vsubps %xmm14, %xmm11, %xmm14 vsubps %xmm15, %xmm13, %xmm21 vsubps %xmm16, %xmm2, %xmm22 vsubps %xmm4, %xmm14, %xmm11 vsubps %xmm6, %xmm21, %xmm13 vsubps %xmm8, %xmm22, %xmm12 vsubps %xmm17, %xmm4, %xmm15 vsubps %xmm19, %xmm6, %xmm18 vsubps %xmm20, %xmm8, %xmm16 vsubps %xmm14, %xmm17, %xmm0 vsubps %xmm21, %xmm19, %xmm1 vsubps %xmm22, %xmm20, %xmm2 vaddps %xmm4, %xmm14, %xmm3 vaddps %xmm6, %xmm21, %xmm10 vaddps %xmm8, %xmm22, %xmm23 vmulps %xmm12, %xmm10, %xmm24 vfmsub231ps %xmm23, %xmm13, %xmm24 # xmm24 = (xmm13 * xmm23) - xmm24 vmulps %xmm11, %xmm23, %xmm23 vfmsub231ps %xmm3, %xmm12, %xmm23 # xmm23 = (xmm12 * xmm3) - xmm23 vmulps %xmm3, %xmm13, %xmm3 vfmsub231ps %xmm10, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm10) - xmm3 vmulps %xmm3, %xmm9, %xmm3 vfmadd231ps %xmm23, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm23) + xmm3 vfmadd231ps %xmm24, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm24) + xmm3 vaddps %xmm17, %xmm4, %xmm10 vaddps %xmm19, %xmm6, %xmm23 vaddps %xmm20, %xmm8, %xmm24 vmulps %xmm16, %xmm23, %xmm25 vfmsub231ps %xmm24, %xmm18, %xmm25 # xmm25 = (xmm18 * xmm24) - xmm25 vmulps %xmm15, %xmm24, %xmm24 vfmsub231ps %xmm10, %xmm16, %xmm24 # xmm24 = (xmm16 * xmm10) - xmm24 vmulps %xmm18, %xmm10, %xmm10 vfmsub231ps %xmm23, %xmm15, %xmm10 # xmm10 = (xmm15 * xmm23) - xmm10 vmulps %xmm10, %xmm9, %xmm10 vfmadd231ps %xmm24, %xmm7, %xmm10 # xmm10 = (xmm7 * xmm24) + xmm10 vfmadd231ps %xmm25, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm25) + xmm10 vbroadcastss 0x1c29b(%rip), %xmm25 # 0x1eeba20 vaddps %xmm14, %xmm17, %xmm14 vaddps %xmm21, %xmm19, %xmm17 vaddps %xmm22, %xmm20, %xmm19 vmulps %xmm2, %xmm17, %xmm20 vfmsub231ps %xmm19, %xmm1, %xmm20 # xmm20 = (xmm1 * xmm19) - xmm20 vmulps %xmm0, %xmm19, %xmm19 vfmsub231ps %xmm14, %xmm2, %xmm19 # xmm19 = (xmm2 * xmm14) - xmm19 vmulps %xmm1, %xmm14, %xmm14 vfmsub231ps %xmm17, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm17) - xmm14 vmulps %xmm14, %xmm9, %xmm21 vfmadd231ps %xmm19, %xmm7, %xmm21 # xmm21 = (xmm7 * xmm19) + xmm21 vfmadd231ps %xmm20, %xmm5, %xmm21 # xmm21 = (xmm5 * xmm20) + xmm21 vaddps %xmm3, %xmm10, %xmm14 vaddps %xmm14, %xmm21, %xmm14 vandps 0x516e5(%rip){1to4}, %xmm14, %xmm17 # 0x1f20ec4 vmulps 0x516e3(%rip){1to4}, %xmm17, %xmm19 # 0x1f20ecc vminps %xmm10, %xmm3, %xmm20 vminps %xmm21, %xmm20, %xmm20 vbroadcastss 0x516c1(%rip), %xmm22 # 0x1f20ec0 vxorps %xmm22, %xmm19, %xmm22 vcmpnltps %xmm22, %xmm20, %k0 vmaxps %xmm10, %xmm3, %xmm20 
vmaxps %xmm21, %xmm20, %xmm20 vcmpleps %xmm19, %xmm20, %k1 korw %k1, %k0, %k0 kshiftlb $0x4, %k0, %k0 kshiftrb $0x4, %k0, %k0 kortestb %k0, %k0 movl $0x1, %edi je 0x1ecfdd9 vmulps %xmm18, %xmm12, %xmm19 vmulps %xmm16, %xmm11, %xmm20 vmulps %xmm15, %xmm13, %xmm21 vmulps %xmm1, %xmm16, %xmm22 vmulps %xmm2, %xmm15, %xmm23 vmulps %xmm0, %xmm18, %xmm24 vfmsub213ps %xmm19, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) - xmm19 vfmsub213ps %xmm20, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm12) - xmm20 vfmsub213ps %xmm21, %xmm18, %xmm11 # xmm11 = (xmm18 * xmm11) - xmm21 vfmsub213ps %xmm22, %xmm18, %xmm2 # xmm2 = (xmm18 * xmm2) - xmm22 vfmsub213ps %xmm23, %xmm16, %xmm0 # xmm0 = (xmm16 * xmm0) - xmm23 vfmsub213ps %xmm24, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm1) - xmm24 vbroadcastss 0x51634(%rip), %xmm18 # 0x1f20ec4 vandps %xmm18, %xmm19, %xmm15 vandps %xmm18, %xmm22, %xmm16 vcmpltps %xmm16, %xmm15, %k1 vandps %xmm18, %xmm20, %xmm15 vandps %xmm18, %xmm23, %xmm16 vcmpltps %xmm16, %xmm15, %k2 vandps %xmm18, %xmm21, %xmm15 vandps %xmm18, %xmm24, %xmm16 vcmpltps %xmm16, %xmm15, %k3 vmovaps %xmm13, %xmm2 {%k1} vmovaps %xmm12, %xmm0 {%k2} vmovaps %xmm11, %xmm1 {%k3} vmulps %xmm1, %xmm9, %xmm9 vfmadd213ps %xmm9, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm7) + xmm9 vfmadd213ps %xmm7, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm5) + xmm7 vaddps %xmm5, %xmm5, %xmm5 vmulps %xmm1, %xmm8, %xmm7 vfmadd213ps %xmm7, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm7 vfmadd213ps %xmm6, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm4) + xmm6 vaddps %xmm4, %xmm4, %xmm4 vrcp14ps %xmm5, %xmm6 vmovaps %xmm6, %xmm7 vfnmadd213ps 0x1ce01(%rip){1to4}, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + mem vfmadd132ps %xmm6, %xmm6, %xmm7 # xmm7 = (xmm7 * xmm6) + xmm6 vmulps %xmm7, %xmm4, %xmm6 vcmpgeps 0xc(%rsi){1to4}, %xmm6, %k1 vbroadcastss 0x51593(%rip), %xmm4 # 0x1f20ec0 vxorps %xmm4, %xmm5, %xmm4 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 {%k1} vcmpneqps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ecfdd9 vmovaps %xmm3, 0x1a0(%rsp) vmovaps %xmm10, 0x1b0(%rsp) vmovaps %xmm14, 0x1c0(%rsp) movq %rax, 0x1d0(%rsp) kmovb %k1, 0x1d8(%rsp) vmovaps %xmm6, 0x200(%rsp) vmovaps %xmm2, 0x210(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps %xmm1, 0x230(%rsp) vcmpnltps %xmm26, %xmm17, %k2 vrcp14ps %xmm14, %xmm0 vbroadcastss 0x1cd60(%rip), %xmm1 # 0x1eec714 vfnmadd213ps %xmm1, %xmm0, %xmm14 # xmm14 = -(xmm0 * xmm14) + xmm1 vfmadd132ps %xmm0, %xmm0, %xmm14 {%k2} {z} # xmm14 {%k2} {z} = (xmm14 * xmm0) + xmm0 vmulps %xmm3, %xmm14, %xmm0 vminps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x1e0(%rsp) vmulps %xmm10, %xmm14, %xmm0 vminps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x1f0(%rsp) kmovd %k1, %r15d vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax cmovel %r15d, %eax movzbl %al, %eax tzcntq %rax, %r12 movl 0x240(%rsp,%r12,4), %ebx movq 0x1e8(%r13), %rax movq (%rax,%rbx,8), %r14 movl 0x24(%rsi), %eax testl %eax, 0x34(%r14) je 0x1ecfa4f movq 0x10(%r8), %rbp cmpq $0x0, 0x10(%rbp) jne 0x1ecfaba cmpq $0x0, 0x40(%r14) jne 0x1ecfaba xorl %eax, %eax jmp 0x1ecfa6a shlxl %r12d, %edi, %eax kmovd %eax, %k0 movzbl %r15b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %r15d movb $0x1, %al testb %al, %al je 0x1ecfd62 testb %r15b, %r15b je 0x1ecfdd9 kmovd %r15d, %k1 vblendmps %xmm6, %xmm25, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %r15b, %al movzbl %al, %eax movzbl %r15b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %r12d jmp 0x1ecfa1d vmovss 0x1e0(%rsp,%r12,4), %xmm0 vmovss 0x1f0(%rsp,%r12,4), %xmm1 movq 0x8(%r8), %rax movl 0xf0(%rsp,%r12,4), %ecx vmovss 0x210(%rsp,%r12,4), %xmm2 vmovss 0x220(%rsp,%r12,4), %xmm3 vmovss 0x230(%rsp,%r12,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movl %ebx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%rsi), %xmm0 vmovss %xmm0, 0x30(%rsp) vmovss 0x200(%rsp,%r12,4), %xmm0 vmovss %xmm0, 0x20(%rsi) movl $0xffffffff, 0x34(%rsp) # imm = 0xFFFFFFFF leaq 0x34(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%r14), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %rsi, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq 0x40(%r14), %rax testq %rax, %rax vmovaps %xmm6, 0xe0(%rsp) je 0x1ecfc43 leaq 0x80(%rsp), %rdi movq %r10, 0x38(%rsp) vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 movl $0x1, %edi vbroadcastss 0x1be34(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 vmovups 0x140(%rsp), %ymm29 vmovups 0x160(%rsp), %ymm28 vmovups 0x180(%rsp), %ymm27 movq 0x18(%rsp), %r11 vbroadcastss 0x213c5(%rip), %xmm26 # 0x1ef0fe8 movq 0x38(%rsp), %r10 movq 0x20(%rsp), %rsi movq 0x28(%rsp), %r8 movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ecfd31 movq 0x10(%rbp), %rax testq %rax, %rax je 0x1ecfcd6 testb $0x2, (%rbp) jne 0x1ecfc5d testb $0x40, 0x3e(%r14) je 0x1ecfcc9 leaq 0x80(%rsp), %rdi movq %r10, %r14 vzeroupper callq *%rax vmovaps 0xe0(%rsp), %xmm6 movl $0x1, %edi vbroadcastss 0x1bd9b(%rip), %xmm25 # 0x1eeba20 vmovups 0x100(%rsp), %ymm31 vmovups 0x120(%rsp), %ymm30 vmovups 0x140(%rsp), %ymm29 vmovups 0x160(%rsp), %ymm28 vmovups 0x180(%rsp), %ymm27 movq 0x18(%rsp), %r11 vbroadcastss 0x2132c(%rip), %xmm26 # 0x1ef0fe8 movq %r14, %r10 movq 0x20(%rsp), %rsi movq 0x28(%rsp), %r8 movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ecfd31 movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ecfd3c vmovss 0x30(%rsp), %xmm0 vmovss %xmm0, 0x20(%rsi) shlxl %r12d, %edi, %eax kmovd %eax, %k0 movzbl %r15b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vcmpleps 0x20(%rsi){1to4}, %xmm6, %k1 kandb %k1, %k0, %k0 jmp 0x1ecfa64 vmovss 0x1e0(%rsp,%r12,4), %xmm0 vmovss 0x1f0(%rsp,%r12,4), %xmm1 vmovss 0x200(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x20(%rsi) vmovss 0x210(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x30(%rsi) vmovss 0x220(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x34(%rsi) vmovss 0x230(%rsp,%r12,4), %xmm2 vmovss %xmm2, 0x38(%rsi) vmovss %xmm0, 0x3c(%rsi) vmovss %xmm1, 0x40(%rsi) movl 0xf0(%rsp,%r12,4), %eax movl %eax, 0x44(%rsi) movl %ebx, 0x48(%rsi) movq 0x8(%r8), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rsi) movl 0x4(%rax), %eax movl %eax, 0x50(%rsi) movq 0x78(%rsp), %rax incq %rax cmpq 0x70(%rsp), %rax jne 0x1ecf4e2 vbroadcastss 0x20(%rsi), %ymm0 movq 0x60(%rsp), %rdi movq 
0x58(%rsp), %r9 movq 0x50(%rsp), %rbx movq 0x48(%rsp), %r14 movq 0x40(%rsp), %r15 vmovups 0x270(%rsp), %ymm9 vmovups 0x250(%rsp), %ymm10 vpmovsxbd 0x8dc4a(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x9178d(%rip), %ymm12 # 0x1f615bc leaq 0x2b0(%rsp), %r12 vmovdqu 0x290(%rsp), %ymm13 movq 0x68(%rsp), %r13 jmp 0x1ecf028
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
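The assembly block above is the tail of the closest-hit node traverser for this kernel: the vpminsd/vpmaxsd runs form a branchless sorting network over (child, distance) pairs, the vpermt2q/vpermd shuffles extract the matching child pointers, and the sorted far hits are stored to the stack in 16-byte (ptr, dist) items while the nearest child becomes the new current node. A minimal scalar sketch of that policy follows; StackItem and traverseClosestHitSketch are illustrative stand-ins, not Embree's StackItemT<NodeRef> API, and the real code replaces std::sort with the SIMD min/max network seen above.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct StackItem { uint64_t ptr; float dist; };

    // Given numHits >= 1 hit children with entry distances, descend into the
    // nearest child and push the rest so the nearest remaining is popped first.
    static uint64_t traverseClosestHitSketch(StackItem* children, size_t numHits,
                                             StackItem*& stackPtr)
    {
      // Sort hit children by entry distance (nearest first).
      std::sort(children, children + numHits,
                [](const StackItem& a, const StackItem& b) { return a.dist < b.dist; });

      // Push far children first so the nearest pushed child sits on top.
      for (size_t i = numHits; i-- > 1; )
        *stackPtr++ = children[i];

      return children[0].ptr; // continue traversal with the nearest child
    }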
embree::avx512::BVHNIntersector1<8, 1048576, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMiIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];           // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack + 1;      // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack + stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ecfe5b xorl %eax, %eax jmp 0x1ed063c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r12 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ecfe9c vmovss 0x10(%r12), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ecfea7 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r12), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r12), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r12), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8db6d(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x916b0(%rip), %ymm12 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ed0627 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ecff82 movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ed0128 testb $0x8, %bpl jne 0x1ed00ba movq %rbp, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vbroadcastss 0x30(%rax), %ymm0 vbroadcastss 0x3c(%rax), %ymm1 vmovq (%rax), %xmm2 vpmovzxbd %xmm2, %ymm3 # ymm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vmovq 0x8(%rax), %xmm4 vpmovzxbd %xmm4, %ymm5 # ymm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero vcvtdq2ps %ymm5, %ymm5 vbroadcastss 0x34(%rax), %ymm6 vfmadd213ps %ymm0, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm0 vbroadcastss 0x40(%rax), %ymm0 vpmovzxbd 0x10(%rax), %ymm1 vcvtdq2ps %ymm1, %ymm1 vfmadd213ps %ymm6, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm6 vpmovzxbd 0x18(%rax), %ymm7 vcvtdq2ps %ymm7, %ymm7 vbroadcastss 0x38(%rax), %ymm8 vfmadd213ps %ymm6, %ymm0, %ymm7 # ymm7 = (ymm0 * ymm7) + ymm6 vbroadcastss 0x44(%rax), %ymm0 vpmovzxbd 0x20(%rax), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm8, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + ymm8 vpmovzxbd 0x28(%rax), %ymm9 vcvtdq2ps %ymm9, %ymm9 vfmadd213ps %ymm8, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm8 vmovups 0x80(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm0 vminps %ymm5, %ymm0, %ymm0 vsubps %ymm8, %ymm0, %ymm0 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm1 vminps %ymm7, %ymm1, %ymm1 vsubps %ymm8, %ymm1, %ymm1 vmovups 0x40(%rsp), %ymm7 vmaxps %ymm6, %ymm7, %ymm6 vminps %ymm9, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmulps %ymm0, %ymm0, %ymm0 vmulps %ymm1, %ymm1, %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmulps %ymm6, %ymm6, %ymm1 vaddps %ymm1, %ymm0, %ymm7 vcmpleps %ymm5, %ymm3, %k1 vpcmpleub %xmm4, %xmm2, %k0 vcmpleps 0x180(%rsp), %ymm7, %k1 {%k1} kandb %k0, %k1, %k0 kmovb %k0, %edi testb $0x8, %bpl jne 0x1ed0284 testq %rdi, %rdi je 0x1ed028e andq 
$-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ed0298 testl %eax, %eax je 0x1ecff9f jmp 0x1ed0513 testb $0x8, %bpl jne 0x1ed00ba movq %rbp, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vmovq (%rax), %xmm0 vmovq 0x8(%rax), %xmm1 vbroadcastss 0x30(%rax), %ymm2 vbroadcastss 0x3c(%rax), %ymm3 vpcmpleub %xmm1, %xmm0, %k1 vpmovzxbd %xmm0, %ymm0 # ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero vcvtdq2ps %ymm0, %ymm0 vfmadd213ps %ymm2, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm2 vpmovzxbd %xmm1, %ymm1 # ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero vcvtdq2ps %ymm1, %ymm1 vbroadcastss 0x34(%rax), %ymm4 vbroadcastss 0x40(%rax), %ymm5 vpmovzxbd 0x10(%rax), %ymm6 vfmadd213ps %ymm2, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm1) + ymm2 vcvtdq2ps %ymm6, %ymm2 vfmadd213ps %ymm4, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm4 vpmovzxbd 0x18(%rax), %ymm3 vcvtdq2ps %ymm3, %ymm3 vbroadcastss 0x38(%rax), %ymm6 vbroadcastss 0x44(%rax), %ymm7 vpmovzxbd 0x20(%rax), %ymm8 vfmadd213ps %ymm4, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm4 vcvtdq2ps %ymm8, %ymm4 vfmadd213ps %ymm6, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm4) + ymm6 vpmovzxbd 0x28(%rax), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm6, %ymm7, %ymm5 # ymm5 = (ymm7 * ymm5) + ymm6 vmovups 0x80(%rsp), %ymm7 vmaxps %ymm0, %ymm7, %ymm6 vminps %ymm1, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm2, %ymm8, %ymm7 vminps %ymm3, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x40(%rsp), %ymm9 vmaxps %ymm4, %ymm9, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpltps 0x140(%rsp), %ymm1, %k0 vcmpnleps 0x120(%rsp), %ymm0, %k2 korb %k0, %k2, %k0 vcmpnleps 0xe0(%rsp), %ymm2, %k2 korb %k2, %k0, %k0 vcmpltps 0x100(%rsp), %ymm3, %k2 vcmpltps 0xc0(%rsp), %ymm5, %k3 vcmpnleps 0xa0(%rsp), %ymm4, %k4 korb %k4, %k2, %k2 korb %k2, %k0, %k0 korb %k3, %k0, %k0 knotb %k0, %k0 kmovd %k0, %eax vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ed00ba movl $0x6, %eax jmp 0x1ed011b movl $0x4, %eax jmp 0x1ed011b vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed02f5 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ed011b vpshufd $0xaa, %ymm2, %ymm5 # ymm5 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm6 vpermt2q %ymm1, %ymm5, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm5, %ymm4, %ymm6 vpmaxsd %ymm5, %ymm4, %ymm5 vpminsd %ymm5, %ymm3, %ymm4 vpmaxsd %ymm5, %ymm3, %ymm5 blsrq %rcx, %rcx jne 0x1ed0377 vpermi2q %ymm1, 
%ymm0, %ymm6 vmovq %xmm6, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm5, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed011b movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm6, %ymm7 vpmaxsd %ymm3, %ymm6, %ymm6 vpminsd %ymm6, %ymm4, %ymm3 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm6 blsrq %rcx, %rcx jne 0x1ed042c vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm4, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm4, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq %r10, %rdi jmp 0x1ed011b valignd $0x3, %ymm2, %ymm2, %ymm5 # ymm5 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x50a84(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x911db(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x911d4(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm4, %ymm7, %ymm2 vpmovsxbd 0x911c7(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x42285(%rip), %ymm2 # 0x1f12704 vpermd %ymm5, %ymm2, %ymm2 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm5, %ymm4 vmovq %xmm4, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed0472 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed04d5 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ed0424 cmpl $0x6, %eax jne 0x1ecff82 movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ecff82 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x40, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r13d, %r13d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ed059b movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r12, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 cmpq $0x4, %r14 jne 0x1ed055b movq 0x28(%rsp), %rax orb %r13b, %al incq %r15 addq $0x50, %rbp cmpq 0x20(%rsp), %r15 jne 0x1ed0550 testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8d4a1(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x90fe4(%rip), %ymm12 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ecff82 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed0611 vmovss 0x10(%r12), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ed061c vmovaps 
0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ecff0c movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
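The traversal prologue in the listing above broadcasts the query point and radius, then tests each child box by clamping the point to the box, subtracting, and squaring and summing the per-axis offsets (the vmaxps/vminps/vsubps/vmulps/vaddps runs), accepting children whose squared distance stays within the squared cull radius. A scalar sketch of that sphere/box overlap test, with simplified stand-in types (the kernel evaluates eight boxes at once in AVX-512 registers):

    #include <algorithm>

    struct AABB { float lower[3], upper[3]; };

    // Squared distance from point p to box: |clamp(p, lower, upper) - p|^2.
    static inline float pointAABBDistSq(const float p[3], const AABB& box)
    {
      float d2 = 0.0f;
      for (int i = 0; i < 3; ++i) {
        const float c = std::min(std::max(p[i], box.lower[i]), box.upper[i]);
        const float d = c - p[i];
        d2 += d * d;            // accumulate per-axis squared offset
      }
      return d2;                // node is hit while d2 <= cull_radius
    }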
embree::avx512::BVHNIntersector1<8, 1048576, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];           // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack + 1;      // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack + stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed068b pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x25e8, %rsp # imm = 0x25E8 movq 0x70(%rax), %rax movq %rax, 0x2a0(%rsp) movl $0x0, 0x2a8(%rsp) cmpq $0x8, %rax jne 0x1ed068f addq $0x25e8, %rsp # imm = 0x25E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rsi, %r14 vmovaps 0x10(%rsi), %xmm0 leaq 0x2b0(%rsp), %r8 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x5080d(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x20928(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x1c037(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm1, %xmm0 setb %dil vbroadcastss %xmm0, %ymm19 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm20 vbroadcastss 0x507d7(%rip), %ymm5 # 0x1f20edc vshufpd $0x1, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[1,0] vpermps %ymm0, %ymm5, %ymm21 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss 0x41fe6(%rip), %ymm22 # 0x1f12704 vbroadcastss %xmm0, %ymm7 vpermps %ymm0, %ymm22, %ymm8 vpermps %ymm0, %ymm5, %ymm5 xorl %eax, %eax vucomiss %xmm1, %xmm4 setb %al xorl %ecx, %ecx vucomiss %xmm1, %xmm6 setb %cl vbroadcastss %xmm3, %ymm0 shll $0x3, %edi movq %rdi, %r9 xorq $0x8, %r9 leal 0x10(,%rax,8), %r10d movq %r10, %r11 xorq $0x8, %r11 leal 0x20(,%rcx,8), %esi movq %rsi, %r15 xorq $0x8, %r15 vbroadcastss 0x5074b(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm7, %ymm23 vxorps %ymm1, %ymm8, %ymm24 vxorps %ymm1, %ymm5, %ymm25 vbroadcastss %xmm2, %ymm26 vpmovsxbd 0x8d2d9(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x90e1b(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x50715(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x90e6b(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x90e69(%rip), %ymm31 # 0x1f61628 leaq 0x2a0(%rsp), %r13 movq %r15, 0x78(%rsp) vmovss 0x20(%r14), %xmm1 cmpq %r13, %r8 je 0x1ed067a vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ed07d2 movq (%r8), %rbx testb $0x8, %bl jne 0x1ed08e6 movq %rbx, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vmovq (%rax), %xmm1 vmovq 0x8(%rax), %xmm2 vpcmpleub %xmm2, %xmm1, %k1 vbroadcastss 0x30(%rax), %ymm1 vbroadcastss 0x3c(%rax), %ymm2 vpmovzxbd (%rax,%rdi), %ymm3 vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm1, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm1 vpmovzxbd (%rax,%r9), %ymm4 vcvtdq2ps %ymm4, %ymm4 vfmadd213ps %ymm1, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm1 vbroadcastss 0x34(%rax), %ymm1 vbroadcastss 0x40(%rax), %ymm2 vpmovzxbd (%rax,%r10), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm1, %ymm2, %ymm5 # ymm5 = (ymm2 * ymm5) + ymm1 vpmovzxbd (%rax,%r11), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm1, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm6) + ymm1 vbroadcastss 0x38(%rax), %ymm1 vbroadcastss 0x44(%rax), %ymm2 vpmovzxbd (%rax,%rsi), %ymm7 vcvtdq2ps %ymm7, %ymm7 vfmadd213ps %ymm1, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm7) + ymm1 vpmovzxbd (%rax,%r15), %ymm8 vcvtdq2ps %ymm8, %ymm8 vfmadd213ps %ymm1, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm1 vfmadd213ps %ymm23, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + ymm23 vfmadd213ps %ymm24, %ymm20, %ymm5 # ymm5 = (ymm20 * ymm5) + ymm24 vpmaxsd %ymm5, %ymm3, %ymm1 vfmadd213ps %ymm25, %ymm21, %ymm7 # ymm7 = (ymm21 * ymm7) + ymm25 vpmaxsd %ymm26, %ymm7, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm9 vfmadd213ps %ymm23, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm4) + ymm23 vfmadd213ps %ymm24, 
%ymm20, %ymm6 # ymm6 = (ymm20 * ymm6) + ymm24 vpminsd %ymm6, %ymm4, %ymm1 vfmadd213ps %ymm25, %ymm21, %ymm8 # ymm8 = (ymm21 * ymm8) + ymm25 vpminsd %ymm0, %ymm8, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm9, %k0 {%k1} kmovb %k0, %ebp testb $0x8, %bl jne 0x1ed0947 testq %rbp, %rbp je 0x1ed094e andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm27, %ymm3 vpternlogd $0xf8, %ymm28, %ymm9, %ymm3 kmovd %ebp, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) xorl %eax, %eax blsrq %rbp, %rcx jne 0x1ed0955 testl %eax, %eax je 0x1ed07ee jmp 0x1ed0bd3 movl $0x6, %eax jmp 0x1ed093a movl $0x4, %eax jmp 0x1ed093a movq %rsi, %r12 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed09b5 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 movq %r12, %rsi jmp 0x1ed093a vpshufd $0xaa, %ymm3, %ymm6 # ymm6 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm7 vpermt2q %ymm2, %ymm6, %ymm7 vmovq %xmm7, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) vpminsd %ymm6, %ymm5, %ymm7 vpmaxsd %ymm6, %ymm5, %ymm6 vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 blsrq %rcx, %rcx jne 0x1ed0a37 vpermi2q %ymm2, %ymm1, %ymm7 vmovq %xmm7, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm6, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed09b0 movq %r15, %r13 movq %r11, %r15 movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm4, %ymm7, %ymm8 vpmaxsd %ymm4, %ymm7, %ymm7 vpminsd %ymm7, %ymm5, %ymm4 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm6, %ymm5 vpmaxsd %ymm7, %ymm6, %ymm7 blsrq %rcx, %rcx jne 0x1ed0b0d vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm5, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm9, %ymm5, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm9, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rdx movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r15, %r11 movq %r12, %rsi movq %r13, %r15 leaq 0x2a0(%rsp), %r13 jmp 0x1ed093a valignd $0x3, %ymm3, %ymm3, %ymm6 # ymm6 = ymm3[3,4,5,6,7,0,1,2] vmovdqa64 %ymm29, %ymm3 vpermt2d %ymm8, %ymm30, %ymm3 vpermt2d %ymm4, %ymm31, %ymm3 vpermt2d %ymm5, %ymm31, %ymm3 vpmovsxbd 0x90afb(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpermps %ymm6, %ymm22, %ymm3 valignd $0x1, %ymm6, %ymm6, %ymm6 # ymm6 = ymm6[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm6, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 
{%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed0b3e popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm9, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed0b99 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx jmp 0x1ed0aeb cmpl $0x6, %eax jne 0x1ed07cc vmovdqu %ymm9, 0x280(%rsp) movl %ebx, %r15d andl $0xf, %r15d addq $-0x8, %r15 je 0x1ed12bc andq $-0x10, %rbx xorl %r13d, %r13d imulq $0xb0, %r13, %r12 vmovaps 0x80(%rbx,%r12), %xmm7 vmovaps 0x40(%rbx,%r12), %xmm8 vmovaps 0x70(%rbx,%r12), %xmm9 vmovaps 0x50(%rbx,%r12), %xmm10 vmulps %xmm7, %xmm8, %xmm0 vmovaps 0x60(%rbx,%r12), %xmm11 vmovaps (%rbx,%r12), %xmm3 vmovaps 0x10(%rbx,%r12), %xmm5 vmovaps 0x20(%rbx,%r12), %xmm6 vmovaps 0x30(%rbx,%r12), %xmm12 vfmsub231ps %xmm10, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm10) - xmm0 vmulps %xmm11, %xmm10, %xmm1 vfmsub231ps %xmm12, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm12) - xmm1 vmulps %xmm9, %xmm12, %xmm2 vbroadcastss 0x10(%r14), %xmm13 vbroadcastss 0x14(%r14), %xmm14 vbroadcastss 0x18(%r14), %xmm15 vsubps (%r14){1to4}, %xmm3, %xmm4 vfmsub231ps %xmm8, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm8) - xmm2 vsubps 0x4(%r14){1to4}, %xmm5, %xmm5 vsubps 0x8(%r14){1to4}, %xmm6, %xmm6 vmulps %xmm6, %xmm14, %xmm16 vfmsub231ps %xmm15, %xmm5, %xmm16 # xmm16 = (xmm5 * xmm15) - xmm16 vmulps %xmm4, %xmm15, %xmm17 vfmsub231ps %xmm13, %xmm6, %xmm17 # xmm17 = (xmm6 * xmm13) - xmm17 vmulps %xmm5, %xmm13, %xmm18 vfmsub231ps %xmm14, %xmm4, %xmm18 # xmm18 = (xmm4 * xmm14) - xmm18 vmulps %xmm2, %xmm15, %xmm15 vfmadd231ps %xmm14, %xmm1, %xmm15 # xmm15 = (xmm1 * xmm14) + xmm15 vfmadd231ps %xmm13, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm13) + xmm15 vandps 0x50200(%rip){1to4}, %xmm15, %xmm3 # 0x1f20ec4 vmulps %xmm18, %xmm7, %xmm7 vfmadd231ps %xmm9, %xmm17, %xmm7 # xmm7 = (xmm17 * xmm9) + xmm7 vfmadd231ps %xmm11, %xmm16, %xmm7 # xmm7 = (xmm16 * xmm11) + xmm7 vandpd 0x50288(%rip){1to2}, %xmm15, %xmm9 # 0x1f20f68 vxorps %xmm7, %xmm9, %xmm7 vmulps %xmm18, %xmm10, %xmm10 vfmadd231ps %xmm17, %xmm8, %xmm10 # xmm10 = (xmm8 * xmm17) + xmm10 vfmadd231ps %xmm16, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm16) + xmm10 vxorps %xmm10, %xmm9, %xmm8 vxorps %xmm10, %xmm10, %xmm10 vcmpnltps %xmm10, %xmm7, %k1 vcmpnltps %xmm10, %xmm8, %k1 {%k1} vcmpneqps %xmm10, %xmm15, %k1 {%k1} vaddps %xmm7, %xmm8, %xmm10 vcmpleps %xmm3, %xmm10, %k0 {%k1} kortestb %k0, %k0 jne 0x1ed0d37 incq %r13 cmpq %r15, %r13 jne 0x1ed0bfd jmp 0x1ed12bc vmulps %xmm6, %xmm2, %xmm6 vfmadd213ps %xmm6, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm6 vfmadd213ps %xmm5, %xmm0, %xmm4 # xmm4 = (xmm0 * xmm4) + xmm5 vxorps %xmm4, %xmm9, %xmm4 vmulps 0xc(%r14){1to4}, %xmm3, %xmm5 vmulps 0x20(%r14){1to4}, %xmm3, %xmm6 vcmpleps %xmm6, %xmm4, %k1 vcmpltps %xmm4, %xmm5, %k1 {%k1} kandb %k0, %k1, %k1 kortestb %k1, %k1 je 0x1ed0d26 vmovaps %xmm7, 0xe0(%rsp) vmovaps %xmm8, 0xf0(%rsp) vmovaps %xmm4, 0x100(%rsp) vmovaps %xmm3, 0x110(%rsp) kmovb %k1, 0x121(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovaps %xmm1, 0x170(%rsp) addq %rbx, %r12 vmovaps %xmm2, 0x180(%rsp) vrcp14ps %xmm3, %xmm0 vfnmadd213ps 0x1b94a(%rip){1to4}, %xmm0, %xmm3 # xmm3 = -(xmm0 * xmm3) + mem vfmadd132ps %xmm0, %xmm0, %xmm3 # xmm3 = (xmm3 * xmm0) + xmm0 vmulps 0x100(%rsp), %xmm3, %xmm4 vmovaps %xmm4, 0x150(%rsp) vmulps 0xe0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x130(%rsp) vmulps 0xf0(%rsp), %xmm3, %xmm0 vmovaps %xmm0, 0x140(%rsp) movq %rdx, 0x10(%rsp) movq (%rdx), %rax movq %rax, 0x70(%rsp) kmovd %k1, 
%ecx vbroadcastss 0x1ac01(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kandb %k1, %k0, %k2 ktestb %k1, %k0 kmovd %k2, %eax movl %ecx, 0xc(%rsp) cmovel %ecx, %eax movzbl %al, %eax tzcntq %rax, %rdx movl 0x90(%r12,%rdx,4), %ecx movq 0x70(%rsp), %rax movq 0x1e8(%rax), %rax movq %rcx, 0x30(%rsp) movq (%rax,%rcx,8), %rcx movl 0x24(%r14), %eax testl %eax, 0x34(%rcx) movq %rdx, 0x18(%rsp) je 0x1ed0eb3 movq %rcx, %rax movq 0x10(%rsp), %rdx movq 0x10(%rdx), %rcx movq %rcx, 0x28(%rsp) cmpq $0x0, 0x10(%rcx) movl 0xc(%rsp), %ecx jne 0x1ed0f31 cmpq $0x0, 0x40(%rax) jne 0x1ed0f31 xorl %eax, %eax jmp 0x1ed0ed9 movl $0x1, %eax shlxl %edx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx testb %al, %al je 0x1ed1232 testb %cl, %cl je 0x1ed0d26 kmovd %ecx, %k1 vbroadcastss 0x1ab2a(%rip), %xmm0 # 0x1eeba20 vblendmps %xmm4, %xmm0, %xmm0 {%k1} vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2] vminps %xmm0, %xmm1, %xmm1 vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0] vminps %xmm1, %xmm2, %xmm1 vcmpeqps %xmm1, %xmm0, %k0 kmovd %k0, %eax andb %cl, %al movzbl %al, %eax movl %ecx, 0xc(%rsp) movzbl %cl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %edx jmp 0x1ed0e59 vmovaps %xmm4, 0x190(%rsp) vmovups %ymm26, 0x1a0(%rsp) vmovups %ymm25, 0x1c0(%rsp) vmovups %ymm24, 0x1e0(%rsp) vmovups %ymm23, 0x200(%rsp) movq %rsi, 0x40(%rsp) movq %r11, 0x48(%rsp) movq %r10, 0x50(%rsp) movq %r9, 0x58(%rsp) vmovups %ymm21, 0x220(%rsp) vmovups %ymm20, 0x240(%rsp) vmovups %ymm19, 0x260(%rsp) movq %rdi, 0x60(%rsp) movq %r8, 0x68(%rsp) movq 0x18(%rsp), %rsi vmovss 0x130(%rsp,%rsi,4), %xmm0 vmovss 0x140(%rsp,%rsi,4), %xmm1 movq %rax, %rdi movq 0x8(%rdx), %rax movl 0xa0(%r12,%rsi,4), %ecx vmovss 0x160(%rsp,%rsi,4), %xmm2 vmovss 0x170(%rsp,%rsi,4), %xmm3 vmovss 0x180(%rsp,%rsi,4), %xmm4 vmovss %xmm2, 0xb0(%rsp) vmovss %xmm3, 0xb4(%rsp) vmovss %xmm4, 0xb8(%rsp) vmovss %xmm0, 0xbc(%rsp) vmovss %xmm1, 0xc0(%rsp) movl %ecx, 0xc4(%rsp) movq 0x30(%rsp), %rcx movl %ecx, 0xc8(%rsp) movl (%rax), %ecx movl %ecx, 0xcc(%rsp) movl 0x4(%rax), %ecx movl %ecx, 0xd0(%rsp) vmovss 0x20(%r14), %xmm0 vmovss %xmm0, 0x20(%rsp) vmovss 0x150(%rsp,%rsi,4), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF leaq 0x24(%rsp), %rcx movq %rcx, 0x80(%rsp) movq 0x18(%rdi), %rcx movq %rcx, 0x88(%rsp) movq %rax, 0x90(%rsp) movq %r14, 0x98(%rsp) leaq 0xb0(%rsp), %rax movq %rax, 0xa0(%rsp) movl $0x1, 0xa8(%rsp) movq %rdi, 0x38(%rsp) movq 0x40(%rdi), %rax testq %rax, %rax je 0x1ed10b7 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ed114f movq 0x28(%rsp), %rax movq 0x10(%rax), %rax testq %rax, %rax je 0x1ed10f4 movq 0x28(%rsp), %rcx testb $0x2, (%rcx) jne 0x1ed10da movq 0x38(%rsp), %rcx testb $0x40, 0x3e(%rcx) je 0x1ed10e7 leaq 0x80(%rsp), %rdi vzeroupper callq *%rax movq 0x80(%rsp), %rax cmpl $0x0, (%rax) je 0x1ed114f movq 0x98(%rsp), %rax movq 0xa0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl 
%edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ed115b vmovss 0x20(%rsp), %xmm0 vmovss %xmm0, 0x20(%r14) movl $0x1, %eax movq 0x18(%rsp), %rcx shlxl %ecx, %eax, %eax kmovd %eax, %k0 movzbl 0xc(%rsp), %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vmovaps 0x190(%rsp), %xmm4 vcmpleps 0x20(%r14){1to4}, %xmm4, %k1 kandb %k1, %k0, %k0 kmovd %k0, %ecx movb $0x1, %al movq 0x10(%rsp), %rdx movq 0x68(%rsp), %r8 movq 0x60(%rsp), %rdi vmovups 0x260(%rsp), %ymm19 vmovups 0x240(%rsp), %ymm20 vmovups 0x220(%rsp), %ymm21 vbroadcastss 0x4153d(%rip), %ymm22 # 0x1f12704 movq 0x58(%rsp), %r9 movq 0x50(%rsp), %r10 movq 0x48(%rsp), %r11 movq 0x40(%rsp), %rsi vmovups 0x200(%rsp), %ymm23 vmovups 0x1e0(%rsp), %ymm24 vmovups 0x1c0(%rsp), %ymm25 vmovups 0x1a0(%rsp), %ymm26 vpmovsxbd 0x8c86b(%rip), %ymm27 # 0x1f5da70 vpbroadcastd 0x903ad(%rip), %ymm28 # 0x1f615bc vpbroadcastd 0x4fca7(%rip), %xmm29 # 0x1f20ec0 vpmovsxbd 0x903fd(%rip), %ymm30 # 0x1f61620 vpmovsxbd 0x903fb(%rip), %ymm31 # 0x1f61628 jmp 0x1ed0ed9 movq 0x18(%rsp), %rax vmovss 0x130(%rsp,%rax,4), %xmm0 vmovss 0x140(%rsp,%rax,4), %xmm1 vmovss 0x150(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x20(%r14) vmovss 0x160(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x30(%r14) vmovss 0x170(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x34(%r14) vmovss 0x180(%rsp,%rax,4), %xmm2 vmovss %xmm2, 0x38(%r14) vmovss %xmm0, 0x3c(%r14) vmovss %xmm1, 0x40(%r14) movl 0xa0(%r12,%rax,4), %eax movl %eax, 0x44(%r14) movq 0x30(%rsp), %rax movl %eax, 0x48(%r14) movq 0x8(%rdx), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r14) movl 0x4(%rax), %eax movl %eax, 0x50(%r14) jmp 0x1ed0d26 vbroadcastss 0x20(%r14), %ymm0 movq 0x78(%rsp), %r15 leaq 0x2a0(%rsp), %r13 vmovdqu 0x280(%rsp), %ymm9 jmp 0x1ed07cc nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
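This kernel's leaf test is the Moeller-Trumbore ray/triangle intersection, vectorized over the four triangles of a TriangleM<4> block (the edge cross products and U/V/T dot products are visible in the listing above). For orientation, a textbook scalar formulation follows; it is a sketch, not the register-level variant above, which defers the division by the determinant until after the range tests and handles back-face signs with mask operations. All names below are illustrative.

    struct Vec3 { float x, y, z; };
    static inline Vec3  sub(Vec3 a, Vec3 b) { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
    static inline Vec3  cross(Vec3 a, Vec3 b) {
      return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x};
    }
    static inline float dot(Vec3 a, Vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

    static bool intersectTri(Vec3 org, Vec3 dir, Vec3 v0, Vec3 v1, Vec3 v2,
                             float tnear, float& tfar, float& u, float& v)
    {
      const Vec3  e1  = sub(v1, v0), e2 = sub(v2, v0);
      const Vec3  p   = cross(dir, e2);
      const float det = dot(e1, p);
      if (det == 0.0f) return false;              // ray parallel to triangle
      const float inv = 1.0f / det;
      const Vec3  s   = sub(org, v0);
      u = dot(s, p) * inv;
      if (u < 0.0f || u > 1.0f) return false;     // outside barycentric range
      const Vec3  q   = cross(s, e1);
      v = dot(dir, q) * inv;
      if (v < 0.0f || u + v > 1.0f) return false;
      const float t = dot(e2, q) * inv;
      if (t < tnear || t > tfar) return false;    // respect the [tnear, tfar] window
      tfar = t;                                   // shrink the ray, as tray.tfar = ray.tfar does
      return true;
    }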
embree::avx512::BVHNIntersector1<8, 1048576, false, embree::avx512::ArrayIntersector1<embree::avx512::TriangleMIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];           // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack + 1;      // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack + stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                    ? query->radius * query->radius
                    : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);

    size_t lazy_node = 0;
    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                  ? query->radius * query->radius
                  : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed12ef xorl %eax, %eax jmp 0x1ed1adb pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r13 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed132f vmovss 0x10(%r13), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ed133a vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 vbroadcastss (%r13), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r13), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r13), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 leaq 0x1b0(%rsp), %r8 movl $0x0, 0xc(%rsp) vpmovsxbd 0x8c6e4(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x90227(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ed1ac6 vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ed1413 movq (%r8), %r12 cmpl $0x1, 0x18(%rbx) jne 0x1ed15c1 testb $0x8, %r12b jne 0x1ed154b movq %r12, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vbroadcastss 0x30(%rax), %ymm0 vbroadcastss 0x3c(%rax), %ymm1 vmovq (%rax), %xmm2 vpmovzxbd %xmm2, %ymm3 # ymm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vmovq 0x8(%rax), %xmm4 vpmovzxbd %xmm4, %ymm5 # ymm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero vcvtdq2ps %ymm5, %ymm5 vbroadcastss 0x34(%rax), %ymm6 vfmadd213ps %ymm0, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm0 vbroadcastss 0x40(%rax), %ymm0 vpmovzxbd 0x10(%rax), %ymm1 vcvtdq2ps %ymm1, %ymm1 vfmadd213ps %ymm6, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm6 vpmovzxbd 0x18(%rax), %ymm7 vcvtdq2ps %ymm7, %ymm7 vbroadcastss 0x38(%rax), %ymm8 vfmadd213ps %ymm6, %ymm0, %ymm7 # ymm7 = (ymm0 * ymm7) + ymm6 vbroadcastss 0x44(%rax), %ymm0 vpmovzxbd 0x20(%rax), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm8, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + ymm8 vpmovzxbd 0x28(%rax), %ymm9 vcvtdq2ps %ymm9, %ymm9 vfmadd213ps %ymm8, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm8 vmovups 0x80(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm0 vminps %ymm5, %ymm0, %ymm0 vsubps %ymm8, %ymm0, %ymm0 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm1 vminps %ymm7, %ymm1, %ymm1 vsubps %ymm8, %ymm1, %ymm1 vmovups 0x40(%rsp), %ymm7 vmaxps %ymm6, %ymm7, %ymm6 vminps %ymm9, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmulps %ymm0, %ymm0, %ymm0 vmulps %ymm1, %ymm1, %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmulps %ymm6, %ymm6, %ymm1 vaddps %ymm1, %ymm0, %ymm7 vcmpleps %ymm5, %ymm3, %k1 vpcmpleub %xmm4, %xmm2, %k0 vcmpleps 0x180(%rsp), %ymm7, %k1 {%k1} kandb %k0, %k1, %k0 kmovb %k0, %edi testb $0x8, %r12b jne 0x1ed171d testq %rdi, %rdi je 0x1ed1727 
andq $-0x10, %r12 vmovdqu (%r12), %ymm0 vmovdqu 0x20(%r12), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ed1731 testl %eax, %eax je 0x1ed1430 jmp 0x1ed19ac testb $0x8, %r12b jne 0x1ed154b movq %r12, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vmovq (%rax), %xmm0 vmovq 0x8(%rax), %xmm1 vbroadcastss 0x30(%rax), %ymm2 vbroadcastss 0x3c(%rax), %ymm3 vpcmpleub %xmm1, %xmm0, %k1 vpmovzxbd %xmm0, %ymm0 # ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero vcvtdq2ps %ymm0, %ymm0 vfmadd213ps %ymm2, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm2 vpmovzxbd %xmm1, %ymm1 # ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero vcvtdq2ps %ymm1, %ymm1 vbroadcastss 0x34(%rax), %ymm4 vbroadcastss 0x40(%rax), %ymm5 vpmovzxbd 0x10(%rax), %ymm6 vfmadd213ps %ymm2, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm1) + ymm2 vcvtdq2ps %ymm6, %ymm2 vfmadd213ps %ymm4, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm4 vpmovzxbd 0x18(%rax), %ymm3 vcvtdq2ps %ymm3, %ymm3 vbroadcastss 0x38(%rax), %ymm6 vbroadcastss 0x44(%rax), %ymm7 vpmovzxbd 0x20(%rax), %ymm8 vfmadd213ps %ymm4, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm4 vcvtdq2ps %ymm8, %ymm4 vfmadd213ps %ymm6, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm4) + ymm6 vpmovzxbd 0x28(%rax), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm6, %ymm7, %ymm5 # ymm5 = (ymm7 * ymm5) + ymm6 vmovups 0x80(%rsp), %ymm7 vmaxps %ymm0, %ymm7, %ymm6 vminps %ymm1, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm2, %ymm8, %ymm7 vminps %ymm3, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x40(%rsp), %ymm9 vmaxps %ymm4, %ymm9, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpltps 0x140(%rsp), %ymm1, %k0 vcmpnleps 0x120(%rsp), %ymm0, %k2 korb %k0, %k2, %k0 vcmpnleps 0xe0(%rsp), %ymm2, %k2 korb %k2, %k0, %k0 vcmpltps 0x100(%rsp), %ymm3, %k2 vcmpltps 0xc0(%rsp), %ymm5, %k3 vcmpnleps 0xa0(%rsp), %ymm4, %k4 korb %k4, %k2, %k2 korb %k2, %k0, %k0 korb %k3, %k0, %k0 knotb %k0, %k0 kmovd %k0, %eax vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ed154b movl $0x6, %eax jmp 0x1ed15b4 movl $0x4, %eax jmp 0x1ed15b4 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed178e vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r12 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ed15b4 vpshufd $0xaa, %ymm2, %ymm5 # ymm5 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm6 vpermt2q %ymm1, %ymm5, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm5, %ymm4, %ymm6 vpmaxsd %ymm5, %ymm4, %ymm5 vpminsd %ymm5, %ymm3, %ymm4 vpmaxsd %ymm5, %ymm3, %ymm5 blsrq %rcx, %rcx jne 0x1ed1810 vpermi2q 
%ymm1, %ymm0, %ymm6 vmovq %xmm6, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm5, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed15b4 movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm6, %ymm7 vpmaxsd %ymm3, %ymm6, %ymm6 vpminsd %ymm6, %ymm4, %ymm3 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm6 blsrq %rcx, %rcx jne 0x1ed18c5 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r12 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm4, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm4, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq %r10, %rdi jmp 0x1ed15b4 valignd $0x3, %ymm2, %ymm2, %ymm5 # ymm5 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4f5eb(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8fd42(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x8fd3b(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm4, %ymm7, %ymm2 vpmovsxbd 0x8fd2e(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x40dec(%rip), %ymm2 # 0x1f12704 vpermd %ymm5, %ymm2, %ymm2 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm5, %ymm4 vmovq %xmm4, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed190b popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed196e vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r12 vmovdqa %ymm9, %ymm7 jmp 0x1ed18bd cmpl $0x6, %eax jne 0x1ed1413 movl %r12d, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ed1413 movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %r12 addq $0xa0, %r12 xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) movq $-0x4, %r14 xorl %ebp, %ebp movl (%r12,%r14,4), %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmpq %rcx, %rax je 0x1ed1a38 movq (%rbx), %rcx movq 0x1e8(%rcx), %rcx movq (%rcx,%rax,8), %rdi movl %eax, 0x44(%rbx) movl 0x10(%r12,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r13, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %bpl incq %r14 jne 0x1ed19fb movq 0x28(%rsp), %rax orb %bpl, %al incq %r15 addq $0xb0, %r12 cmpq 0x20(%rsp), %r15 jne 0x1ed19ed testb $0x1, %al vmovaps 0x30(%rsp), %xmm10 movq 0x18(%rsp), %r8 vpmovsxbd 0x8c009(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x8fb4c(%rip), %ymm12 # 0x1f615bc leaq 0x1a0(%rsp), %r9 vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ed1413 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed1ab0 vmovss 0x10(%r13), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ed1abb 
vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ed139d movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
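Both pointQuery variants in the records below derive a squared cull radius before traversal, and the two branches are easy to spot in the disassembly: a scalar vmulss squares query->radius for sphere queries, while vdpps with immediate 0x7f forms dot(query_radius, query_radius) over xyz for AABB queries. A minimal sketch of that computation follows; the struct layouts and the numeric value of the sphere query type are assumptions read off the asm (the cmpl $0x1 test), not Embree's headers.

#include <immintrin.h>

// Hypothetical layouts for illustration only.
struct PointQuery   { float p[3]; float radius; };
struct QueryContext { int query_type; float query_radius[4]; }; // type 1 == sphere (assumed)

static inline float cull_radius_sq(const PointQuery& q, const QueryContext& c) {
  if (c.query_type == 1)                          // POINT_QUERY_TYPE_SPHERE path
    return q.radius * q.radius;                   // the scalar vmulss
  __m128 r = _mm_loadu_ps(c.query_radius);        // xyz plus one padding lane
  return _mm_cvtss_f32(_mm_dp_ps(r, r, 0x7f));    // vdpps $0x7f: dot over xyz only
}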
embree::avx512::BVHNIntersector1<8, 1048576, false, embree::avx512::ArrayIntersector1<embree::avx512::QuadMiIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed2a73 xorl %eax, %eax jmp 0x1ed3254 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24e8, %rsp # imm = 0x24E8 movq %rdx, %rbx movq %rsi, %r12 movq 0x70(%rax), %rax movq %rax, 0x1a0(%rsp) movl $0x0, 0x1a8(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed2ab4 vmovss 0x10(%r12), %xmm0 vmulss %xmm0, %xmm0, %xmm10 jmp 0x1ed2abf vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm10 leaq 0x1b0(%rsp), %r8 vbroadcastss (%r12), %ymm0 vmovups %ymm0, 0x80(%rsp) vbroadcastss 0x4(%r12), %ymm0 vmovups %ymm0, 0x60(%rsp) vbroadcastss 0x8(%r12), %ymm0 vmovups %ymm0, 0x40(%rsp) vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8af55(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x8ea98(%rip), %ymm12 # 0x1f615bc vmovups 0x80(%rsp), %ymm3 vsubps %ymm0, %ymm3, %ymm4 vmovups %ymm4, 0x140(%rsp) vaddps %ymm0, %ymm3, %ymm3 vmovups %ymm3, 0x120(%rsp) vmovups 0x60(%rsp), %ymm3 vsubps %ymm1, %ymm3, %ymm4 vmovups %ymm4, 0x100(%rsp) vaddps %ymm1, %ymm3, %ymm1 vmovups %ymm1, 0xe0(%rsp) vmovups 0x40(%rsp), %ymm1 vsubps %ymm2, %ymm1, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm2, %ymm1, %ymm1 vmovups %ymm1, 0xa0(%rsp) vmulps %ymm0, %ymm0, %ymm0 vmovups %ymm0, 0x180(%rsp) vmovaps %xmm10, 0x30(%rsp) cmpq %r9, %r8 je 0x1ed323f vmovss -0x8(%r8), %xmm0 addq $-0x10, %r8 vucomiss %xmm10, %xmm0 ja 0x1ed2b9a movq (%r8), %rbp cmpl $0x1, 0x18(%rbx) jne 0x1ed2d40 testb $0x8, %bpl jne 0x1ed2cd2 movq %rbp, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vbroadcastss 0x30(%rax), %ymm0 vbroadcastss 0x3c(%rax), %ymm1 vmovq (%rax), %xmm2 vpmovzxbd %xmm2, %ymm3 # ymm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vmovq 0x8(%rax), %xmm4 vpmovzxbd %xmm4, %ymm5 # ymm5 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero vcvtdq2ps %ymm5, %ymm5 vbroadcastss 0x34(%rax), %ymm6 vfmadd213ps %ymm0, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm0 vbroadcastss 0x40(%rax), %ymm0 vpmovzxbd 0x10(%rax), %ymm1 vcvtdq2ps %ymm1, %ymm1 vfmadd213ps %ymm6, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm6 vpmovzxbd 0x18(%rax), %ymm7 vcvtdq2ps %ymm7, %ymm7 vbroadcastss 0x38(%rax), %ymm8 vfmadd213ps %ymm6, %ymm0, %ymm7 # ymm7 = (ymm0 * ymm7) + ymm6 vbroadcastss 0x44(%rax), %ymm0 vpmovzxbd 0x20(%rax), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm8, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + ymm8 vpmovzxbd 0x28(%rax), %ymm9 vcvtdq2ps %ymm9, %ymm9 vfmadd213ps %ymm8, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm9) + ymm8 vmovups 0x80(%rsp), %ymm8 vmaxps %ymm3, %ymm8, %ymm0 vminps %ymm5, %ymm0, %ymm0 vsubps %ymm8, %ymm0, %ymm0 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm1, %ymm8, %ymm1 vminps %ymm7, %ymm1, %ymm1 vsubps %ymm8, %ymm1, %ymm1 vmovups 0x40(%rsp), %ymm7 vmaxps %ymm6, %ymm7, %ymm6 vminps %ymm9, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmulps %ymm0, %ymm0, %ymm0 vmulps %ymm1, %ymm1, %ymm1 vaddps %ymm1, %ymm0, %ymm0 vmulps %ymm6, %ymm6, %ymm1 vaddps %ymm1, %ymm0, %ymm7 vcmpleps %ymm5, %ymm3, %k1 vpcmpleub %xmm4, %xmm2, %k0 vcmpleps 0x180(%rsp), %ymm7, %k1 {%k1} kandb %k0, %k1, %k0 kmovb %k0, %edi testb $0x8, %bpl jne 0x1ed2e9c testq %rdi, %rdi je 0x1ed2ea6 andq 
$-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm11, %ymm2 vpternlogd $0xf8, %ymm12, %ymm7, %ymm2 kmovd %edi, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) xorl %eax, %eax blsrq %rdi, %rcx jne 0x1ed2eb0 testl %eax, %eax je 0x1ed2bb7 jmp 0x1ed312b testb $0x8, %bpl jne 0x1ed2cd2 movq %rbp, %rcx andq $-0x10, %rcx leaq 0x40(%rcx), %rax testq %rcx, %rcx cmoveq %rcx, %rax vmovq (%rax), %xmm0 vmovq 0x8(%rax), %xmm1 vbroadcastss 0x30(%rax), %ymm2 vbroadcastss 0x3c(%rax), %ymm3 vpcmpleub %xmm1, %xmm0, %k1 vpmovzxbd %xmm0, %ymm0 # ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero vcvtdq2ps %ymm0, %ymm0 vfmadd213ps %ymm2, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm0) + ymm2 vpmovzxbd %xmm1, %ymm1 # ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero vcvtdq2ps %ymm1, %ymm1 vbroadcastss 0x34(%rax), %ymm4 vbroadcastss 0x40(%rax), %ymm5 vpmovzxbd 0x10(%rax), %ymm6 vfmadd213ps %ymm2, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm1) + ymm2 vcvtdq2ps %ymm6, %ymm2 vfmadd213ps %ymm4, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm4 vpmovzxbd 0x18(%rax), %ymm3 vcvtdq2ps %ymm3, %ymm3 vbroadcastss 0x38(%rax), %ymm6 vbroadcastss 0x44(%rax), %ymm7 vpmovzxbd 0x20(%rax), %ymm8 vfmadd213ps %ymm4, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm4 vcvtdq2ps %ymm8, %ymm4 vfmadd213ps %ymm6, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm4) + ymm6 vpmovzxbd 0x28(%rax), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm6, %ymm7, %ymm5 # ymm5 = (ymm7 * ymm5) + ymm6 vmovups 0x80(%rsp), %ymm7 vmaxps %ymm0, %ymm7, %ymm6 vminps %ymm1, %ymm6, %ymm6 vsubps %ymm7, %ymm6, %ymm6 vmovups 0x60(%rsp), %ymm8 vmaxps %ymm2, %ymm8, %ymm7 vminps %ymm3, %ymm7, %ymm7 vsubps %ymm8, %ymm7, %ymm7 vmovups 0x40(%rsp), %ymm9 vmaxps %ymm4, %ymm9, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm9, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpltps 0x140(%rsp), %ymm1, %k0 vcmpnleps 0x120(%rsp), %ymm0, %k2 korb %k0, %k2, %k0 vcmpnleps 0xe0(%rsp), %ymm2, %k2 korb %k2, %k0, %k0 vcmpltps 0x100(%rsp), %ymm3, %k2 vcmpltps 0xc0(%rsp), %ymm5, %k3 vcmpnleps 0xa0(%rsp), %ymm4, %k4 korb %k4, %k2, %k2 korb %k2, %k0, %k0 korb %k3, %k0, %k0 knotb %k0, %k0 kmovd %k0, %eax vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovd %k0, %ecx andb %al, %cl movzbl %cl, %edi jmp 0x1ed2cd2 movl $0x6, %eax jmp 0x1ed2d33 movl $0x4, %eax jmp 0x1ed2d33 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed2f0d vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r8) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r8) addq $0x10, %r8 jmp 0x1ed2d33 vpshufd $0xaa, %ymm2, %ymm5 # ymm5 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm6 vpermt2q %ymm1, %ymm5, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm5, %ymm4, %ymm6 vpmaxsd %ymm5, %ymm4, %ymm5 vpminsd %ymm5, %ymm3, %ymm4 vpmaxsd %ymm5, %ymm3, %ymm5 blsrq %rcx, %rcx jne 0x1ed2f8f vpermi2q %ymm1, 
%ymm0, %ymm6 vmovq %xmm6, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, (%r8) vpermd %ymm7, %ymm5, %ymm2 vmovd %xmm2, 0x8(%r8) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r8) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed2d33 movq %rdi, %r10 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) vpminsd %ymm3, %ymm6, %ymm7 vpmaxsd %ymm3, %ymm6, %ymm6 vpminsd %ymm6, %ymm4, %ymm3 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm6 blsrq %rcx, %rcx jne 0x1ed3044 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r8) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r8) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm4, %ymm2 vmovq %xmm2, 0x10(%r8) vpermd %ymm9, %ymm4, %ymm2 vmovd %xmm2, 0x18(%r8) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r8) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r8) addq $0x30, %r8 movq %r10, %rdi jmp 0x1ed2d33 valignd $0x3, %ymm2, %ymm2, %ymm5 # ymm5 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4de6c(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8e5c3(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x8e5bc(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm4, %ymm7, %ymm2 vpmovsxbd 0x8e5af(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3f66d(%rip), %ymm2 # 0x1f12704 vpermd %ymm5, %ymm2, %ymm2 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm5, %ymm4 vmovq %xmm4, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed308a popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r8) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed30ed vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ed303c cmpl $0x6, %eax jne 0x1ed2b9a movl %ebp, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x20(%rsp) je 0x1ed2b9a movq %rdi, 0x10(%rsp) vmovups %ymm7, 0x160(%rsp) movq %r8, 0x18(%rsp) andq $-0x10, %rbp addq $0x50, %rbp xorl %r15d, %r15d xorl %eax, %eax movq %rax, 0x28(%rsp) xorl %r13d, %r13d xorl %r14d, %r14d cmpl $-0x1, (%rbp,%r14,4) je 0x1ed31b3 movq (%rbx), %rax movl -0x10(%rbp,%r14,4), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%rbx) movl (%rbp,%r14,4), %eax movl %eax, 0x40(%rbx) movq %r12, %rsi movq %rbx, %rdx vzeroupper callq 0x91bd12 orb %al, %r13b incq %r14 cmpq $0x4, %r14 jne 0x1ed3173 movq 0x28(%rsp), %rax orb %r13b, %al incq %r15 addq $0x60, %rbp cmpq 0x20(%rsp), %r15 jne 0x1ed3168 testb $0x1, %al movq 0x18(%rsp), %r8 vmovaps 0x30(%rsp), %xmm10 leaq 0x1a0(%rsp), %r9 vpmovsxbd 0x8a889(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x8e3cc(%rip), %ymm12 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 movq 0x10(%rsp), %rdi je 0x1ed2b9a vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed3229 vmovss 0x10(%r12), %xmm3 vmulss %xmm3, %xmm3, %xmm10 jmp 0x1ed3234 vmovaps 
0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm10 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ed2b24 movl 0xc(%rsp), %eax addq $0x24e8, %rsp # imm = 0x24E8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
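The flattened pointQuery source above is easier to follow as a control-flow skeleton. The sketch below runs the same pop loop on a toy binary BVH: pop a stack entry, skip it when its recorded distance already exceeds the cull radius, test children, and let a leaf hit shrink the cull radius so later pops are culled more aggressively. Node layout, stack size, and the leaf handling are invented stand-ins; the real kernel also descends the nearest hit child directly and pushes only the remaining children, in sorted order.

#include <cmath>
#include <vector>

struct AABB      { float lo[3], hi[3]; };
struct Node      { AABB box; int left, right, prim; };  // prim >= 0 marks a leaf
struct StackItem { int node; float dist; };

// Squared distance from p to a box: clamp p into the box, then measure.
// This is the vmaxps/vminps/vsubps/vmulps sequence in the listing above.
static float dist2(const float p[3], const AABB& b) {
  float d2 = 0.0f;
  for (int i = 0; i < 3; ++i) {
    float c = std::fmax(b.lo[i], std::fmin(p[i], b.hi[i]));
    d2 += (p[i] - c) * (p[i] - c);
  }
  return d2;
}

bool point_query(const std::vector<Node>& bvh, const float p[3], float cull_r2) {
  StackItem stack[64];
  StackItem* sp = stack;
  *sp++ = { 0, -INFINITY };                  // stack[0] = { root, neg_inf }
  bool changed = false;
  while (sp != stack) {                      // pop loop
    StackItem it = *--sp;
    if (it.dist > cull_r2) continue;         // node got culled while it waited
    const Node& n = bvh[it.node];
    if (n.prim >= 0) {                       // leaf: pretend the primitive hits
      changed = true;
      cull_r2 = dist2(p, n.box);             // a hit shrinks the cull radius
      continue;
    }
    float dl = dist2(p, bvh[n.left].box);    // push surviving children together
    float dr = dist2(p, bvh[n.right].box);   // with the distance they were given
    if (dl <= cull_r2) *sp++ = { n.left,  dl };
    if (dr <= cull_r2) *sp++ = { n.right, dr };
  }
  return changed;
}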
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::ObjectIntersector1<false>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24d8, %rsp # imm = 0x24D8 movq %rdx, 0x8(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed3297 movq 0x70(%rax), %rax movq %rax, 0x190(%rsp) movl $0x0, 0x198(%rsp) cmpq $0x8, %rax jne 0x1ed32ac addq $0x24d8, %rsp # imm = 0x24D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss 0xc(%rsi), %xmm2, %xmm1 vmaxss 0x20(%rsi), %xmm2, %xmm3 vandps 0x4dbfb(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x1dd16(%rip), %xmm5 # 0x1ef0fe8 leaq 0x1a0(%rsp), %r8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x1941d(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm2, %xmm0 setb %dil vbroadcastss %xmm0, %ymm10 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm11 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x4dbba(%rip), %ymm6 # 0x1f20edc vmulps (%rsi), %xmm0, %xmm7 vpermps %ymm0, %ymm6, %ymm12 vbroadcastss %xmm7, %ymm8 vbroadcastss 0x3f3cb(%rip), %ymm0 # 0x1f12704 vpermps %ymm7, %ymm0, %ymm9 vpermps %ymm7, %ymm6, %ymm6 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm2, %xmm4 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm2, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %r15 xorq $0x20, %r15 movq %r10, %r12 xorq $0x20, %r12 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x4db2e(%rip), %ymm2 # 0x1f20ec0 vxorps %ymm2, %ymm8, %ymm8 vxorps %ymm2, %ymm9, %ymm9 vxorps %ymm2, %ymm6, %ymm13 vbroadcastss %xmm1, %ymm14 leaq 0x190(%rsp), %rbp vpmovsxbd 0x8a6bb(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8e1fd(%rip), %ymm17 # 0x1f615bc movq %rdi, 0x38(%rsp) vmovups %ymm10, 0x150(%rsp) vmovups %ymm11, 0x130(%rsp) vmovups %ymm12, 0x110(%rsp) movq %r9, 0x30(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %r15, 0x18(%rsp) movq %r12, 0x10(%rsp) vmovups %ymm8, 0xf0(%rsp) vmovups %ymm9, 0xd0(%rsp) vmovups %ymm13, 0xb0(%rsp) vmovups %ymm14, 0x90(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r8 je 0x1ed3297 vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ed3421 movq (%r8), %rbx testb $0x8, %bl jne 0x1ed34b3 vmovaps 0x40(%rbx,%rdi), %ymm1 vfmadd132ps %ymm10, %ymm8, %ymm1 # ymm1 = (ymm1 * ymm10) + ymm8 vmovaps 0x40(%rbx,%r9), %ymm2 vfmadd132ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm2 * ymm11) + ymm9 vmovaps 0x40(%rbx,%r10), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm12, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm12) + ymm13 vpmaxsd %ymm14, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm18 vmovaps 0x40(%rbx,%r11), %ymm1 vfmadd132ps %ymm10, %ymm8, %ymm1 # ymm1 = (ymm1 * ymm10) + ymm8 vmovaps 0x40(%rbx,%r15), %ymm2 vfmadd132ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm2 * ymm11) + ymm9 vmovaps 0x40(%rbx,%r12), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm12, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm12) + ymm13 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm18, %k0 kmovb %k0, %r14d testb $0x8, %bl jne 0x1ed351c testq %r14, %r14 je 0x1ed3523 andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r14d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r14, %rcx jne 
0x1ed352a testl %eax, %eax je 0x1ed343d jmp 0x1ed3845 movl $0x6, %eax jmp 0x1ed350f movl $0x4, %eax jmp 0x1ed350f vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed358c vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1ed350f vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed3617 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed350f vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm9, %ymm13 vmovaps %ymm8, %ymm9 movq %r12, %r13 movq %r15, %r12 movq %r11, %r15 movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed372a vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %r13, %r12 vmovaps %ymm9, %ymm8 vmovaps %ymm13, %ymm9 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 jmp 0x1ed350f valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x4d786(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8dedc(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x8ded5(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x8dec8(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x3ef86(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed3771 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) 
valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed37db vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rsi movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %r13, %r12 vmovaps %ymm9, %ymm8 vmovaps %ymm13, %ymm9 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 vmovdqa64 %ymm19, %ymm18 jmp 0x1ed350f cmpl $0x6, %eax jne 0x1ed341c movq %r8, 0x40(%rsp) movl %ebx, %r12d andl $0xf, %r12d addq $-0x8, %r12 je 0x1ed397d andq $-0x10, %rbx movq 0x8(%rsp), %rax movq (%rax), %r13 xorl %ebp, %ebp vmovdqu64 %ymm18, 0x170(%rsp) movl (%rbx,%rbp,8), %ecx movq 0x1e8(%r13), %rax movq (%rax,%rcx,8), %rax movl 0x34(%rax), %edx testl %edx, 0x24(%rsi) je 0x1ed3971 movl 0x4(%rbx,%rbp,8), %edx movl $0xffffffff, 0x4(%rsp) # imm = 0xFFFFFFFF leaq 0x4(%rsp), %rdi movq %rdi, 0x48(%rsp) movq 0x18(%rax), %rdi movq %rdi, 0x50(%rsp) movq 0x8(%rsp), %rdi movq 0x8(%rdi), %r8 movq %r8, 0x60(%rsp) movq %rsi, %r15 movq %rsi, 0x68(%rsp) movl $0x1, 0x70(%rsp) movl %ecx, 0x74(%rsp) movl %edx, 0x58(%rsp) movq %rax, 0x78(%rsp) movq $0x0, 0x80(%rsp) movq 0x10(%rdi), %rcx movq %rcx, 0x88(%rsp) movq 0x18(%rcx), %rcx testq %rcx, %rcx jne 0x1ed3906 movq 0x60(%rax), %rcx leaq 0x48(%rsp), %rdi vzeroupper callq *%rcx movq %r15, %rsi vmovups 0x150(%rsp), %ymm10 vmovups 0x130(%rsp), %ymm11 vmovups 0x110(%rsp), %ymm12 vmovups 0xf0(%rsp), %ymm8 vmovups 0xd0(%rsp), %ymm9 vmovups 0xb0(%rsp), %ymm13 vmovups 0x90(%rsp), %ymm14 vpmovsxbd 0x8a114(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8dc56(%rip), %ymm17 # 0x1f615bc vmovdqu64 0x170(%rsp), %ymm18 incq %rbp cmpq %rbp, %r12 jne 0x1ed387d vbroadcastss 0x20(%rsi), %ymm0 movq 0x40(%rsp), %r8 movq 0x38(%rsp), %rdi movq 0x30(%rsp), %r9 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %r15 movq 0x10(%rsp), %r12 leaq 0x190(%rsp), %rbp jmp 0x1ed341c nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
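Every intersect variant in this file starts by building safe reciprocal ray directions, visible above as the vrcp14ps / vfnmadd213ps / vfmadd132ps triple: near-zero direction components are first clamped to a minimum magnitude (the vandps/vcmpltps/masked-move prologue), then the ~14-bit AVX-512 reciprocal estimate is refined with one Newton-Raphson step, x1 = x0 + x0*(1 - d*x0). A sketch with AVX-512VL intrinsics; the epsilon is a placeholder, not Embree's constant.

#include <immintrin.h>

static inline __m128 safe_rcp(__m128 dir) {
  const __m128 eps     = _mm_set1_ps(1e-18f);              // assumed minimum magnitude
  const __m128 absmask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
  __m128 a   = _mm_and_ps(dir, absmask);                   // |dir| (the vandps)
  __m128 sml = _mm_cmplt_ps(a, eps);                       // lanes too close to zero
  __m128 d   = _mm_blendv_ps(dir, eps, sml);               // clamp them
  __m128 x0  = _mm_rcp14_ps(d);                            // vrcp14ps estimate
  __m128 e   = _mm_fnmadd_ps(d, x0, _mm_set1_ps(1.0f));    // 1 - d*x0   (vfnmadd213ps)
  return _mm_fmadd_ps(x0, e, x0);                          // x0 + x0*e  (vfmadd132ps)
}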
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::ObjectIntersector1<false>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed39c5 xorl %eax, %eax jmp 0x1ed409a pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24d8, %rsp # imm = 0x24D8 movq %rdx, %r13 movq %rsi, %rbx movq 0x70(%rax), %rax movq %rax, 0x190(%rsp) movl $0x0, 0x198(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed3a04 vmovss 0x10(%rbx), %xmm0 vmulss %xmm0, %xmm0, %xmm11 jmp 0x1ed3a10 vmovaps 0x50(%r13), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm11 leaq 0x1a0(%rsp), %r9 vbroadcastss (%rbx), %ymm9 vbroadcastss 0x4(%rbx), %ymm10 vbroadcastss 0x8(%rbx), %ymm12 vbroadcastss 0x50(%r13), %ymm0 vbroadcastss 0x54(%r13), %ymm1 vbroadcastss 0x58(%r13), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x190(%rsp), %r10 vpmovsxbd 0x8a01c(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x8db5f(%rip), %ymm14 # 0x1f615bc vmovups %ymm9, 0x130(%rsp) vmovups %ymm10, 0x110(%rsp) vmovups %ymm12, 0xf0(%rsp) vsubps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xd0(%rsp) vaddps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xb0(%rsp) vsubps %ymm1, %ymm10, %ymm3 vmovups %ymm3, 0x90(%rsp) vaddps %ymm1, %ymm10, %ymm1 vmovups %ymm1, 0x70(%rsp) vsubps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x50(%rsp) vaddps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x30(%rsp) vmulps %ymm0, %ymm0, %ymm15 vmovaps %xmm11, 0x20(%rsp) vmovups %ymm15, 0x150(%rsp) cmpq %r10, %r9 je 0x1ed4085 vmovss -0x8(%r9), %xmm0 addq $-0x10, %r9 vucomiss %xmm11, %xmm0 ja 0x1ed3ad0 movq (%r9), %rbp cmpl $0x1, 0x18(%r13) jne 0x1ed3bdb testb $0x8, %bpl jne 0x1ed3b65 vmovaps 0x40(%rbp), %ymm0 vmovaps 0x60(%rbp), %ymm1 vmaxps %ymm0, %ymm9, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm9, %ymm2, %ymm2 vmaxps 0x80(%rbp), %ymm10, %ymm3 vminps 0xa0(%rbp), %ymm3, %ymm3 vmaxps 0xc0(%rbp), %ymm12, %ymm4 vsubps %ymm10, %ymm3, %ymm3 vminps 0xe0(%rbp), %ymm4, %ymm4 vsubps %ymm12, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm15, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %r8d testb $0x8, %bpl jne 0x1ed3cc1 testq %r8, %r8 je 0x1ed3ccb andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm13, %ymm2 vpternlogd $0xf8, %ymm14, %ymm7, %ymm2 kmovd %r8d, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %r8, %rcx jne 0x1ed3cd5 testl %eax, %eax je 0x1ed3aed jmp 0x1ed3f78 testb $0x8, %bpl jne 0x1ed3b65 vmovaps 0xc0(%rbp), %ymm0 vmovaps 0x40(%rbp), %ymm1 vmovaps 0x60(%rbp), %ymm2 vmovaps 0x80(%rbp), %ymm3 vmovaps 0xa0(%rbp), %ymm4 vmovaps 0xe0(%rbp), %ymm5 vmaxps %ymm1, %ymm9, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps %ymm9, %ymm6, %ymm6 vmaxps %ymm3, %ymm10, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm0, %ymm12, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm12, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0xd0(%rsp), %ymm2, %k0 vcmpnleps 0xb0(%rsp), %ymm1, %k1 vcmpltps 0x90(%rsp), %ymm4, %k2 vcmpnleps 0x70(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x50(%rsp), %ymm5, %k3 korb %k3, %k2, %k2 vcmpnleps 0x30(%rsp), %ymm0, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %r8d jmp 0x1ed3b65 movl $0x6, %eax jmp 0x1ed3bce movl $0x4, %eax jmp 0x1ed3bce vpshufd $0x55, %ymm2, %ymm3 # ymm3 = 
ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed3d39 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r9) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed3bce vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed3dc2 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r9) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r9) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r9) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed3bce movq %r8, %r11 vmovaps %ymm15, %ymm16 vmovdqa %ymm7, %ymm15 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed3e8a vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r9) vmovdqa %ymm15, %ymm7 vpermd %ymm15, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r9) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r9) vpermd %ymm15, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r9) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r9) vpermd %ymm15, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r9) addq $0x30, %r9 vmovaps %ymm16, %ymm15 movq %r11, %r8 jmp 0x1ed3bce valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4d026(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8d77d(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x8d776(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x8d769(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3e827(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed3ed0 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm15, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r9) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r9 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed3f3a vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm15, %ymm7 jmp 0x1ed3e7c cmpl $0x6, %eax jne 0x1ed3ad0 movl %ebp, %r12d andl $0xf, %r12d addq $-0x8, %r12 je 0x1ed3ad0 movq %r8, 0x10(%rsp) vmovups %ymm7, 0x170(%rsp) movq %r9, 0x18(%rsp) andq $-0x10, %rbp xorl %r15d, %r15d xorl %r14d, %r14d movq (%r13), %rax movl (%rbp,%r15,8), %ecx movq 0x1e8(%rax), 
%rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%r13) movl 0x4(%rbp,%r15,8), %eax movl %eax, 0x40(%r13) movq %rbx, %rsi movq %r13, %rdx vzeroupper callq 0x91bd12 orb %al, %r14b incq %r15 cmpq %r15, %r12 jne 0x1ed3faf testb $0x1, %r14b movq 0x18(%rsp), %r9 vmovups 0x130(%rsp), %ymm9 vmovups 0x110(%rsp), %ymm10 vmovaps 0x20(%rsp), %xmm11 vmovups 0xf0(%rsp), %ymm12 leaq 0x190(%rsp), %r10 vpmovsxbd 0x89a4c(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x8d58f(%rip), %ymm14 # 0x1f615bc vmovups 0x170(%rsp), %ymm7 vmovups 0x150(%rsp), %ymm15 movq 0x10(%rsp), %r8 je 0x1ed3ad0 vbroadcastss 0x50(%r13), %ymm0 vbroadcastss 0x54(%r13), %ymm1 vbroadcastss 0x58(%r13), %ymm2 cmpl $0x1, 0x18(%r13) jne 0x1ed406e vmovss 0x10(%rbx), %xmm3 vmulss %xmm3, %xmm3, %xmm11 jmp 0x1ed407a vmovaps 0x50(%r13), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm11 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ed3a78 movl 0xc(%rsp), %eax addq $0x24d8, %rsp # imm = 0x24D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
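Across these listings, traverseClosestHit appears as a vpminsd/vpmaxsd compare-exchange network: distances and child indices appear to be packed into single integers (the vpternlogd against the two constant vectors), vpcompressd compacts the hit lanes, and the network then orders them so integer min/max sorts by distance while dragging the index along. In scalar form the policy is simply: sort the hit children by entry distance, descend the nearest, and push the rest farthest-first so nearer ones pop sooner. A sketch with invented types:

#include <cstdint>
#include <utility>

struct StackItem { uint64_t ptr; float dist; };

// Compare-exchange: afterwards a.dist <= b.dist (one vpminsd/vpmaxsd pair).
static inline void cmpx(StackItem& a, StackItem& b) {
  if (b.dist < a.dist) std::swap(a, b);
}

// Order up to 4 hit children, push all but the nearest (farthest first, so
// the stack pops near-to-far), and return the nearest to keep descending.
static uint64_t traverse_closest_hit4(StackItem c[4], int n, StackItem*& sp) {
  if (n > 1) cmpx(c[0], c[1]);
  if (n > 2) { cmpx(c[1], c[2]); cmpx(c[0], c[1]); }
  if (n > 3) { cmpx(c[2], c[3]); cmpx(c[1], c[2]); cmpx(c[0], c[1]); }
  for (int i = n - 1; i >= 1; --i) *sp++ = c[i];
  return c[0].ptr;
}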
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::ObjectIntersector1<true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24d8, %rsp # imm = 0x24D8 movq %rdx, 0x8(%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed40dd movq 0x70(%rax), %rax movq %rax, 0x190(%rsp) movl $0x0, 0x198(%rsp) cmpq $0x8, %rax jne 0x1ed40f2 addq $0x24d8, %rsp # imm = 0x24D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x4cdb5(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x1ced0(%rip), %xmm5 # 0x1ef0fe8 leaq 0x1a0(%rsp), %r8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x185d7(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm1, %xmm0 setb %dil vbroadcastss %xmm0, %ymm10 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm11 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x4cd74(%rip), %ymm6 # 0x1f20edc vmulps (%rsi), %xmm0, %xmm7 vpermps %ymm0, %ymm6, %ymm12 vbroadcastss %xmm7, %ymm8 vbroadcastss 0x3e585(%rip), %ymm0 # 0x1f12704 vpermps %ymm7, %ymm0, %ymm9 vpermps %ymm7, %ymm6, %ymm6 shll $0x5, %edi xorl %r9d, %r9d vucomiss %xmm1, %xmm4 setb %r9b shll $0x5, %r9d orq $0x40, %r9 xorl %r10d, %r10d vucomiss %xmm1, %xmm5 setb %r10b shll $0x5, %r10d orq $0x80, %r10 movq %rdi, %r11 xorq $0x20, %r11 movq %r9, %r14 xorq $0x20, %r14 movq %r10, %r15 xorq $0x20, %r15 vbroadcastss %xmm2, %ymm13 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x4cce3(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm8, %ymm8 vxorps %ymm1, %ymm9, %ymm9 vxorps %ymm1, %ymm6, %ymm14 leaq 0x190(%rsp), %rbp vpmovsxbd 0x89875(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8d3b7(%rip), %ymm17 # 0x1f615bc movq %rdi, 0x38(%rsp) vmovups %ymm10, 0x150(%rsp) vmovups %ymm11, 0x130(%rsp) vmovups %ymm12, 0x110(%rsp) movq %r9, 0x30(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %r14, 0x18(%rsp) movq %r15, 0x10(%rsp) vmovups %ymm13, 0xf0(%rsp) vmovups %ymm8, 0xd0(%rsp) vmovups %ymm9, 0xb0(%rsp) vmovups %ymm14, 0x90(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r8 je 0x1ed40dd vmovss -0x8(%r8), %xmm2 addq $-0x10, %r8 vucomiss %xmm1, %xmm2 ja 0x1ed4267 movq (%r8), %rbx testb $0x8, %bl jne 0x1ed434b movq %rbx, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%rdi), %ymm2 vfmadd213ps 0x40(%rax,%rdi), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm8, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm2) + ymm8 vmaxps %ymm2, %ymm13, %ymm2 vmovaps 0x100(%rax,%r9), %ymm3 vfmadd213ps 0x40(%rax,%r9), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r10), %ymm4 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm9, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm3) + ymm9 vfmadd213ps %ymm14, %ymm12, %ymm4 # ymm4 = (ymm12 * ymm4) + ymm14 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm18 vmovaps 0x100(%rax,%r11), %ymm2 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vmovaps 0x100(%rax,%r14), %ymm3 vfmadd213ps 0x40(%rax,%r14), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vfmadd213ps %ymm8, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm2) + ymm8 vfmadd213ps %ymm9, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm3) + ymm9 vmovaps 0x100(%rax,%r15), %ymm4 vfmadd213ps 0x40(%rax,%r15), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm14, %ymm12, %ymm4 # ymm4 = (ymm12 * ymm4) + ymm14 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, 
%ymm2, %ymm2 movl %ebx, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ed43bf vcmpleps %ymm2, %ymm18, %k0 kmovb %k0, %r12d testb $0x8, %bl jne 0x1ed43b8 testq %r12, %r12 je 0x1ed43db andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r12d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ed43e2 testl %eax, %eax je 0x1ed4283 jmp 0x1ed46f1 movl $0x6, %eax jmp 0x1ed43ab vcmpleps %ymm2, %ymm18, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ed4347 movl $0x4, %eax jmp 0x1ed43ab vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed4447 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r8) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r8) addq $0x10, %r8 jmp 0x1ed43ab vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed44d2 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r8) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r8) addq $0x20, %r8 jmp 0x1ed43ab vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm9, %ymm14 vmovaps %ymm8, %ymm9 movq %r15, %r13 movq %r14, %r15 movq %r11, %r14 movq %r10, %r11 movq %r9, %r10 movq %rdi, %r9 movq %rsi, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed45db vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r8) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r8) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r8) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r8) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r8) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r8) addq $0x30, %r8 movq %rdi, %rsi movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r14, %r11 movq %r15, %r14 movq %r13, %r15 vmovaps %ymm9, %ymm8 vmovaps %ymm14, %ymm9 vmovaps %ymm15, %ymm14 jmp 0x1ed43ab valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x4c8d5(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8d02b(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x8d024(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x8d017(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 
0x3e0d5(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed4622 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r8) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r8) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r8 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed468c vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rsi movq %r9, %rdi movq %r10, %r9 movq %r11, %r10 movq %r14, %r11 movq %r15, %r14 movq %r13, %r15 vmovaps %ymm9, %ymm8 vmovaps %ymm14, %ymm9 vmovaps %ymm15, %ymm14 vmovdqa64 %ymm19, %ymm18 jmp 0x1ed43ab cmpl $0x6, %eax jne 0x1ed4262 movq %r8, 0x40(%rsp) movl %ebx, %r14d andl $0xf, %r14d addq $-0x8, %r14 je 0x1ed4829 andq $-0x10, %rbx movq 0x8(%rsp), %rax movq (%rax), %r13 xorl %ebp, %ebp vmovdqu64 %ymm18, 0x170(%rsp) movl (%rbx,%rbp,8), %ecx movq 0x1e8(%r13), %rax movq (%rax,%rcx,8), %rax movl 0x34(%rax), %edx testl %edx, 0x24(%rsi) je 0x1ed481d movl 0x4(%rbx,%rbp,8), %edx movl $0xffffffff, 0x4(%rsp) # imm = 0xFFFFFFFF leaq 0x4(%rsp), %rdi movq %rdi, 0x48(%rsp) movq 0x18(%rax), %rdi movq %rdi, 0x50(%rsp) movq 0x8(%rsp), %rdi movq 0x8(%rdi), %r8 movq %r8, 0x60(%rsp) movq %rsi, %r15 movq %rsi, 0x68(%rsp) movl $0x1, 0x70(%rsp) movl %ecx, 0x74(%rsp) movl %edx, 0x58(%rsp) movq %rax, 0x78(%rsp) movq $0x0, 0x80(%rsp) movq 0x10(%rdi), %rcx movq %rcx, 0x88(%rsp) movq 0x18(%rcx), %rcx testq %rcx, %rcx jne 0x1ed47b2 movq 0x60(%rax), %rcx leaq 0x48(%rsp), %rdi vzeroupper callq *%rcx movq %r15, %rsi vmovups 0x150(%rsp), %ymm10 vmovups 0x130(%rsp), %ymm11 vmovups 0x110(%rsp), %ymm12 vmovups 0xf0(%rsp), %ymm13 vmovups 0xd0(%rsp), %ymm8 vmovups 0xb0(%rsp), %ymm9 vmovups 0x90(%rsp), %ymm14 vpmovsxbd 0x89268(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8cdaa(%rip), %ymm17 # 0x1f615bc vmovdqu64 0x170(%rsp), %ymm18 incq %rbp cmpq %rbp, %r14 jne 0x1ed4729 vbroadcastss 0x20(%rsi), %ymm0 movq 0x40(%rsp), %r8 movq 0x38(%rsp), %rdi movq 0x30(%rsp), %r9 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %r14 movq 0x10(%rsp), %r15 leaq 0x190(%rsp), %rbp jmp 0x1ed4262 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
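The 16777232 (0x1000010) template argument on the record above and the one that follows selects node types with motion blur, and the traversal shows the consequence: child bounds are time-dependent, fetched as a base slab plus a delta scaled by the broadcast ray time, one vfmadd213ps per slab (the paired loads at 0x40(...) and 0x100(...) off the node). Storing base and delta instead of two endpoint boxes makes the interpolation a single FMA rather than a subtract, multiply, and add. A sketch with an illustrative layout, not Embree's actual node struct:

// Hypothetical 8-wide motion-blur node: one slab shown, the others follow suit.
struct MBNode8 {
  float lower_x[8];   // bounds at segment start (the 0x40 loads, layout assumed)
  float lower_dx[8];  // linear change over the time segment (the 0x100 loads)
};

static inline void lower_x_at(const MBNode8& n, float time, float out[8]) {
  for (int i = 0; i < 8; ++i)
    out[i] = n.lower_dx[i] * time + n.lower_x[i];  // one fma per lane
}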
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::ObjectIntersector1<true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed4871 xorl %eax, %eax jmp 0x1ed502c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24d8, %rsp # imm = 0x24D8 movq %rdx, %r13 movq %rsi, %rbx movq 0x70(%rax), %rax movq %rax, 0x190(%rsp) movl $0x0, 0x198(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed48b0 vmovss 0x10(%rbx), %xmm0 vmulss %xmm0, %xmm0, %xmm12 jmp 0x1ed48bc vmovaps 0x50(%r13), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm12 leaq 0x1a0(%rsp), %r9 vbroadcastss (%rbx), %ymm10 vbroadcastss 0x4(%rbx), %ymm11 vbroadcastss 0x8(%rbx), %ymm13 vbroadcastss 0x50(%r13), %ymm0 vbroadcastss 0x54(%r13), %ymm1 vbroadcastss 0x58(%r13), %ymm2 movl $0x0, 0xc(%rsp) leaq 0x190(%rsp), %r10 vpmovsxbd 0x89170(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x8ccb3(%rip), %ymm15 # 0x1f615bc vmovups %ymm10, 0x130(%rsp) vmovups %ymm11, 0x110(%rsp) vmovups %ymm13, 0xf0(%rsp) vsubps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xd0(%rsp) vaddps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xb0(%rsp) vsubps %ymm1, %ymm11, %ymm3 vmovups %ymm3, 0x90(%rsp) vaddps %ymm1, %ymm11, %ymm1 vmovups %ymm1, 0x70(%rsp) vsubps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x50(%rsp) vaddps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x30(%rsp) vmulps %ymm0, %ymm0, %ymm16 vmovaps %xmm12, 0x20(%rsp) vmovups %ymm16, 0x150(%rsp) cmpq %r10, %r9 je 0x1ed5017 vmovss -0x8(%r9), %xmm0 addq $-0x10, %r9 vucomiss %xmm12, %xmm0 ja 0x1ed4980 movq (%r9), %rbp cmpl $0x1, 0x18(%r13) jne 0x1ed4af0 testb $0x8, %bpl jne 0x1ed4a7a movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%rbx), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm11, %ymm3, %ymm3 vmaxps %ymm5, %ymm13, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm13, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps %ymm16, %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ed4c58 kmovb %k0, %r8d testb $0x8, %bpl jne 0x1ed4c44 testq %r8, %r8 je 0x1ed4c4e andq $-0x10, %rbp vmovdqu (%rbp), %ymm0 vmovdqu 0x20(%rbp), %ymm1 vmovdqa %ymm14, %ymm2 vpternlogd $0xf8, %ymm15, %ymm7, %ymm2 kmovd %r8d, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rbp prefetcht0 (%rbp) prefetcht0 0x40(%rbp) prefetcht0 0x80(%rbp) prefetcht0 0xc0(%rbp) xorl %eax, %eax blsrq %r8, %rcx jne 0x1ed4c71 testl %eax, %eax je 0x1ed499d jmp 0x1ed4f08 testb $0x8, %bpl jne 0x1ed4a7a movq %rbp, %rax andq $-0x10, %rax vbroadcastss 0xc(%rbx), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # 
ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm11, %ymm8, %ymm8 vmaxps %ymm5, %ymm13, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm13, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0xd0(%rsp), %ymm2, %k0 vcmpnleps 0xb0(%rsp), %ymm1, %k1 vcmpltps 0x90(%rsp), %ymm4, %k2 vcmpnleps 0x70(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x50(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0x30(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edi andb %cl, %dil movzbl %dil, %r8d movl %ebp, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ed4a7a vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %r8b, %al movzbl %al, %r8d jmp 0x1ed4a7a movl $0x6, %eax jmp 0x1ed4ae3 movl $0x4, %eax jmp 0x1ed4ae3 vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ed4a76 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed4cd5 vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rbp vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%r9) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed4ae3 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed4d5e vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r9) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r9) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%r9) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed4ae3 movq %r8, %r11 vmovdqa %ymm7, %ymm9 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed4e1a vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rbp vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%r9) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%r9) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%r9) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%r9) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%r9) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%r9) addq $0x30, %r9 movq %r11, %r8 jmp 0x1ed4ae3 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4c096(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8c7ed(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 
0x8c7e6(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x8c7d9(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3d897(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed4e60 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%r9) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %r9 vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed4eca vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rbp vmovdqa %ymm9, %ymm7 jmp 0x1ed4e12 cmpl $0x6, %eax jne 0x1ed4980 movl %ebp, %r12d andl $0xf, %r12d addq $-0x8, %r12 je 0x1ed4980 movq %r8, 0x10(%rsp) vmovups %ymm7, 0x170(%rsp) movq %r9, 0x18(%rsp) andq $-0x10, %rbp xorl %r15d, %r15d xorl %r14d, %r14d movq (%r13), %rax movl (%rbp,%r15,8), %ecx movq 0x1e8(%rax), %rax movq (%rax,%rcx,8), %rdi movl %ecx, 0x44(%r13) movl 0x4(%rbp,%r15,8), %eax movl %eax, 0x40(%r13) movq %rbx, %rsi movq %r13, %rdx vzeroupper callq 0x91bd12 orb %al, %r14b incq %r15 cmpq %r15, %r12 jne 0x1ed4f3f testb $0x1, %r14b movq 0x18(%rsp), %r9 vmovups 0x130(%rsp), %ymm10 vmovups 0x110(%rsp), %ymm11 vmovaps 0x20(%rsp), %xmm12 vmovups 0xf0(%rsp), %ymm13 leaq 0x190(%rsp), %r10 vpmovsxbd 0x88abc(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x8c5ff(%rip), %ymm15 # 0x1f615bc vmovups 0x150(%rsp), %ymm16 vmovups 0x170(%rsp), %ymm7 movq 0x10(%rsp), %r8 je 0x1ed4980 vbroadcastss 0x50(%r13), %ymm0 vbroadcastss 0x54(%r13), %ymm1 vbroadcastss 0x58(%r13), %ymm2 cmpl $0x1, 0x18(%r13) jne 0x1ed5000 vmovss 0x10(%rbx), %xmm3 vmulss %xmm3, %xmm3, %xmm12 jmp 0x1ed500c vmovaps 0x50(%r13), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm12 movb $0x1, %al movl %eax, 0xc(%rsp) jmp 0x1ed4924 movl 0xc(%rsp), %eax addq $0x24d8, %rsp # imm = 0x24D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
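Note: the assembly tail above (the pointQuery listing ending at this file entry) shows the core of the sphere point query: vmaxps/vminps clamp the query point into each child's box, vsubps/vmulps/vaddps accumulate the squared distance, and vcmpleps compares it against the squared cull radius, eight children at a time. A minimal scalar sketch of that closest-point-on-AABB test follows; Vec3 and AABB are illustrative stand-ins, not Embree's Vec3fa/BBox types.

    #include <algorithm>

    // Illustrative types; Embree uses its own Vec3fa/BBox classes.
    struct Vec3 { float x, y, z; };
    struct AABB { Vec3 lower, upper; };

    // Squared distance from point p to the closest point inside box b.
    // This is what the vmaxps/vminps/vsubps sequence computes, eight boxes at a time.
    static float sqrDistancePointAABB(const Vec3& p, const AABB& b)
    {
        float cx = std::min(std::max(p.x, b.lower.x), b.upper.x); // clamp p into the box
        float cy = std::min(std::max(p.y, b.lower.y), b.upper.y);
        float cz = std::min(std::max(p.z, b.lower.z), b.upper.z);
        float dx = cx - p.x, dy = cy - p.y, dz = cz - p.z;
        return dx*dx + dy*dy + dz*dz;
    }

    // A child is visited iff the query sphere of radius r around p overlaps its box.
    static bool sphereOverlapsAABB(const Vec3& p, float r, const AABB& b)
    {
        return sqrDistancePointAABB(p, b) <= r*r; // squared compare, no sqrt (vcmpleps)
    }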
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceIntersector1>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed507b pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2488, %rsp # imm = 0x2488 movq 0x70(%rax), %rax movq %rax, 0x140(%rsp) movl $0x0, 0x148(%rsp) cmpq $0x8, %rax jne 0x1ed507f addq $0x2488, %rsp # imm = 0x2488 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss 0xc(%rsi), %xmm2, %xmm1 vmaxss 0x20(%rsi), %xmm2, %xmm3 vandps 0x4be28(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x1bf43(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x17652(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x150(%rsp), %r9 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r8d, %r8d vucomiss %xmm2, %xmm0 setb %r8b vbroadcastss %xmm0, %ymm9 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm10 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x4bde6(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm11 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x3d5f7(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %r8d xorl %r10d, %r10d vucomiss %xmm2, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm2, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r8, %rbx xorq $0x20, %rbx movq %r10, %r15 xorq $0x20, %r15 movq %r11, %r13 xorq $0x20, %r13 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x4bd59(%rip), %ymm2 # 0x1f20ec0 vxorps %ymm2, %ymm7, %ymm12 vxorps %ymm2, %ymm8, %ymm8 vxorps %ymm2, %ymm6, %ymm13 vbroadcastss %xmm1, %ymm14 vpmovsxbd 0x888ee(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8c430(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp movq %r8, 0x30(%rsp) vmovups %ymm9, 0x100(%rsp) vmovups %ymm10, 0xe0(%rsp) vmovups %ymm11, 0xc0(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %rbx, 0x18(%rsp) movq %r15, 0x10(%rsp) vmovups %ymm12, 0xa0(%rsp) vmovups %ymm8, 0x80(%rsp) vmovups %ymm13, 0x60(%rsp) vmovups %ymm14, 0x40(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r9 je 0x1ed506a vmovss -0x8(%r9), %xmm2 addq $-0x10, %r9 vucomiss %xmm1, %xmm2 ja 0x1ed51eb movq (%r9), %r12 testb $0x8, %r12b jne 0x1ed527f vmovaps 0x40(%r12,%r8), %ymm1 vfmadd132ps %ymm9, %ymm12, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm12 vmovaps 0x40(%r12,%r10), %ymm2 vfmadd132ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm2 * ymm10) + ymm8 vmovaps 0x40(%r12,%r11), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm11, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm11) + ymm13 vpmaxsd %ymm14, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm18 vmovaps 0x40(%r12,%rbx), %ymm1 vfmadd132ps %ymm9, %ymm12, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm12 vmovaps 0x40(%r12,%r15), %ymm2 vfmadd132ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm2 * ymm10) + ymm8 vmovaps 0x40(%r12,%r13), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm11, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm11) + ymm13 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm18, %k0 kmovb %k0, %r14d testb $0x8, %r12b jne 0x1ed52f5 testq %r14, %r14 je 0x1ed52fc andq $-0x10, %r12 vmovdqu (%r12), %ymm1 vmovdqu 0x20(%r12), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r14d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %r14, %rcx jne 0x1ed5303 testl %eax, %eax je 0x1ed5207 
jmp 0x1ed561f movl $0x6, %eax jmp 0x1ed52e8 movl $0x4, %eax jmp 0x1ed52e8 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed5365 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r12 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r9) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed52e8 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed53f0 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r12 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r9) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed52e8 vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm8, %ymm13 movq %r15, %rbp movq %rbx, %r15 movq %r11, %rbx movq %r10, %r11 movq %r8, %r10 movq %rsi, %r8 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed5501 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r12 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r9) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r9) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r9) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r9) addq $0x30, %r9 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx movq %rbp, %r15 vmovaps %ymm13, %ymm8 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp jmp 0x1ed52e8 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x4b9af(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8c105(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x8c0fe(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x8c0f1(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x3d1af(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed5548 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r9) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r9) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] 
addq $0x10, %r9 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed55b2 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r12 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx movq %rbp, %r15 vmovaps %ymm13, %ymm8 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp vmovdqa64 %ymm19, %ymm18 jmp 0x1ed52e8 cmpl $0x6, %eax jne 0x1ed51e6 vmovdqu64 %ymm18, 0x120(%rsp) movq %r9, 0x38(%rsp) movl %r12d, %ebx andl $0xf, %ebx addq $-0x8, %rbx je 0x1ed566a andq $-0x10, %r12 leaq 0xf(%rsp), %rdi movq %r12, %rcx movq %rdx, %rbp movq %rsi, %r15 vzeroupper callq 0x8b9e92 movq %r15, %rsi movq %rbp, %rdx addq $0x10, %r12 decq %rbx jne 0x1ed5645 vbroadcastss 0x20(%rsi), %ymm0 movq 0x38(%rsp), %r9 movq 0x30(%rsp), %r8 vmovups 0x100(%rsp), %ymm9 vmovups 0xe0(%rsp), %ymm10 vmovups 0xc0(%rsp), %ymm11 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %rbx movq 0x10(%rsp), %r15 vmovups 0xa0(%rsp), %ymm12 vmovups 0x80(%rsp), %ymm8 vmovups 0x60(%rsp), %ymm13 vmovups 0x40(%rsp), %ymm14 vpmovsxbd 0x8839f(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8bee1(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp vmovdqu64 0x120(%rsp), %ymm18 jmp 0x1ed51e6
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
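Note: the intersect() entry above is dominated by the pop loop: a fixed-size stack of (node pointer, entry distance) items, where a popped item is skipped outright when its recorded distance already exceeds ray.tfar. A compile-clean skeleton of just that control flow, with the SIMD node test left as a comment; StackItem and the stack size are illustrative, not Embree's StackItemT or its real sizing.

    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    struct StackItem { uint64_t ptr; float dist; }; // plays the role of StackItemT<NodeRef>

    void traverse_sketch(uint64_t root, float& tfar)
    {
        constexpr std::size_t stackSize = 564;   // illustrative; Embree sizes this from BVH depth/width
        StackItem stack[stackSize];
        StackItem* stackPtr = stack + 1;         // current stack pointer
        stack[0] = { root, -INFINITY };

        while (stackPtr != stack)                // pop loop
        {
            StackItem cur = *--stackPtr;
            if (cur.dist > tfar) continue;       // popped node already beyond the closest hit

            // The downtraversal loop goes here: intersect the node's children,
            // descend into the nearest hit child and push the others sorted by
            // entry distance; at a leaf, intersect primitives (may shrink tfar).
        }
    }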
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceIntersector1>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed5701 xorl %eax, %eax jmp 0x1ed5d9a pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24c8, %rsp # imm = 0x24C8 movq %rdx, %rbx movq %rsi, %r14 movq 0x70(%rax), %rax movq %rax, 0x180(%rsp) movl $0x0, 0x188(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed5741 vmovss 0x10(%r14), %xmm0 vmulss %xmm0, %xmm0, %xmm11 jmp 0x1ed574c vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm11 leaq 0x190(%rsp), %rsi vbroadcastss (%r14), %ymm9 vbroadcastss 0x4(%r14), %ymm10 vbroadcastss 0x8(%r14), %ymm12 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0x4(%rsp) leaq 0x180(%rsp), %r8 vpmovsxbd 0x882e0(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x8be23(%rip), %ymm14 # 0x1f615bc vmovups %ymm9, 0x120(%rsp) vmovups %ymm10, 0x100(%rsp) vmovups %ymm12, 0xe0(%rsp) vsubps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xa0(%rsp) vsubps %ymm1, %ymm10, %ymm3 vmovups %ymm3, 0x80(%rsp) vaddps %ymm1, %ymm10, %ymm1 vmovups %ymm1, 0x60(%rsp) vsubps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x40(%rsp) vaddps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x20(%rsp) vmulps %ymm0, %ymm0, %ymm15 vmovaps %xmm11, 0x10(%rsp) vmovups %ymm15, 0x140(%rsp) cmpq %r8, %rsi je 0x1ed5d85 vmovss -0x8(%rsi), %xmm0 addq $-0x10, %rsi vucomiss %xmm11, %xmm0 ja 0x1ed580c movq (%rsi), %r15 cmpl $0x1, 0x18(%rbx) jne 0x1ed591e testb $0x8, %r15b jne 0x1ed58a5 vmovaps 0x40(%r15), %ymm0 vmovaps 0x60(%r15), %ymm1 vmaxps %ymm0, %ymm9, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm9, %ymm2, %ymm2 vmaxps 0x80(%r15), %ymm10, %ymm3 vminps 0xa0(%r15), %ymm3, %ymm3 vmaxps 0xc0(%r15), %ymm12, %ymm4 vsubps %ymm10, %ymm3, %ymm3 vminps 0xe0(%r15), %ymm4, %ymm4 vsubps %ymm12, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm15, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %ebp testb $0x8, %r15b jne 0x1ed59f7 testq %rbp, %rbp je 0x1ed5a01 andq $-0x10, %r15 vmovdqu (%r15), %ymm0 vmovdqu 0x20(%r15), %ymm1 vmovdqa %ymm13, %ymm2 vpternlogd $0xf8, %ymm14, %ymm7, %ymm2 kmovd %ebp, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r15 prefetcht0 (%r15) prefetcht0 0x40(%r15) prefetcht0 0x80(%r15) prefetcht0 0xc0(%r15) xorl %eax, %eax blsrq %rbp, %rcx jne 0x1ed5a0b testl %eax, %eax je 0x1ed5828 jmp 0x1ed5ca3 testb $0x8, %r15b jne 0x1ed58a5 vmovaps 0xc0(%r15), %ymm0 vmovaps 0x40(%r15), %ymm1 vmovaps 0x60(%r15), %ymm2 vmovaps 0x80(%r15), %ymm3 vmovaps 0xa0(%r15), %ymm4 vmovaps 0xe0(%r15), %ymm5 vmaxps %ymm1, %ymm9, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps %ymm9, %ymm6, %ymm6 vmaxps %ymm3, %ymm10, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm0, %ymm12, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm12, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0xc0(%rsp), %ymm2, %k0 vcmpnleps 0xa0(%rsp), %ymm1, %k1 vcmpltps 0x80(%rsp), %ymm4, %k2 vcmpnleps 0x60(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x40(%rsp), %ymm5, %k3 korb %k3, %k2, %k2 vcmpnleps 0x20(%rsp), %ymm0, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %ebp jmp 0x1ed58a5 movl $0x6, %eax jmp 0x1ed5911 movl $0x4, %eax jmp 0x1ed5911 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = 
ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed5a6d vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r15 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%rsi) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rsi) addq $0x10, %rsi jmp 0x1ed5911 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed5af2 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r15 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%rsi) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rsi) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rsi) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rsi) addq $0x20, %rsi jmp 0x1ed5911 vmovaps %ymm15, %ymm16 vmovdqa %ymm7, %ymm15 movq %rsi, %rdi vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed5bb4 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r15 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq %rdi, %rsi vmovq %xmm2, (%rdi) vmovdqa %ymm15, %ymm7 vpermd %ymm15, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rdi) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rdi) vpermd %ymm15, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rdi) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rdi) vpermd %ymm15, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rdi) addq $0x30, %rsi vmovaps %ymm16, %ymm15 jmp 0x1ed5911 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4b2fc(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8ba53(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x8ba4c(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x8ba3f(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3cafd(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed5bfa popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm15, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdi) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed5c64 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r15 movq %rdi, %rsi vmovdqa %ymm15, %ymm7 jmp 0x1ed5ba9 cmpl $0x6, %eax jne 0x1ed580c movl %r15d, %r13d andl $0xf, %r13d addq $-0x8, %r13 je 0x1ed580c vmovups %ymm7, 0x160(%rsp) movq %rsi, 0x8(%rsp) andq $-0x10, %r15 xorl %r12d, %r12d movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx vzeroupper callq 
0x1edf478 orb %al, %r12b addq $0x10, %r15 decq %r13 jne 0x1ed5cd2 testb $0x1, %r12b movq 0x8(%rsp), %rsi vmovups 0x120(%rsp), %ymm9 vmovups 0x100(%rsp), %ymm10 vmovaps 0x10(%rsp), %xmm11 vmovups 0xe0(%rsp), %ymm12 leaq 0x180(%rsp), %r8 vpmovsxbd 0x87d46(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x8b889(%rip), %ymm14 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 vmovups 0x140(%rsp), %ymm15 je 0x1ed580c vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed5d6f vmovss 0x10(%r14), %xmm3 vmulss %xmm3, %xmm3, %xmm11 jmp 0x1ed5d7a vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm11 movb $0x1, %al movl %eax, 0x4(%rsp) jmp 0x1ed57b4 movl 0x4(%rsp), %eax addq $0x24c8, %rsp # imm = 0x24C8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
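Note: in the pointQuery entry above, the squared cull radius is chosen per query type: radius*radius for a sphere query, dot(query_radius, query_radius) for an AABB query. That is exactly the cmpl $0x1 / vmulss / vdpps $0x7f split visible at the top of the assembly. A sketch follows, with the caveat that the enum values and type names are assumptions for illustration.

    // Illustrative stand-ins for Embree's PointQuery/PointQueryContext fields.
    struct Vec3q { float x, y, z; };
    enum QueryType { POINT_QUERY_TYPE_AABB = 0,
                     POINT_QUERY_TYPE_SPHERE = 1 }; // values assumed from the cmpl $0x1 in this build

    static float dot3(const Vec3q& a, const Vec3q& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

    static float cullRadiusSq(QueryType type, float radius, const Vec3q& query_radius)
    {
        // Sphere query: vmulss radius*radius. AABB query: vdpps $0x7f, a
        // 3-component dot product of the per-axis query extents.
        return type == POINT_QUERY_TYPE_SPHERE ? radius * radius
                                               : dot3(query_radius, query_radius);
    }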
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceIntersector1MB>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed5de9 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2488, %rsp # imm = 0x2488 movq 0x70(%rax), %rax movq %rax, 0x140(%rsp) movl $0x0, 0x148(%rsp) cmpq $0x8, %rax jne 0x1ed5ded addq $0x2488, %rsp # imm = 0x2488 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x4b0ba(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x1b1d5(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x168e4(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x150(%rsp), %r9 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r8d, %r8d vucomiss %xmm1, %xmm0 setb %r8b vbroadcastss %xmm0, %ymm9 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm10 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x4b078(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm11 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x3c889(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %r8d xorl %r10d, %r10d vucomiss %xmm1, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm1, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r8, %r15 xorq $0x20, %r15 movq %r10, %r12 xorq $0x20, %r12 movq %r11, %r13 xorq $0x20, %r13 vbroadcastss %xmm2, %ymm12 vbroadcastss 0x4afeb(%rip), %ymm1 # 0x1f20ec0 vbroadcastss %xmm3, %ymm0 vxorps %ymm1, %ymm7, %ymm13 vxorps %ymm1, %ymm8, %ymm8 vxorps %ymm1, %ymm6, %ymm14 vpmovsxbd 0x87b80(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8b6c2(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp movq %r8, 0x30(%rsp) vmovups %ymm9, 0x100(%rsp) vmovups %ymm10, 0xe0(%rsp) vmovups %ymm11, 0xc0(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %r15, 0x18(%rsp) movq %r12, 0x10(%rsp) vmovups %ymm12, 0xa0(%rsp) vmovups %ymm13, 0x80(%rsp) vmovups %ymm8, 0x60(%rsp) vmovups %ymm14, 0x40(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r9 je 0x1ed5dd8 vmovss -0x8(%r9), %xmm2 addq $-0x10, %r9 vucomiss %xmm1, %xmm2 ja 0x1ed5f59 movq (%r9), %rbx testb $0x8, %bl jne 0x1ed603e movq %rbx, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%r8), %ymm2 vfmadd213ps 0x40(%rax,%r8), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm13, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm2) + ymm13 vmaxps %ymm2, %ymm12, %ymm2 vmovaps 0x100(%rax,%r10), %ymm3 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r11), %ymm4 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm8, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm3) + ymm8 vfmadd213ps %ymm14, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm4) + ymm14 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm18 vmovaps 0x100(%rax,%r15), %ymm2 vfmadd213ps 0x40(%rax,%r15), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vmovaps 0x100(%rax,%r12), %ymm3 vfmadd213ps 0x40(%rax,%r12), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vfmadd213ps %ymm13, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm2) + ymm13 vfmadd213ps %ymm8, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm3) + ymm8 vmovaps 0x100(%rax,%r13), %ymm4 vfmadd213ps 0x40(%rax,%r13), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm14, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm4) + ymm14 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %ebx, %ecx andl $0x7, 
%ecx cmpl $0x6, %ecx je 0x1ed60b2 vcmpleps %ymm2, %ymm18, %k0 kmovb %k0, %r14d testb $0x8, %bl jne 0x1ed60ab testq %r14, %r14 je 0x1ed60ce andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r14d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r14, %rcx jne 0x1ed60d5 testl %eax, %eax je 0x1ed5f75 jmp 0x1ed63e5 movl $0x6, %eax jmp 0x1ed609e vcmpleps %ymm2, %ymm18, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ed603a movl $0x4, %eax jmp 0x1ed609e vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed613a vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r9) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed609e vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed61c5 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r9) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed609e vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm8, %ymm14 movq %r12, %rbp movq %r15, %r12 movq %r11, %r15 movq %r10, %r11 movq %r8, %r10 movq %rsi, %r8 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed62cc vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r9) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r9) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r9) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r9) addq $0x30, %r9 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %rbp, %r12 vmovaps %ymm14, %ymm8 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp jmp 0x1ed609e valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x4abe4(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8b33a(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x8b333(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x8b326(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x3c3e4(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 
valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed6313 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r9) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r9) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r9 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed637d vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %rbp, %r12 vmovaps %ymm14, %ymm8 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp vmovdqa64 %ymm19, %ymm18 jmp 0x1ed609e cmpl $0x6, %eax jne 0x1ed5f54 vmovdqu64 %ymm18, 0x120(%rsp) movq %r9, 0x38(%rsp) movl %ebx, %r12d andl $0xf, %r12d addq $-0x8, %r12 je 0x1ed6431 andq $-0x10, %rbx leaq 0xf(%rsp), %rdi movq %rbx, %rcx movq %rdx, %rbp movq %rsi, %r15 vzeroupper callq 0x8ba070 movq %r15, %rsi movq %rbp, %rdx addq $0x10, %rbx decq %r12 jne 0x1ed640c vbroadcastss 0x20(%rsi), %ymm0 movq 0x38(%rsp), %r9 movq 0x30(%rsp), %r8 vmovups 0x100(%rsp), %ymm9 vmovups 0xe0(%rsp), %ymm10 vmovups 0xc0(%rsp), %ymm11 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %r15 movq 0x10(%rsp), %r12 vmovups 0xa0(%rsp), %ymm12 vmovups 0x80(%rsp), %ymm13 vmovups 0x60(%rsp), %ymm8 vmovups 0x40(%rsp), %ymm14 vpmovsxbd 0x875d8(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8b11a(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp vmovdqu64 0x120(%rsp), %ymm18 jmp 0x1ed5f54 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
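Note: the motion-blur intersect variant above reconstructs node bounds at the ray's time before the slab test: vbroadcastss loads ray.time, and each vfmadd213ps computes base + time*delta from the node's stored bounds and per-segment deltas. A sketch of that interpolation for one axis/side; the field layout and names are illustrative, and the real node repeats this for the x/y/z lower and upper bounds.

    // One axis/side of the interpolation (N = 8 children per node).
    struct NodeBoundsMB {
        float lower0[8];  // child bounds at the start of the time segment
        float dlower[8];  // per-child delta over the time segment
    };

    static void interpolateLower(const NodeBoundsMB& n, float time, float lower[8])
    {
        for (int i = 0; i < 8; ++i)
            lower[i] = n.lower0[i] + time * n.dlower[i]; // one vfmadd213ps per side/axis
    }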
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceIntersector1MB>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return false; /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; /* verify correct input */ assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f)); /* load the point query into SIMD registers */ TravPointQuery<N> tquery(query->p, context->query_radius); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N,types> nodeTraverser; bool changed = false; float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > cull_radius)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(point_query.trav_nodes,1,1,1); bool nodeIntersected; if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) { nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } else { nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask); } if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(point_query.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) { changed = true; tquery.rad = context->query_radius; cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE ? query->radius * query->radius : dot(context->query_radius, context->query_radius); } /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } return changed; }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed64c9 xorl %eax, %eax jmp 0x1ed6c3c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24c8, %rsp # imm = 0x24C8 movq %rdx, %rbx movq %rsi, %r14 movq 0x70(%rax), %rax movq %rax, 0x180(%rsp) movl $0x0, 0x188(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed6509 vmovss 0x10(%r14), %xmm0 vmulss %xmm0, %xmm0, %xmm12 jmp 0x1ed6514 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm12 leaq 0x190(%rsp), %rsi vbroadcastss (%r14), %ymm10 vbroadcastss 0x4(%r14), %ymm11 vbroadcastss 0x8(%r14), %ymm13 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0x4(%rsp) leaq 0x180(%rsp), %r8 vpmovsxbd 0x87518(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x8b05b(%rip), %ymm15 # 0x1f615bc vmovups %ymm10, 0x120(%rsp) vmovups %ymm11, 0x100(%rsp) vmovups %ymm13, 0xe0(%rsp) vsubps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xa0(%rsp) vsubps %ymm1, %ymm11, %ymm3 vmovups %ymm3, 0x80(%rsp) vaddps %ymm1, %ymm11, %ymm1 vmovups %ymm1, 0x60(%rsp) vsubps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x40(%rsp) vaddps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x20(%rsp) vmulps %ymm0, %ymm0, %ymm16 vmovaps %xmm12, 0x10(%rsp) vmovups %ymm16, 0x140(%rsp) cmpq %r8, %rsi je 0x1ed6c27 vmovss -0x8(%rsi), %xmm0 addq $-0x10, %rsi vucomiss %xmm12, %xmm0 ja 0x1ed65d5 movq (%rsi), %r13 cmpl $0x1, 0x18(%rbx) jne 0x1ed6749 testb $0x8, %r13b jne 0x1ed66ce movq %r13, %rax andq $-0x10, %rax vbroadcastss 0xc(%r14), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm11, %ymm3, %ymm3 vmaxps %ymm5, %ymm13, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm13, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps %ymm16, %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %r13d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ed68a1 kmovb %k0, %ebp testb $0x8, %r13b jne 0x1ed688d testq %rbp, %rbp je 0x1ed6897 andq $-0x10, %r13 vmovdqu (%r13), %ymm0 vmovdqu 0x20(%r13), %ymm1 vmovdqa %ymm14, %ymm2 vpternlogd $0xf8, %ymm15, %ymm7, %ymm2 kmovd %ebp, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %rbp, %rcx jne 0x1ed68ba testl %eax, %eax je 0x1ed65f1 jmp 0x1ed6b46 testb $0x8, %r13b jne 0x1ed66ce movq %r13, %rax andq $-0x10, %rax vbroadcastss 0xc(%r14), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, 
%ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm11, %ymm8, %ymm8 vmaxps %ymm5, %ymm13, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm13, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0xc0(%rsp), %ymm2, %k0 vcmpnleps 0xa0(%rsp), %ymm1, %k1 vcmpltps 0x80(%rsp), %ymm4, %k2 vcmpnleps 0x60(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x40(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0x20(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %ebp movl %r13d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ed66ce vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %bpl, %al movzbl %al, %ebp jmp 0x1ed66ce movl $0x6, %eax jmp 0x1ed673c movl $0x4, %eax jmp 0x1ed673c vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ed66ca vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed691c vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r13 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%rsi) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rsi) addq $0x10, %rsi jmp 0x1ed673c vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed69a1 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r13 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%rsi) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rsi) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rsi) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rsi) addq $0x20, %rsi jmp 0x1ed673c vmovdqa %ymm7, %ymm9 movq %rsi, %rdi vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed6a57 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r13 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq %rdi, %rsi vmovq %xmm2, (%rdi) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rdi) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rdi) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rdi) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rdi) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rdi) addq $0x30, %rsi jmp 0x1ed673c valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4a459(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x8abb0(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, 
%ymm2 vpmovsxbd 0x8aba9(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x8ab9c(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3bc5a(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed6a9d popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdi) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed6b07 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r13 movq %rdi, %rsi vmovdqa %ymm9, %ymm7 jmp 0x1ed673c cmpl $0x6, %eax jne 0x1ed65d5 movl %r13d, %r15d andl $0xf, %r15d addq $-0x8, %r15 je 0x1ed65d5 vmovdqu %ymm7, 0x160(%rsp) movq %rsi, 0x8(%rsp) andq $-0x10, %r13 xorl %r12d, %r12d movq %r14, %rdi movq %rbx, %rsi movq %r13, %rdx vzeroupper callq 0x1edf96e orb %al, %r12b addq $0x10, %r13 decq %r15 jne 0x1ed6b75 testb $0x1, %r12b movq 0x8(%rsp), %rsi vmovups 0x120(%rsp), %ymm10 vmovups 0x100(%rsp), %ymm11 vmovaps 0x10(%rsp), %xmm12 vmovups 0xe0(%rsp), %ymm13 leaq 0x180(%rsp), %r8 vpmovsxbd 0x86ea3(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x8a9e6(%rip), %ymm15 # 0x1f615bc vmovups 0x140(%rsp), %ymm16 vmovdqu 0x160(%rsp), %ymm7 je 0x1ed65d5 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed6c11 vmovss 0x10(%r14), %xmm3 vmulss %xmm3, %xmm3, %xmm12 jmp 0x1ed6c1c vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm12 movb $0x1, %al movl %eax, 0x4(%rsp) jmp 0x1ed657c movl 0x4(%rsp), %eax addq $0x24c8, %rsp # imm = 0x24C8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
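Note: the MB pointQuery above additionally masks children by time for 4D nodes (node type 6 in the andl $0x7 tag check): the vcmpgeps/vcmpltps pair against two per-child arrays keeps a child only when the query time lies in its [time_lower, time_upper) window, and the result is AND-ed into the geometric hit mask (kandb). A scalar sketch; the arrays are illustrative stand-ins for the node fields the asm reads at offsets 0x1c0/0x1e0.

    static unsigned timeRangeMask(const float time_lower[8], const float time_upper[8],
                                  float t, unsigned hitMask)
    {
        unsigned mask = 0;
        for (int i = 0; i < 8; ++i)
            if (t >= time_lower[i] && t < time_upper[i])   // vcmpgeps / vcmpltps
                mask |= 1u << i;
        return hitMask & mask;                             // kandb
    }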
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceArrayIntersector1>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context) { const BVH* __restrict__ bvh = (const BVH*)This->ptr; /* we may traverse an empty BVH in case all geometry was invalid */ if (bvh->root == BVH::emptyNode) return; /* perform per ray precalculations required by the primitive intersector */ Precalculations pre(ray, bvh); /* stack state */ StackItemT<NodeRef> stack[stackSize]; // stack of nodes StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer StackItemT<NodeRef>* stackEnd = stack+stackSize; stack[0].ptr = bvh->root; stack[0].dist = neg_inf; if (bvh->root == BVH::emptyNode) return; /* filter out invalid rays */ #if defined(EMBREE_IGNORE_INVALID_RAYS) if (!ray.valid()) return; #endif /* verify correct input */ assert(ray.valid()); assert(ray.tnear() >= 0.0f); assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f)); /* load the ray into SIMD registers */ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f)); /* initialize the node traverser */ BVHNNodeTraverser1Hit<N, types> nodeTraverser; /* pop loop */ while (true) pop: { /* pop next node */ if (unlikely(stackPtr == stack)) break; stackPtr--; NodeRef cur = NodeRef(stackPtr->ptr); /* if popped node is too far, pop next one */ if (unlikely(*(float*)&stackPtr->dist > ray.tfar)) continue; /* downtraversal loop */ while (true) { /* intersect node */ size_t mask; vfloat<N> tNear; STAT3(normal.trav_nodes,1,1,1); bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask); if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; } /* if no child is hit, pop next node */ if (unlikely(mask == 0)) goto pop; /* select next child and push other children */ nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd); } /* this is a leaf node */ assert(cur != BVH::emptyNode); STAT3(normal.trav_leaves,1,1,1); size_t num; Primitive* prim = (Primitive*)cur.leaf(num); size_t lazy_node = 0; PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node); tray.tfar = ray.tfar; /* push lazy node onto stack */ if (unlikely(lazy_node)) { stackPtr->ptr = lazy_node; stackPtr->dist = neg_inf; stackPtr++; } } }
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed6c8b pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2488, %rsp # imm = 0x2488 movq 0x70(%rax), %rax movq %rax, 0x140(%rsp) movl $0x0, 0x148(%rsp) cmpq $0x8, %rax jne 0x1ed6c8f addq $0x2488, %rsp # imm = 0x2488 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss 0xc(%rsi), %xmm2, %xmm1 vmaxss 0x20(%rsi), %xmm2, %xmm3 vandps 0x4a218(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x1a333(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x15a42(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x150(%rsp), %r9 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r8d, %r8d vucomiss %xmm2, %xmm0 setb %r8b vbroadcastss %xmm0, %ymm9 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm10 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x4a1d6(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm11 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x3b9e7(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %r8d xorl %r10d, %r10d vucomiss %xmm2, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm2, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r8, %rbx xorq $0x20, %rbx movq %r10, %r15 xorq $0x20, %r15 movq %r11, %r13 xorq $0x20, %r13 vbroadcastss %xmm3, %ymm0 vbroadcastss 0x4a149(%rip), %ymm2 # 0x1f20ec0 vxorps %ymm2, %ymm7, %ymm12 vxorps %ymm2, %ymm8, %ymm8 vxorps %ymm2, %ymm6, %ymm13 vbroadcastss %xmm1, %ymm14 vpmovsxbd 0x86cde(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8a820(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp movq %r8, 0x30(%rsp) vmovups %ymm9, 0x100(%rsp) vmovups %ymm10, 0xe0(%rsp) vmovups %ymm11, 0xc0(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %rbx, 0x18(%rsp) movq %r15, 0x10(%rsp) vmovups %ymm12, 0xa0(%rsp) vmovups %ymm8, 0x80(%rsp) vmovups %ymm13, 0x60(%rsp) vmovups %ymm14, 0x40(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r9 je 0x1ed6c7a vmovss -0x8(%r9), %xmm2 addq $-0x10, %r9 vucomiss %xmm1, %xmm2 ja 0x1ed6dfb movq (%r9), %r12 testb $0x8, %r12b jne 0x1ed6e8f vmovaps 0x40(%r12,%r8), %ymm1 vfmadd132ps %ymm9, %ymm12, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm12 vmovaps 0x40(%r12,%r10), %ymm2 vfmadd132ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm2 * ymm10) + ymm8 vmovaps 0x40(%r12,%r11), %ymm3 vpmaxsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm11, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm11) + ymm13 vpmaxsd %ymm14, %ymm3, %ymm2 vpmaxsd %ymm2, %ymm1, %ymm18 vmovaps 0x40(%r12,%rbx), %ymm1 vfmadd132ps %ymm9, %ymm12, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm12 vmovaps 0x40(%r12,%r15), %ymm2 vfmadd132ps %ymm10, %ymm8, %ymm2 # ymm2 = (ymm2 * ymm10) + ymm8 vmovaps 0x40(%r12,%r13), %ymm3 vpminsd %ymm2, %ymm1, %ymm1 vfmadd132ps %ymm11, %ymm13, %ymm3 # ymm3 = (ymm3 * ymm11) + ymm13 vpminsd %ymm0, %ymm3, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm18, %k0 kmovb %k0, %r14d testb $0x8, %r12b jne 0x1ed6f05 testq %r14, %r14 je 0x1ed6f0c andq $-0x10, %r12 vmovdqu (%r12), %ymm1 vmovdqu 0x20(%r12), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r14d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %r12 prefetcht0 (%r12) prefetcht0 0x40(%r12) prefetcht0 0x80(%r12) prefetcht0 0xc0(%r12) xorl %eax, %eax blsrq %r14, %rcx jne 0x1ed6f13 testl %eax, %eax je 0x1ed6e17 
jmp 0x1ed722f movl $0x6, %eax jmp 0x1ed6ef8 movl $0x4, %eax jmp 0x1ed6ef8 vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed6f75 vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %r12 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r9) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed6ef8 vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed7000 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %r12 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r9) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed6ef8 vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm13, %ymm14 vmovaps %ymm8, %ymm13 movq %r15, %rbp movq %rbx, %r15 movq %r11, %rbx movq %r10, %r11 movq %r8, %r10 movq %rsi, %r8 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed7111 vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %r12 vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r9) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r9) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r9) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r9) addq $0x30, %r9 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx movq %rbp, %r15 vmovaps %ymm13, %ymm8 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp jmp 0x1ed6ef8 valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x49d9f(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8a4f5(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x8a4ee(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x8a4e1(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x3b59f(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed7158 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r9) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r9) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] 
addq $0x10, %r9 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed71c2 vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %r12 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx movq %rbp, %r15 vmovaps %ymm13, %ymm8 vmovaps %ymm14, %ymm13 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp vmovdqa64 %ymm19, %ymm18 jmp 0x1ed6ef8 cmpl $0x6, %eax jne 0x1ed6df6 vmovdqu64 %ymm18, 0x120(%rsp) movq %r9, 0x38(%rsp) movl %r12d, %ebx andl $0xf, %ebx addq $-0x8, %rbx je 0x1ed727a andq $-0x10, %r12 leaq 0xf(%rsp), %rdi movq %r12, %rcx movq %rdx, %rbp movq %rsi, %r15 vzeroupper callq 0x8cee24 movq %r15, %rsi movq %rbp, %rdx addq $0x8, %r12 decq %rbx jne 0x1ed7255 vbroadcastss 0x20(%rsi), %ymm0 movq 0x38(%rsp), %r9 movq 0x30(%rsp), %r8 vmovups 0x100(%rsp), %ymm9 vmovups 0xe0(%rsp), %ymm10 vmovups 0xc0(%rsp), %ymm11 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %rbx movq 0x10(%rsp), %r15 vmovups 0xa0(%rsp), %ymm12 vmovups 0x80(%rsp), %ymm8 vmovups 0x60(%rsp), %ymm13 vmovups 0x40(%rsp), %ymm14 vpmovsxbd 0x8678f(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8a2d1(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp vmovdqu64 0x120(%rsp), %ymm18 jmp 0x1ed6df6
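Editor's note: the vpminsd/vpmaxsd ladders in the traversal code above form a small compare-exchange sorting network over the pushed (distance, child) stack items, so the nearest child ends up on top of the stack and is traversed first; the distances are compared as signed integers, which preserves ordering for the non-negative floats involved (the valignd/popcnt loop is the fallback path for more than four hits). A minimal scalar sketch of the same idea for three hit children; StackItem and the helpers below are simplified stand-ins for Embree's StackItemT, not its exact code:

#include <algorithm>
#include <cstdint>

struct StackItem { uint64_t ptr; float dist; };

/* one compare-exchange: afterwards a.dist <= b.dist */
inline void cmp_xchg(StackItem& a, StackItem& b) {
  if (b.dist < a.dist) std::swap(a, b);
}

/* 3-element sorting network, the scalar shape of the min/max ladder in the
   assembly; afterwards s0.dist <= s1.dist <= s2.dist, so pushing s2, s1, s0
   in that order leaves the nearest child on top of the stack */
inline void sort3(StackItem& s0, StackItem& s1, StackItem& s2) {
  cmp_xchg(s0, s1);
  cmp_xchg(s1, s2);
  cmp_xchg(s0, s1);
}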
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceArrayIntersector1>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                        ? query->radius * query->radius
                        : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;

    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                      ? query->radius * query->radius
                      : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed7311 xorl %eax, %eax jmp 0x1ed79aa pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24c8, %rsp # imm = 0x24C8 movq %rdx, %rbx movq %rsi, %r14 movq 0x70(%rax), %rax movq %rax, 0x180(%rsp) movl $0x0, 0x188(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed7351 vmovss 0x10(%r14), %xmm0 vmulss %xmm0, %xmm0, %xmm11 jmp 0x1ed735c vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm11 leaq 0x190(%rsp), %rsi vbroadcastss (%r14), %ymm9 vbroadcastss 0x4(%r14), %ymm10 vbroadcastss 0x8(%r14), %ymm12 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0x4(%rsp) leaq 0x180(%rsp), %r8 vpmovsxbd 0x866d0(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x8a213(%rip), %ymm14 # 0x1f615bc vmovups %ymm9, 0x120(%rsp) vmovups %ymm10, 0x100(%rsp) vmovups %ymm12, 0xe0(%rsp) vsubps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm0, %ymm9, %ymm3 vmovups %ymm3, 0xa0(%rsp) vsubps %ymm1, %ymm10, %ymm3 vmovups %ymm3, 0x80(%rsp) vaddps %ymm1, %ymm10, %ymm1 vmovups %ymm1, 0x60(%rsp) vsubps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x40(%rsp) vaddps %ymm2, %ymm12, %ymm1 vmovups %ymm1, 0x20(%rsp) vmulps %ymm0, %ymm0, %ymm15 vmovaps %xmm11, 0x10(%rsp) vmovups %ymm15, 0x140(%rsp) cmpq %r8, %rsi je 0x1ed7995 vmovss -0x8(%rsi), %xmm0 addq $-0x10, %rsi vucomiss %xmm11, %xmm0 ja 0x1ed741c movq (%rsi), %r15 cmpl $0x1, 0x18(%rbx) jne 0x1ed752e testb $0x8, %r15b jne 0x1ed74b5 vmovaps 0x40(%r15), %ymm0 vmovaps 0x60(%r15), %ymm1 vmaxps %ymm0, %ymm9, %ymm2 vminps %ymm1, %ymm2, %ymm2 vsubps %ymm9, %ymm2, %ymm2 vmaxps 0x80(%r15), %ymm10, %ymm3 vminps 0xa0(%r15), %ymm3, %ymm3 vmaxps 0xc0(%r15), %ymm12, %ymm4 vsubps %ymm10, %ymm3, %ymm3 vminps 0xe0(%r15), %ymm4, %ymm4 vsubps %ymm12, %ymm4, %ymm4 vmulps %ymm2, %ymm2, %ymm2 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm2, %ymm2 vmulps %ymm4, %ymm4, %ymm3 vaddps %ymm3, %ymm2, %ymm7 vcmpleps %ymm15, %ymm7, %k1 vcmpleps %ymm1, %ymm0, %k0 {%k1} kmovb %k0, %ebp testb $0x8, %r15b jne 0x1ed7607 testq %rbp, %rbp je 0x1ed7611 andq $-0x10, %r15 vmovdqu (%r15), %ymm0 vmovdqu 0x20(%r15), %ymm1 vmovdqa %ymm13, %ymm2 vpternlogd $0xf8, %ymm14, %ymm7, %ymm2 kmovd %ebp, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r15 prefetcht0 (%r15) prefetcht0 0x40(%r15) prefetcht0 0x80(%r15) prefetcht0 0xc0(%r15) xorl %eax, %eax blsrq %rbp, %rcx jne 0x1ed761b testl %eax, %eax je 0x1ed7438 jmp 0x1ed78b3 testb $0x8, %r15b jne 0x1ed74b5 vmovaps 0xc0(%r15), %ymm0 vmovaps 0x40(%r15), %ymm1 vmovaps 0x60(%r15), %ymm2 vmovaps 0x80(%r15), %ymm3 vmovaps 0xa0(%r15), %ymm4 vmovaps 0xe0(%r15), %ymm5 vmaxps %ymm1, %ymm9, %ymm6 vminps %ymm2, %ymm6, %ymm6 vsubps %ymm9, %ymm6, %ymm6 vmaxps %ymm3, %ymm10, %ymm7 vminps %ymm4, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm0, %ymm12, %ymm8 vminps %ymm5, %ymm8, %ymm8 vsubps %ymm12, %ymm8, %ymm8 vmulps %ymm6, %ymm6, %ymm6 vmulps %ymm7, %ymm7, %ymm7 vaddps %ymm7, %ymm6, %ymm6 vmulps %ymm8, %ymm8, %ymm7 vaddps %ymm7, %ymm6, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %eax vcmpltps 0xc0(%rsp), %ymm2, %k0 vcmpnleps 0xa0(%rsp), %ymm1, %k1 vcmpltps 0x80(%rsp), %ymm4, %k2 vcmpnleps 0x60(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x40(%rsp), %ymm5, %k3 korb %k3, %k2, %k2 vcmpnleps 0x20(%rsp), %ymm0, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %ecx andb %al, %cl movzbl %cl, %ebp jmp 0x1ed74b5 movl $0x6, %eax jmp 0x1ed7521 movl $0x4, %eax jmp 0x1ed7521 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = 
ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed767d vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r15 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%rsi) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rsi) addq $0x10, %rsi jmp 0x1ed7521 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed7702 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r15 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%rsi) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rsi) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rsi) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rsi) addq $0x20, %rsi jmp 0x1ed7521 vmovaps %ymm15, %ymm16 vmovdqa %ymm7, %ymm15 movq %rsi, %rdi vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed77c4 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r15 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq %rdi, %rsi vmovq %xmm2, (%rdi) vmovdqa %ymm15, %ymm7 vpermd %ymm15, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rdi) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rdi) vpermd %ymm15, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rdi) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rdi) vpermd %ymm15, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rdi) addq $0x30, %rsi vmovaps %ymm16, %ymm15 jmp 0x1ed7521 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x496ec(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x89e43(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, %ymm2 vpmovsxbd 0x89e3c(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x89e2f(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3aeed(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed780a popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm15, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdi) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed7874 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r15 movq %rdi, %rsi vmovdqa %ymm15, %ymm7 jmp 0x1ed77b9 cmpl $0x6, %eax jne 0x1ed741c movl %r15d, %r13d andl $0xf, %r13d addq $-0x8, %r13 je 0x1ed741c vmovups %ymm7, 0x160(%rsp) movq %rsi, 0x8(%rsp) andq $-0x10, %r15 xorl %r12d, %r12d movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx vzeroupper callq 
0x1ee0f4c orb %al, %r12b addq $0x8, %r15 decq %r13 jne 0x1ed78e2 testb $0x1, %r12b movq 0x8(%rsp), %rsi vmovups 0x120(%rsp), %ymm9 vmovups 0x100(%rsp), %ymm10 vmovaps 0x10(%rsp), %xmm11 vmovups 0xe0(%rsp), %ymm12 leaq 0x180(%rsp), %r8 vpmovsxbd 0x86136(%rip), %ymm13 # 0x1f5da70 vpbroadcastd 0x89c79(%rip), %ymm14 # 0x1f615bc vmovups 0x160(%rsp), %ymm7 vmovups 0x140(%rsp), %ymm15 je 0x1ed741c vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed797f vmovss 0x10(%r14), %xmm3 vmulss %xmm3, %xmm3, %xmm11 jmp 0x1ed798a vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm11 movb $0x1, %al movl %eax, 0x4(%rsp) jmp 0x1ed73c4 movl 0x4(%rsp), %eax addq $0x24c8, %rsp # imm = 0x24C8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
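Editor's note: the pointQuery traversal above culls each child box by squared distance: the vmaxps/vminps pair clamps the query point into the box, the squared component differences are summed, and the result is compared against the squared cull radius held in xmm11. A scalar sketch of that distance test; Vec3 and BBox3 are illustrative stand-ins for Embree's types:

#include <algorithm>

struct Vec3  { float x, y, z; };
struct BBox3 { Vec3 lower, upper; };

/* squared distance from point p to box b (zero if p lies inside);
   the kernel evaluates this for all 8 children of a node at once */
inline float sqr_dist_point_box(const Vec3& p, const BBox3& b) {
  const float dx = std::min(std::max(p.x, b.lower.x), b.upper.x) - p.x;
  const float dy = std::min(std::max(p.y, b.lower.y), b.upper.y) - p.y;
  const float dz = std::min(std::max(p.z, b.lower.z), b.upper.z) - p.z;
  return dx*dx + dy*dy + dz*dz;
}

/* a child is visited only if sqr_dist_point_box(q, child) <= cull_radius */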
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceArrayIntersector1MB>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed79f9 pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2488, %rsp # imm = 0x2488 movq 0x70(%rax), %rax movq %rax, 0x140(%rsp) movl $0x0, 0x148(%rsp) cmpq $0x8, %rax jne 0x1ed79fd addq $0x2488, %rsp # imm = 0x2488 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq vmovaps 0x10(%rsi), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rsi), %xmm1, %xmm2 vmaxss 0x20(%rsi), %xmm1, %xmm3 vandps 0x494aa(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vbroadcastss 0x195c5(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x14cd4(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem leaq 0x150(%rsp), %r9 vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %r8d, %r8d vucomiss %xmm1, %xmm0 setb %r8b vbroadcastss %xmm0, %ymm9 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm10 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x49468(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm11 vmulps (%rsi), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x3ac79(%rip), %ymm8 # 0x1f12704 vpermps %ymm0, %ymm8, %ymm8 vpermps %ymm0, %ymm6, %ymm6 shll $0x5, %r8d xorl %r10d, %r10d vucomiss %xmm1, %xmm4 setb %r10b shll $0x5, %r10d orq $0x40, %r10 xorl %r11d, %r11d vucomiss %xmm1, %xmm5 setb %r11b shll $0x5, %r11d orq $0x80, %r11 movq %r8, %r15 xorq $0x20, %r15 movq %r10, %r12 xorq $0x20, %r12 movq %r11, %r13 xorq $0x20, %r13 vbroadcastss %xmm2, %ymm12 vbroadcastss 0x493db(%rip), %ymm1 # 0x1f20ec0 vbroadcastss %xmm3, %ymm0 vxorps %ymm1, %ymm7, %ymm13 vxorps %ymm1, %ymm8, %ymm8 vxorps %ymm1, %ymm6, %ymm14 vpmovsxbd 0x85f70(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x89ab2(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp movq %r8, 0x30(%rsp) vmovups %ymm9, 0x100(%rsp) vmovups %ymm10, 0xe0(%rsp) vmovups %ymm11, 0xc0(%rsp) movq %r10, 0x28(%rsp) movq %r11, 0x20(%rsp) movq %r15, 0x18(%rsp) movq %r12, 0x10(%rsp) vmovups %ymm12, 0xa0(%rsp) vmovups %ymm13, 0x80(%rsp) vmovups %ymm8, 0x60(%rsp) vmovups %ymm14, 0x40(%rsp) vmovss 0x20(%rsi), %xmm1 cmpq %rbp, %r9 je 0x1ed79e8 vmovss -0x8(%r9), %xmm2 addq $-0x10, %r9 vucomiss %xmm1, %xmm2 ja 0x1ed7b69 movq (%r9), %rbx testb $0x8, %bl jne 0x1ed7c4e movq %rbx, %rax andq $-0x10, %rax vbroadcastss 0x1c(%rsi), %ymm1 vmovaps 0x100(%rax,%r8), %ymm2 vfmadd213ps 0x40(%rax,%r8), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vfmadd213ps %ymm13, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm2) + ymm13 vmaxps %ymm2, %ymm12, %ymm2 vmovaps 0x100(%rax,%r10), %ymm3 vfmadd213ps 0x40(%rax,%r10), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vmovaps 0x100(%rax,%r11), %ymm4 vfmadd213ps 0x40(%rax,%r11), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm8, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm3) + ymm8 vfmadd213ps %ymm14, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm4) + ymm14 vmaxps %ymm4, %ymm3, %ymm3 vmaxps %ymm3, %ymm2, %ymm18 vmovaps 0x100(%rax,%r15), %ymm2 vfmadd213ps 0x40(%rax,%r15), %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem vmovaps 0x100(%rax,%r12), %ymm3 vfmadd213ps 0x40(%rax,%r12), %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + mem vfmadd213ps %ymm13, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm2) + ymm13 vfmadd213ps %ymm8, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm3) + ymm8 vmovaps 0x100(%rax,%r13), %ymm4 vfmadd213ps 0x40(%rax,%r13), %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + mem vfmadd213ps %ymm14, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm4) + ymm14 vminps %ymm4, %ymm3, %ymm3 vminps %ymm2, %ymm0, %ymm2 vminps %ymm3, %ymm2, %ymm2 movl %ebx, %ecx andl $0x7, 
%ecx cmpl $0x6, %ecx je 0x1ed7cc2 vcmpleps %ymm2, %ymm18, %k0 kmovb %k0, %r14d testb $0x8, %bl jne 0x1ed7cbb testq %r14, %r14 je 0x1ed7cde andq $-0x10, %rbx vmovdqu (%rbx), %ymm1 vmovdqu 0x20(%rbx), %ymm2 vmovdqa64 %ymm16, %ymm3 vpternlogd $0xf8, %ymm17, %ymm18, %ymm3 kmovd %r14d, %k1 vpcompressd %ymm3, %ymm3 {%k1} vmovdqa %ymm1, %ymm4 vpermt2q %ymm2, %ymm3, %ymm4 vmovq %xmm4, %rbx prefetcht0 (%rbx) prefetcht0 0x40(%rbx) prefetcht0 0x80(%rbx) prefetcht0 0xc0(%rbx) xorl %eax, %eax blsrq %r14, %rcx jne 0x1ed7ce5 testl %eax, %eax je 0x1ed7b85 jmp 0x1ed7ff5 movl $0x6, %eax jmp 0x1ed7cae vcmpleps %ymm2, %ymm18, %k1 vcmpgeps 0x1c0(%rax), %ymm1, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm1, %k0 {%k1} jmp 0x1ed7c4a movl $0x4, %eax jmp 0x1ed7cae vpshufd $0x55, %ymm3, %ymm4 # ymm4 = ymm3[1,1,1,1,5,5,5,5] vmovdqa %ymm1, %ymm5 vpermt2q %ymm2, %ymm4, %ymm5 vmovq %xmm5, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm4, %ymm3, %ymm5 vpmaxsd %ymm4, %ymm3, %ymm4 blsrq %rcx, %rcx jne 0x1ed7d4a vpermi2q %ymm2, %ymm1, %ymm5 vmovq %xmm5, %rbx vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, (%r9) vpermd %ymm18, %ymm4, %ymm1 vmovd %xmm1, 0x8(%r9) addq $0x10, %r9 jmp 0x1ed7cae vpshufd $0xaa, %ymm3, %ymm7 # ymm7 = ymm3[2,2,2,2,6,6,6,6] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm7, %ymm6 vmovq %xmm6, %rdi prefetcht0 (%rdi) prefetcht0 0x40(%rdi) prefetcht0 0x80(%rdi) prefetcht0 0xc0(%rdi) vpminsd %ymm7, %ymm5, %ymm6 vpmaxsd %ymm7, %ymm5, %ymm7 vpminsd %ymm7, %ymm4, %ymm5 vpmaxsd %ymm7, %ymm4, %ymm7 blsrq %rcx, %rcx jne 0x1ed7dd5 vpermi2q %ymm2, %ymm1, %ymm6 vmovq %xmm6, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vpermd %ymm18, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vpermt2q %ymm2, %ymm5, %ymm1 vmovq %xmm1, 0x10(%r9) vpermd %ymm18, %ymm5, %ymm1 vmovd %xmm1, 0x18(%r9) addq $0x20, %r9 jmp 0x1ed7cae vmovdqa64 %ymm18, %ymm19 vmovaps %ymm14, %ymm15 vmovaps %ymm8, %ymm14 movq %r12, %rbp movq %r15, %r12 movq %r11, %r15 movq %r10, %r11 movq %r8, %r10 movq %rsi, %r8 movq %rdx, %rdi vpshufd $0xff, %ymm3, %ymm4 # ymm4 = ymm3[3,3,3,3,7,7,7,7] vmovdqa %ymm1, %ymm8 vpermt2q %ymm2, %ymm4, %ymm8 vmovq %xmm8, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm4, %ymm6, %ymm8 vpmaxsd %ymm4, %ymm6, %ymm6 vpminsd %ymm6, %ymm5, %ymm4 vpmaxsd %ymm6, %ymm5, %ymm5 vpminsd %ymm5, %ymm7, %ymm6 vpmaxsd %ymm5, %ymm7, %ymm7 blsrq %rcx, %rcx jne 0x1ed7edc vpermi2q %ymm2, %ymm1, %ymm8 vmovq %xmm8, %rbx vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm7, %ymm3 vmovq %xmm3, (%r9) vmovdqa64 %ymm19, %ymm18 vpermd %ymm19, %ymm7, %ymm3 vmovd %xmm3, 0x8(%r9) vmovdqa %ymm1, %ymm3 vpermt2q %ymm2, %ymm6, %ymm3 vmovq %xmm3, 0x10(%r9) vpermd %ymm19, %ymm6, %ymm3 vmovd %xmm3, 0x18(%r9) vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, 0x20(%r9) vpermd %ymm19, %ymm4, %ymm1 vmovd %xmm1, 0x28(%r9) addq $0x30, %r9 movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %rbp, %r12 vmovaps %ymm14, %ymm8 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp jmp 0x1ed7cae valignd $0x3, %ymm3, %ymm3, %ymm5 # ymm5 = ymm3[3,4,5,6,7,0,1,2] vpbroadcastd 0x48fd4(%rip), %xmm3 # 0x1f20ec0 vpmovsxbd 0x8972a(%rip), %ymm18 # 0x1f61620 vpermt2d %ymm8, %ymm18, %ymm3 vpmovsxbd 0x89723(%rip), %ymm8 # 0x1f61628 vpermt2d %ymm4, %ymm8, %ymm3 vpermt2d %ymm6, %ymm8, %ymm3 vpmovsxbd 0x89716(%rip), %ymm4 # 0x1f61630 vpermt2d %ymm7, %ymm4, %ymm3 movq %rcx, %rdx vmovdqa %ymm3, %ymm4 vpbroadcastd 0x3a7d4(%rip), %ymm3 # 0x1f12704 vpermd %ymm5, %ymm3, %ymm3 
valignd $0x1, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[1,2,3,4,5,6,7,0] vmovdqa %ymm1, %ymm6 vpermt2q %ymm2, %ymm5, %ymm6 vmovq %xmm6, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm4, %ymm3, %k0 vpmaxsd %ymm4, %ymm3, %ymm3 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm4, %ymm4, %ymm3 {%k1} # ymm3 {%k1} = ymm4[7,0,1,2,3,4,5,6] jne 0x1ed7f23 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm3, %ymm4 vpermi2q %ymm2, %ymm1, %ymm4 vmovq %xmm4, (%r9) vpermd %ymm19, %ymm3, %ymm4 vmovd %xmm4, 0x8(%r9) valignd $0x1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,2,3,4,5,6,7,0] addq $0x10, %r9 vmovdqa %ymm4, %ymm3 decq %rcx jne 0x1ed7f8d vpermt2q %ymm2, %ymm4, %ymm1 vmovq %xmm1, %rbx movq %rdi, %rdx movq %r8, %rsi movq %r10, %r8 movq %r11, %r10 movq %r15, %r11 movq %r12, %r15 movq %rbp, %r12 vmovaps %ymm14, %ymm8 vmovaps %ymm15, %ymm14 leaq 0x140(%rsp), %rbp vmovdqa64 %ymm19, %ymm18 jmp 0x1ed7cae cmpl $0x6, %eax jne 0x1ed7b64 vmovdqu64 %ymm18, 0x120(%rsp) movq %r9, 0x38(%rsp) movl %ebx, %r12d andl $0xf, %r12d addq $-0x8, %r12 je 0x1ed8041 andq $-0x10, %rbx leaq 0xf(%rsp), %rdi movq %rbx, %rcx movq %rdx, %rbp movq %rsi, %r15 vzeroupper callq 0x8cfcc2 movq %r15, %rsi movq %rbp, %rdx addq $0x8, %rbx decq %r12 jne 0x1ed801c vbroadcastss 0x20(%rsi), %ymm0 movq 0x38(%rsp), %r9 movq 0x30(%rsp), %r8 vmovups 0x100(%rsp), %ymm9 vmovups 0xe0(%rsp), %ymm10 vmovups 0xc0(%rsp), %ymm11 movq 0x28(%rsp), %r10 movq 0x20(%rsp), %r11 movq 0x18(%rsp), %r15 movq 0x10(%rsp), %r12 vmovups 0xa0(%rsp), %ymm12 vmovups 0x80(%rsp), %ymm13 vmovups 0x60(%rsp), %ymm8 vmovups 0x40(%rsp), %ymm14 vpmovsxbd 0x859c8(%rip), %ymm16 # 0x1f5da70 vpbroadcastd 0x8950a(%rip), %ymm17 # 0x1f615bc leaq 0x140(%rsp), %rbp vmovdqu64 0x120(%rsp), %ymm18 jmp 0x1ed7b64 nop
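Editor's note: two details of this motion-blur (BVH_MB) intersect variant are visible above. The prologue refines vrcp14ps with one Newton-Raphson step to get an accurate reciprocal ray direction, and each slab plane of a node is stored as a base value plus a per-time-segment delta that a single FMA evaluates at the ray time before the usual slab test. A scalar sketch of one axis of that test, assuming this simplified node layout (field and parameter names are hypothetical, not Embree's exact layout):

#include <algorithm>

/* one slab plane of a motion-blur node: value(t) = base + t*delta */
struct MBPlane { float base, delta; };

inline float plane_at(const MBPlane& p, float t) { return p.base + t * p.delta; }

/* slab update for one axis against time-interpolated bounds; rdir = 1/dir
   and neg_org_rdir = -org/dir are precomputed once per ray, so each plane
   costs two FMAs, matching the vfmadd213ps pairs in the assembly */
inline bool slab_axis(const MBPlane& lo, const MBPlane& hi, float time,
                      float rdir, float neg_org_rdir,
                      float& tnear, float& tfar) {
  float t0 = plane_at(lo, time) * rdir + neg_org_rdir;
  float t1 = plane_at(hi, time) * rdir + neg_org_rdir;
  if (rdir < 0.0f) std::swap(t0, t1); /* the kernel bakes this into byte offsets */
  tnear = std::max(tnear, t0);
  tfar  = std::min(tfar,  t1);
  return tnear <= tfar;
}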
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, false, embree::avx512::ArrayIntersector1<embree::avx512::InstanceArrayIntersector1MB>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return false;

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;

  /* verify correct input */
  assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));

  /* load the point query into SIMD registers */
  TravPointQuery<N> tquery(query->p, context->query_radius);

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N,types> nodeTraverser;

  bool changed = false;
  float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                        ? query->radius * query->radius
                        : dot(context->query_radius, context->query_radius);

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > cull_radius))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(point_query.trav_nodes,1,1,1);
      bool nodeIntersected;
      if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
        nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      } else {
        nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
      }
      if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(point_query.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;

    if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node)) {
      changed = true;
      tquery.rad = context->query_radius;
      cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
                      ? query->radius * query->radius
                      : dot(context->query_radius, context->query_radius);
    }

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
  return changed;
}
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1ed80d9 xorl %eax, %eax jmp 0x1ed884c pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x24c8, %rsp # imm = 0x24C8 movq %rdx, %rbx movq %rsi, %r14 movq 0x70(%rax), %rax movq %rax, 0x180(%rsp) movl $0x0, 0x188(%rsp) cmpl $0x1, 0x18(%rdx) jne 0x1ed8119 vmovss 0x10(%r14), %xmm0 vmulss %xmm0, %xmm0, %xmm12 jmp 0x1ed8124 vmovaps 0x50(%rbx), %xmm0 vdpps $0x7f, %xmm0, %xmm0, %xmm12 leaq 0x190(%rsp), %rsi vbroadcastss (%r14), %ymm10 vbroadcastss 0x4(%r14), %ymm11 vbroadcastss 0x8(%r14), %ymm13 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 movl $0x0, 0x4(%rsp) leaq 0x180(%rsp), %r8 vpmovsxbd 0x85908(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x8944b(%rip), %ymm15 # 0x1f615bc vmovups %ymm10, 0x120(%rsp) vmovups %ymm11, 0x100(%rsp) vmovups %ymm13, 0xe0(%rsp) vsubps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xc0(%rsp) vaddps %ymm0, %ymm10, %ymm3 vmovups %ymm3, 0xa0(%rsp) vsubps %ymm1, %ymm11, %ymm3 vmovups %ymm3, 0x80(%rsp) vaddps %ymm1, %ymm11, %ymm1 vmovups %ymm1, 0x60(%rsp) vsubps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x40(%rsp) vaddps %ymm2, %ymm13, %ymm1 vmovups %ymm1, 0x20(%rsp) vmulps %ymm0, %ymm0, %ymm16 vmovaps %xmm12, 0x10(%rsp) vmovups %ymm16, 0x140(%rsp) cmpq %r8, %rsi je 0x1ed8837 vmovss -0x8(%rsi), %xmm0 addq $-0x10, %rsi vucomiss %xmm12, %xmm0 ja 0x1ed81e5 movq (%rsi), %r13 cmpl $0x1, 0x18(%rbx) jne 0x1ed8359 testb $0x8, %r13b jne 0x1ed82de movq %r13, %rax andq $-0x10, %rax vbroadcastss 0xc(%r14), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm3 vminps %ymm4, %ymm3, %ymm3 vsubps %ymm11, %ymm3, %ymm3 vmaxps %ymm5, %ymm13, %ymm4 vminps %ymm6, %ymm4, %ymm4 vsubps %ymm13, %ymm4, %ymm4 vmulps %ymm7, %ymm7, %ymm5 vmulps %ymm3, %ymm3, %ymm3 vaddps %ymm3, %ymm5, %ymm3 vmulps %ymm4, %ymm4, %ymm4 vaddps %ymm4, %ymm3, %ymm7 vcmpleps %ymm16, %ymm7, %k1 vcmpleps %ymm2, %ymm1, %k0 {%k1} movl %r13d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1ed84b1 kmovb %k0, %ebp testb $0x8, %r13b jne 0x1ed849d testq %rbp, %rbp je 0x1ed84a7 andq $-0x10, %r13 vmovdqu (%r13), %ymm0 vmovdqu 0x20(%r13), %ymm1 vmovdqa %ymm14, %ymm2 vpternlogd $0xf8, %ymm15, %ymm7, %ymm2 kmovd %ebp, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %r13 prefetcht0 (%r13) prefetcht0 0x40(%r13) prefetcht0 0x80(%r13) prefetcht0 0xc0(%r13) xorl %eax, %eax blsrq %rbp, %rcx jne 0x1ed84ca testl %eax, %eax je 0x1ed8201 jmp 0x1ed8756 testb $0x8, %r13b jne 0x1ed82de movq %r13, %rax andq $-0x10, %rax vbroadcastss 0xc(%r14), %ymm0 vmovaps 0x100(%rax), %ymm1 vmovaps 0x120(%rax), %ymm2 vmovaps 0x140(%rax), %ymm3 vmovaps 0x160(%rax), %ymm4 vfmadd213ps 0x40(%rax), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vfmadd213ps 0x80(%rax), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vmovaps 0x180(%rax), %ymm5 vfmadd213ps 0xc0(%rax), %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + mem vfmadd213ps 0x60(%rax), %ymm0, 
%ymm2 # ymm2 = (ymm0 * ymm2) + mem vfmadd213ps 0xa0(%rax), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem vmovaps 0x1a0(%rax), %ymm6 vfmadd213ps 0xe0(%rax), %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + mem vmaxps %ymm1, %ymm10, %ymm7 vminps %ymm2, %ymm7, %ymm7 vsubps %ymm10, %ymm7, %ymm7 vmaxps %ymm3, %ymm11, %ymm8 vminps %ymm4, %ymm8, %ymm8 vsubps %ymm11, %ymm8, %ymm8 vmaxps %ymm5, %ymm13, %ymm9 vminps %ymm6, %ymm9, %ymm9 vsubps %ymm13, %ymm9, %ymm9 vmulps %ymm7, %ymm7, %ymm7 vmulps %ymm8, %ymm8, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vmulps %ymm9, %ymm9, %ymm8 vaddps %ymm7, %ymm8, %ymm7 vcmpleps %ymm2, %ymm1, %k0 kmovd %k0, %ecx vcmpltps 0xc0(%rsp), %ymm2, %k0 vcmpnleps 0xa0(%rsp), %ymm1, %k1 vcmpltps 0x80(%rsp), %ymm4, %k2 vcmpnleps 0x60(%rsp), %ymm3, %k3 korb %k1, %k3, %k1 vcmpltps 0x40(%rsp), %ymm6, %k3 korb %k3, %k2, %k2 vcmpnleps 0x20(%rsp), %ymm5, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %edx andb %cl, %dl movzbl %dl, %ebp movl %r13d, %ecx andl $0x7, %ecx cmpl $0x6, %ecx jne 0x1ed82de vcmpltps 0x1e0(%rax), %ymm0, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k0 {%k1} kmovd %k0, %eax andb %bpl, %al movzbl %al, %ebp jmp 0x1ed82de movl $0x6, %eax jmp 0x1ed834c movl $0x4, %eax jmp 0x1ed834c vcmpgeps 0x1c0(%rax), %ymm0, %k1 vcmpltps 0x1e0(%rax), %ymm0, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1ed82da vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed852c vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %r13 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%rsi) vpermd %ymm7, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rsi) addq $0x10, %rsi jmp 0x1ed834c vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed85b1 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %r13 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%rsi) vpermd %ymm7, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rsi) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rsi) vpermd %ymm7, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rsi) addq $0x20, %rsi jmp 0x1ed834c vmovdqa %ymm7, %ymm9 movq %rsi, %rdi vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed8667 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %r13 vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq %rdi, %rsi vmovq %xmm2, (%rdi) vmovdqa %ymm9, %ymm7 vpermd %ymm9, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rdi) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rdi) vpermd %ymm9, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rdi) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rdi) vpermd %ymm9, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rdi) addq $0x30, %rsi jmp 0x1ed834c valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x48849(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x88fa0(%rip), %ymm8 # 0x1f61620 vpermt2d %ymm7, %ymm8, 
%ymm2 vpmovsxbd 0x88f99(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x88f8c(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x3a04a(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed86ad popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm9, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdi) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed8717 vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %r13 movq %rdi, %rsi vmovdqa %ymm9, %ymm7 jmp 0x1ed834c cmpl $0x6, %eax jne 0x1ed81e5 movl %r13d, %r15d andl $0xf, %r15d addq $-0x8, %r15 je 0x1ed81e5 vmovdqu %ymm7, 0x160(%rsp) movq %rsi, 0x8(%rsp) andq $-0x10, %r13 xorl %r12d, %r12d movq %r14, %rdi movq %rbx, %rsi movq %r13, %rdx vzeroupper callq 0x1ee1eea orb %al, %r12b addq $0x8, %r13 decq %r15 jne 0x1ed8785 testb $0x1, %r12b movq 0x8(%rsp), %rsi vmovups 0x120(%rsp), %ymm10 vmovups 0x100(%rsp), %ymm11 vmovaps 0x10(%rsp), %xmm12 vmovups 0xe0(%rsp), %ymm13 leaq 0x180(%rsp), %r8 vpmovsxbd 0x85293(%rip), %ymm14 # 0x1f5da70 vpbroadcastd 0x88dd6(%rip), %ymm15 # 0x1f615bc vmovups 0x140(%rsp), %ymm16 vmovdqu 0x160(%rsp), %ymm7 je 0x1ed81e5 vbroadcastss 0x50(%rbx), %ymm0 vbroadcastss 0x54(%rbx), %ymm1 vbroadcastss 0x58(%rbx), %ymm2 cmpl $0x1, 0x18(%rbx) jne 0x1ed8821 vmovss 0x10(%r14), %xmm3 vmulss %xmm3, %xmm3, %xmm12 jmp 0x1ed882c vmovaps 0x50(%rbx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm12 movb $0x1, %al movl %eax, 0x4(%rsp) jmp 0x1ed818c movl 0x4(%rsp), %eax addq $0x24c8, %rsp # imm = 0x24C8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp andb $0x1, %al vzeroupper retq
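Editor's note: compared with the non-motion-blur pointQuery earlier, this variant adds one more predicate: the vcmpgeps/vcmpltps pair against the fields at node offsets 0x1c0 and 0x1e0 masks out children whose time interval does not contain the query time, so only temporally valid children survive the distance test. A sketch of that predicate (type and field names are hypothetical):

/* per-child time bounds of a 4D (space + time) node */
struct TimeRange { float lower, upper; };

/* a child qualifies only if its interval contains t; the kernel evaluates
   this for all 8 children with two masked compares */
inline bool child_active(const TimeRange& r, float t) {
  return (t >= r.lower) && (t < r.upper);
}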
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 1, false, embree::avx512::SubGridIntersector1Moeller<8, true>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This, RayHit& __restrict__ ray, RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0)) goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x2740, %rsp # imm = 0x2740 movq %rdx, 0x8(%rsp) movq %rsi, (%rsp) movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1ed889a movq 0x70(%rax), %rax movq %rax, 0x3e0(%rsp) movl $0x0, 0x3e8(%rsp) cmpq $0x8, %rax jne 0x1ed88ac leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq (%rsp), %rax vmovaps 0x10(%rax), %xmm0 vxorps %xmm1, %xmm1, %xmm1 vmaxss 0xc(%rax), %xmm1, %xmm2 vmaxss 0x20(%rax), %xmm1, %xmm3 vandps 0x485f7(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 leaq 0x3f0(%rsp), %rsi vbroadcastss 0x1870a(%rip), %xmm5 # 0x1ef0fe8 vcmpltps %xmm5, %xmm4, %k1 vmovaps %xmm5, %xmm0 {%k1} vrcp14ps %xmm0, %xmm4 vfnmadd213ps 0x13e19(%rip){1to4}, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm0) + mem vfmadd132ps %xmm4, %xmm4, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm4 xorl %edi, %edi vucomiss %xmm1, %xmm0 setb %dil vbroadcastss %xmm0, %ymm22 vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3] vbroadcastsd %xmm4, %ymm23 vshufpd $0x1, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,0] vbroadcastss 0x485b4(%rip), %ymm6 # 0x1f20edc vpermps %ymm0, %ymm6, %ymm24 vmulps (%rax), %xmm0, %xmm0 vbroadcastss %xmm0, %ymm7 vbroadcastss 0x39dc3(%rip), %ymm25 # 0x1f12704 vpermps %ymm0, %ymm25, %ymm8 vpermps %ymm0, %ymm6, %ymm0 shll $0x5, %edi xorl %r8d, %r8d vucomiss %xmm1, %xmm4 setb %r8b shll $0x5, %r8d orq $0x40, %r8 xorl %r9d, %r9d vucomiss %xmm1, %xmm5 setb %r9b shll $0x5, %r9d orq $0x80, %r9 movq %rdi, %r10 xorq $0x20, %r10 movq %r8, %r11 xorq $0x20, %r11 movq %r9, %rbx xorq $0x20, %rbx vbroadcastss %xmm2, %ymm26 vbroadcastss %xmm3, %ymm27 vbroadcastss 0x4851e(%rip), %ymm1 # 0x1f20ec0 vxorps %ymm1, %ymm7, %ymm28 vxorps %ymm1, %ymm8, %ymm29 vxorps %ymm1, %ymm0, %ymm30 movl %edi, %eax shrl $0x2, %eax movq %rax, 0x98(%rsp) movl %r10d, %eax shrl $0x2, %eax movq %rax, 0x90(%rsp) movl %r8d, %eax shrl $0x2, %eax movq %rax, 0x88(%rsp) movl %r11d, %eax shrl $0x2, %eax movq %rax, 0x80(%rsp) movl %r9d, %eax shrl $0x2, %eax movq %rax, 0x78(%rsp) movl %ebx, %eax vpmovsxbd 0x8506e(%rip), %ymm31 # 0x1f5da70 vpbroadcastd 0x88bb1(%rip), %ymm7 # 0x1f615bc shrl $0x2, %eax movq %rax, 0x70(%rsp) leaq 0x3e0(%rsp), %r15 movq %rdi, 0x58(%rsp) movq %r8, 0x50(%rsp) movq %r9, 0x48(%rsp) movq %r10, 0x40(%rsp) movq %r11, 0x38(%rsp) movq %rbx, 0x30(%rsp) movq (%rsp), %rax vmovss 0x20(%rax), %xmm0 cmpq %r15, %rsi je 0x1ed889a vmovss -0x8(%rsi), %xmm1 addq $-0x10, %rsi vucomiss %xmm0, %xmm1 ja 0x1ed8a42 movq (%rsi), %rdx testb $0x8, %dl jne 0x1ed8ad9 vmovaps 0x40(%rdx,%rdi), %ymm0 vfmadd132ps %ymm22, %ymm28, %ymm0 # ymm0 = (ymm0 * ymm22) + ymm28 vmovaps 0x40(%rdx,%r8), %ymm1 vfmadd132ps %ymm23, %ymm29, %ymm1 # ymm1 = (ymm1 * ymm23) + ymm29 vmovaps 0x40(%rdx,%r9), %ymm2 vpmaxsd %ymm1, %ymm0, %ymm0 vfmadd132ps %ymm24, %ymm30, %ymm2 # ymm2 = (ymm2 * ymm24) + ymm30 vmovaps 0x40(%rdx,%r10), %ymm1 vmovaps 0x40(%rdx,%r11), %ymm3 vfmadd132ps %ymm22, %ymm28, %ymm1 # ymm1 = (ymm1 * ymm22) + ymm28 vfmadd132ps %ymm23, %ymm29, %ymm3 # ymm3 = (ymm3 * ymm23) + ymm29 vpminsd %ymm3, %ymm1, %ymm1 vmovaps 0x40(%rdx,%rbx), %ymm3 vfmadd132ps %ymm24, %ymm30, %ymm3 # ymm3 = (ymm3 * ymm24) + ymm30 vpmaxsd %ymm26, %ymm2, %ymm2 vpmaxsd %ymm2, %ymm0, %ymm6 vpminsd %ymm27, %ymm3, %ymm0 vpminsd %ymm0, %ymm1, %ymm0 vpcmpled %ymm0, %ymm6, %k0 kmovb %k0, %r12d testb $0x8, %dl jne 0x1ed8b42 testq %r12, %r12 je 0x1ed8b49 andq $-0x10, %rdx vmovdqu (%rdx), %ymm0 vmovdqu 0x20(%rdx), %ymm1 vmovdqa64 %ymm31, %ymm2 vpternlogd $0xf8, %ymm7, %ymm6, %ymm2 kmovd %r12d, %k1 
vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) xorl %eax, %eax blsrq %r12, %rcx jne 0x1ed8b50 testl %eax, %eax je 0x1ed8a5d jmp 0x1ed8e26 movl $0x6, %eax jmp 0x1ed8b35 movl $0x4, %eax jmp 0x1ed8b35 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1ed8baf vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rdx vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, (%rsi) vpermd %ymm6, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rsi) addq $0x10, %rsi jmp 0x1ed8b35 vmovdqa %ymm6, %ymm10 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1ed8c3c vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rdx vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 vmovq %xmm2, (%rsi) vpermd %ymm10, %ymm6, %ymm2 vmovdqa %ymm10, %ymm6 vmovd %xmm2, 0x8(%rsi) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rsi) vpermd %ymm10, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rsi) addq $0x20, %rsi jmp 0x1ed8b35 movq %r12, %r13 movq %r15, %r12 vmovdqa %ymm7, %ymm8 movq %rbx, %r15 movq %r11, %rbx movq %r10, %r11 movq %r9, %r10 movq %r8, %r9 movq %rdi, %r8 movq %rsi, %rdi vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1ed8d26 vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rdx vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq %rdi, %rsi vmovq %xmm2, (%rdi) vpermd %ymm10, %ymm6, %ymm2 vmovdqa %ymm10, %ymm6 vmovd %xmm2, 0x8(%rdi) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rdi) vpermd %ymm10, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rdi) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rdi) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rdi) addq $0x30, %rsi movq %r8, %rdi movq %r9, %r8 movq %r10, %r9 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx vmovdqa %ymm8, %ymm7 movq %r12, %r15 movq %r13, %r12 jmp 0x1ed8b35 valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x4818a(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x888e1(%rip), %ymm9 # 0x1f61620 vpermt2d %ymm7, %ymm9, %ymm2 vpmovsxbd 0x888da(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x888cd(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpermps %ymm4, %ymm25, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1ed8d6c popcntq %rcx, %rcx addq $0x3, 
%rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 vmovq %xmm3, (%rdi) vpermd %ymm10, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdi) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdi vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1ed8dce vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rdx movq %rdi, %rsi movq %r8, %rdi movq %r9, %r8 movq %r10, %r9 movq %r11, %r10 movq %rbx, %r11 movq %r15, %rbx vmovdqa %ymm8, %ymm7 movq %r12, %r15 vmovdqa %ymm10, %ymm6 jmp 0x1ed8d1e cmpl $0x6, %eax jne 0x1ed8a39 movq %r12, 0x60(%rsp) vmovdqa %ymm6, 0x3a0(%rsp) movq %rsi, 0x68(%rsp) movl %edx, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0xa8(%rsp) je 0x1ed98e7 andq $-0x10, %rdx xorl %eax, %eax movq %rdx, 0xa0(%rsp) movq %rax, 0xb0(%rsp) leaq (%rax,%rax,8), %rax shlq $0x4, %rax vmovq 0x40(%rdx,%rax), %xmm0 vmovq 0x48(%rdx,%rax), %xmm1 leaq (%rdx,%rax), %rsi vpcmpleub %xmm1, %xmm0, %k0 vbroadcastss 0x70(%rdx,%rax), %ymm0 vbroadcastss 0x7c(%rdx,%rax), %ymm1 movq 0x98(%rsp), %rcx vpmovzxbd 0x40(%rcx,%rsi), %ymm2 vcvtdq2ps %ymm2, %ymm2 vfmadd213ps %ymm0, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm0 movq 0x90(%rsp), %rcx vpmovzxbd 0x40(%rcx,%rsi), %ymm3 vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0 vbroadcastss 0x74(%rdx,%rax), %ymm0 vbroadcastss 0x80(%rdx,%rax), %ymm1 movq 0x88(%rsp), %rcx vpmovzxbd 0x40(%rcx,%rsi), %ymm4 vcvtdq2ps %ymm4, %ymm4 vfmadd213ps %ymm0, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm0 movq 0x80(%rsp), %rcx vpmovzxbd 0x40(%rcx,%rsi), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm0, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm0 vbroadcastss 0x78(%rdx,%rax), %ymm0 vbroadcastss 0x84(%rdx,%rax), %ymm1 movq 0x78(%rsp), %rax vpmovzxbd 0x40(%rax,%rsi), %ymm6 vcvtdq2ps %ymm6, %ymm6 vfmadd213ps %ymm0, %ymm1, %ymm6 # ymm6 = (ymm1 * ymm6) + ymm0 movq 0x70(%rsp), %rax movq %rsi, 0xb8(%rsp) vpmovzxbd 0x40(%rax,%rsi), %ymm7 vcvtdq2ps %ymm7, %ymm7 vfmadd213ps %ymm0, %ymm1, %ymm7 # ymm7 = (ymm1 * ymm7) + ymm0 vfmadd213ps %ymm28, %ymm22, %ymm2 # ymm2 = (ymm22 * ymm2) + ymm28 vfmadd213ps %ymm29, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm4) + ymm29 vpmaxsd %ymm4, %ymm2, %ymm0 vfmadd213ps %ymm30, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm6) + ymm30 vfmadd213ps %ymm28, %ymm22, %ymm3 # ymm3 = (ymm22 * ymm3) + ymm28 vfmadd213ps %ymm29, %ymm23, %ymm5 # ymm5 = (ymm23 * ymm5) + ymm29 vpminsd %ymm5, %ymm3, %ymm1 vfmadd213ps %ymm30, %ymm24, %ymm7 # ymm7 = (ymm24 * ymm7) + ymm30 vpmaxsd %ymm26, %ymm6, %ymm2 vpmaxsd %ymm2, %ymm0, %ymm0 vpminsd %ymm27, %ymm7, %ymm2 vpminsd %ymm2, %ymm1, %ymm1 vpcmpled %ymm1, %ymm0, %k1 vmovdqa %ymm0, 0x3c0(%rsp) ktestb %k0, %k1 je 0x1ed98c6 kandb %k0, %k1, %k0 kmovd %k0, %eax movzbl %al, %r12d tzcntq %r12, %rsi vmovss 0x3c0(%rsp,%rsi,4), %xmm0 movq (%rsp), %rax vmovss 0x20(%rax), %xmm1 vucomiss %xmm1, %xmm0 ja 0x1ed9299 movq 0xb8(%rsp), %rdi movl 0x88(%rdi), %r13d movl 0x4(%rdi,%rsi,8), %r14d movq 0x8(%rsp), %rax movq (%rax), %rax movq 0x1e8(%rax), %rax movq (%rax,%r13,8), %rbx movq 0x58(%rbx), %rax movq 0x90(%rbx), %rdx movq 0x68(%rbx), %rcx imulq %r14, %rcx movq (%rdi,%rsi,8), %rdi vmovq %rdi, %xmm0 vpbroadcastw 0x88611(%rip), %xmm2 # 0x1f61638 vpand %xmm2, %xmm0, %xmm0 vpmovzxwd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero movq (%rax,%rcx), %r8 vmovq %r8, %xmm2 vpaddd %xmm0, %xmm2, %xmm3 vpmulld %xmm0, %xmm2, %xmm2 vpsrlq $0x20, %xmm2, %xmm2 vpaddd %xmm2, %xmm3, %xmm2 vmovd %xmm2, %r15d leaq 0x1(%r15), %r9 movq 0xa0(%rbx), %rsi movq %rsi, %r10 imulq %r15, %r10 vmovups (%rdx,%r10), %xmm2 imulq %rsi, %r9 vmovups (%rdx,%r9), 
%xmm3 shrq $0x20, %r8 leaq (%r15,%r8), %r10 leaq (%r15,%r8), %r9 incq %r9 movq %r10, %r11 imulq %rsi, %r11 vmovups (%rdx,%r11), %xmm5 movq %r9, %r11 imulq %rsi, %r11 vmovups (%rdx,%r11), %xmm4 xorl %r11d, %r11d testw %di, %di setns %r11b addq %r11, %r15 incq %r15 imulq %rsi, %r15 vmovups (%rdx,%r15), %xmm6 addq %r9, %r11 movq %r11, %r15 imulq %rsi, %r15 vmovups (%rdx,%r15), %xmm7 shrl $0x10, %edi testw %di, %di movl $0x0, %edi cmovnsq %r8, %rdi addq %rdi, %r10 imulq %rsi, %r10 vmovups (%rdx,%r10), %xmm8 addq %rdi, %r9 imulq %rsi, %r9 vmovups (%rdx,%r9), %xmm9 addq %r11, %rdi imulq %rsi, %rdi vinsertf128 $0x1, (%rdx,%rdi), %ymm4, %ymm10 vunpcklps %xmm7, %xmm3, %xmm11 # xmm11 = xmm3[0],xmm7[0],xmm3[1],xmm7[1] vunpckhps %xmm7, %xmm3, %xmm12 # xmm12 = xmm3[2],xmm7[2],xmm3[3],xmm7[3] vunpcklps %xmm4, %xmm6, %xmm13 # xmm13 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] vunpckhps %xmm4, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3] vunpcklps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[0],xmm6[0],xmm12[1],xmm6[1] vunpcklps %xmm13, %xmm11, %xmm12 # xmm12 = xmm11[0],xmm13[0],xmm11[1],xmm13[1] vunpckhps %xmm13, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm13[2],xmm11[3],xmm13[3] vunpcklps %xmm9, %xmm5, %xmm13 # xmm13 = xmm5[0],xmm9[0],xmm5[1],xmm9[1] vunpckhps %xmm9, %xmm5, %xmm14 # xmm14 = xmm5[2],xmm9[2],xmm5[3],xmm9[3] vunpcklps %xmm8, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm8[0],xmm4[1],xmm8[1] vunpckhps %xmm8, %xmm4, %xmm8 # xmm8 = xmm4[2],xmm8[2],xmm4[3],xmm8[3] vunpcklps %xmm8, %xmm14, %xmm8 # xmm8 = xmm14[0],xmm8[0],xmm14[1],xmm8[1] vunpcklps %xmm15, %xmm13, %xmm14 # xmm14 = xmm13[0],xmm15[0],xmm13[1],xmm15[1] vunpckhps %xmm15, %xmm13, %xmm13 # xmm13 = xmm13[2],xmm15[2],xmm13[3],xmm15[3] vinsertf128 $0x1, %xmm9, %ymm5, %ymm5 vinsertf128 $0x1, %xmm7, %ymm3, %ymm3 vunpcklps %ymm5, %ymm3, %ymm7 # ymm7 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5] vinsertf128 $0x1, %xmm4, %ymm2, %ymm2 vunpcklps %ymm10, %ymm2, %ymm4 # ymm4 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[4],ymm10[4],ymm2[5],ymm10[5] vunpcklps %ymm7, %ymm4, %ymm9 # ymm9 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[4],ymm7[4],ymm4[5],ymm7[5] vunpckhps %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[6],ymm7[6],ymm4[7],ymm7[7] vunpckhps %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7] vunpckhps %ymm10, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[6],ymm10[6],ymm2[7],ymm10[7] vunpcklps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] vinsertf128 $0x1, %xmm12, %ymm12, %ymm3 vinsertf128 $0x1, %xmm11, %ymm11, %ymm4 vinsertf128 $0x1, %xmm6, %ymm6, %ymm5 vinsertf128 $0x1, %xmm14, %ymm14, %ymm6 vinsertf128 $0x1, %xmm13, %ymm13, %ymm10 vinsertf128 $0x1, %xmm8, %ymm8, %ymm8 vsubps %ymm3, %ymm9, %ymm12 vsubps %ymm4, %ymm7, %ymm13 vsubps %ymm5, %ymm2, %ymm14 vsubps %ymm9, %ymm6, %ymm11 vsubps %ymm7, %ymm10, %ymm10 vsubps %ymm2, %ymm8, %ymm15 vmulps %ymm15, %ymm13, %ymm3 vfmsub231ps %ymm14, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm14) - ymm3 vmulps %ymm11, %ymm14, %ymm4 vfmsub231ps %ymm12, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm12) - ymm4 vmulps %ymm10, %ymm12, %ymm5 movq (%rsp), %rdx vbroadcastss 0x10(%rdx), %ymm16 vbroadcastss 0x14(%rdx), %ymm17 vbroadcastss 0x18(%rdx), %ymm18 vsubps (%rdx){1to8}, %ymm9, %ymm6 vfmsub231ps %ymm13, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm13) - ymm5 vsubps 0x4(%rdx){1to8}, %ymm7, %ymm7 vsubps 0x8(%rdx){1to8}, %ymm2, %ymm8 vmulps %ymm8, %ymm17, %ymm19 vfmsub231ps %ymm18, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm18) - ymm19 vmulps 
%ymm6, %ymm18, %ymm20 vfmsub231ps %ymm16, %ymm8, %ymm20 # ymm20 = (ymm8 * ymm16) - ymm20 vmulps %ymm7, %ymm16, %ymm21 vfmsub231ps %ymm17, %ymm6, %ymm21 # ymm21 = (ymm6 * ymm17) - ymm21 vmulps %ymm5, %ymm18, %ymm18 vfmadd231ps %ymm17, %ymm4, %ymm18 # ymm18 = (ymm4 * ymm17) + ymm18 vfmadd231ps %ymm16, %ymm3, %ymm18 # ymm18 = (ymm3 * ymm16) + ymm18 vandps 0x47c8f(%rip){1to8}, %ymm18, %ymm2 # 0x1f20ec4 vmulps %ymm21, %ymm15, %ymm9 vfmadd231ps %ymm10, %ymm20, %ymm9 # ymm9 = (ymm20 * ymm10) + ymm9 vfmadd231ps %ymm11, %ymm19, %ymm9 # ymm9 = (ymm19 * ymm11) + ymm9 vandpd 0x47d17(%rip){1to4}, %ymm18, %ymm11 # 0x1f20f68 vxorps %ymm9, %ymm11, %ymm9 vmulps %ymm21, %ymm14, %ymm10 vfmadd231ps %ymm20, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm20) + ymm10 vfmadd231ps %ymm19, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm19) + ymm10 vxorps %ymm10, %ymm11, %ymm10 vxorps %xmm12, %xmm12, %xmm12 vcmpnltps %ymm12, %ymm9, %k1 vcmpnltps %ymm12, %ymm10, %k1 {%k1} vcmpneqps %ymm12, %ymm18, %k1 {%k1} vaddps %ymm10, %ymm9, %ymm12 vcmpleps %ymm2, %ymm12, %k0 {%k1} kortestb %k0, %k0 jne 0x1ed92a9 blsrq %r12, %r12 jne 0x1ed8fba jmp 0x1ed98c6 vmulps %ymm5, %ymm8, %ymm8 vfmadd213ps %ymm8, %ymm4, %ymm7 # ymm7 = (ymm4 * ymm7) + ymm8 vfmadd213ps %ymm7, %ymm3, %ymm6 # ymm6 = (ymm3 * ymm6) + ymm7 vxorps %ymm6, %ymm11, %ymm6 movq (%rsp), %rdx vmulps 0xc(%rdx){1to8}, %ymm2, %ymm7 vbroadcastss %xmm1, %ymm1 vmulps %ymm2, %ymm1, %ymm1 vcmpleps %ymm1, %ymm6, %k1 vcmpltps %ymm6, %ymm7, %k1 {%k1} kandb %k0, %k1, %k0 kortestb %k0, %k0 je 0x1ed9299 vmovaps %ymm9, 0x220(%rsp) vmovaps %ymm10, 0x240(%rsp) vmovaps %ymm6, 0x260(%rsp) vmovaps %ymm2, 0x280(%rsp) kmovb %k0, 0x2a1(%rsp) vmovaps 0x240(%rsp), %ymm1 vmovaps 0x220(%rsp), %ymm7 vsubps %ymm1, %ymm2, %ymm6 vblendps $0xf0, %ymm6, %ymm7, %ymm6 # ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7] vsubps %ymm7, %ymm2, %ymm7 vblendps $0xf0, %ymm7, %ymm1, %ymm1 # ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7] vmovaps %ymm6, 0x220(%rsp) vmovaps %ymm1, 0x240(%rsp) vmovaps 0x875ec(%rip), %ymm7 # 0x1f60940 vmulps %ymm7, %ymm3, %ymm3 vmovaps %ymm3, 0x320(%rsp) vmulps %ymm7, %ymm4, %ymm3 vmovaps %ymm3, 0x340(%rsp) vmulps %ymm7, %ymm5, %ymm3 vmovaps %ymm3, 0x360(%rsp) vpbroadcastd %xmm0, %ymm3 vpaddd 0x84138(%rip), %ymm3, %ymm3 # 0x1f5d4c0 vpermps %ymm0, %ymm25, %ymm0 movzwl 0x8(%rax,%rcx), %edx decl %edx vcvtsi2ss %edx, %xmm22, %xmm4 vpaddd 0x8413d(%rip), %ymm0, %ymm5 # 0x1f5d4e0 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm4, %xmm9, %xmm0 # xmm0 = xmm4[0],xmm9[1,2,3] vrcp14ss %xmm0, %xmm9, %xmm0 vmovss 0x17c3e(%rip), %xmm8 # 0x1ef0ff8 vfnmadd213ss %xmm8, %xmm0, %xmm4 # xmm4 = -(xmm0 * xmm4) + xmm8 vmulss %xmm4, %xmm0, %xmm0 movzwl 0xa(%rax,%rcx), %eax decl %eax vcvtsi2ss %eax, %xmm22, %xmm4 vmovss %xmm4, %xmm9, %xmm7 # xmm7 = xmm4[0],xmm9[1,2,3] vrcp14ss %xmm7, %xmm9, %xmm7 vfnmadd213ss %xmm8, %xmm7, %xmm4 # xmm4 = -(xmm7 * xmm4) + xmm8 vmulss %xmm4, %xmm7, %xmm4 vcvtdq2ps %ymm3, %ymm3 vmulps %ymm3, %ymm2, %ymm3 vaddps %ymm6, %ymm3, %ymm3 vbroadcastss %xmm0, %ymm0 vmulps %ymm0, %ymm3, %ymm0 vmovaps %ymm0, 0x220(%rsp) vcvtdq2ps %ymm5, %ymm3 vmulps %ymm3, %ymm2, %ymm3 vaddps %ymm1, %ymm3, %ymm1 vbroadcastss %xmm4, %ymm3 vmulps %ymm3, %ymm1, %ymm1 vmovaps %ymm1, 0x240(%rsp) movq (%rsp), %rax movl 0x24(%rax), %eax testl %eax, 0x34(%rbx) je 0x1ed9299 movzbl 0x2a1(%rsp), %r15d vrcp14ps %ymm2, %ymm3 vfnmadd213ps 0x132cc(%rip){1to8}, %ymm3, %ymm2 # ymm2 = -(ymm3 * ymm2) + mem vfmadd132ps %ymm3, %ymm3, %ymm2 # ymm2 = (ymm2 * ymm3) + ymm3 vmulps 0x260(%rsp), %ymm2, %ymm3 vmovaps %ymm3, 0x300(%rsp) vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x2c0(%rsp) 
vmulps %ymm2, %ymm1, %ymm0 vmovaps %ymm0, 0x2e0(%rsp) kmovd %r15d, %k1 vbroadcastss 0x12599(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm3, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %r15b, %al movzbl %al, %eax cmovel %r15d, %eax movzbl %al, %eax tzcntq %rax, %rdi movq 0x8(%rsp), %rax movq 0x10(%rax), %rsi cmpq $0x0, 0x10(%rsi) jne 0x1ed955a cmpq $0x0, 0x40(%rbx) jne 0x1ed955a vmovss 0x2c0(%rsp,%rdi,4), %xmm0 vmovss 0x2e0(%rsp,%rdi,4), %xmm1 vmovd 0x320(%rsp,%rdi,4), %xmm2 vmovss 0x340(%rsp,%rdi,4), %xmm3 vmovss 0x360(%rsp,%rdi,4), %xmm4 vmovss 0x300(%rsp,%rdi,4), %xmm5 movq (%rsp), %rdx vmovss %xmm5, 0x20(%rdx) vmovd %xmm2, 0x30(%rdx) vmovss %xmm3, 0x34(%rdx) vmovss %xmm4, 0x38(%rdx) vmovss %xmm0, 0x3c(%rdx) vmovss %xmm1, 0x40(%rdx) movl %r14d, 0x44(%rdx) movl %r13d, 0x48(%rdx) movq 0x8(%rsp), %rax movq 0x8(%rax), %rax movl (%rax), %ecx movl %ecx, 0x4c(%rdx) movl 0x4(%rax), %eax movl %eax, 0x50(%rdx) jmp 0x1ed9299 vmovaps %ymm3, 0x380(%rsp) movq 0x8(%rsp), %rax movq 0x8(%rax), %rax movq %rax, 0x28(%rsp) movq %r13, %rax movq %r14, %rcx movq %rsi, 0x18(%rsp) vmovaps %ymm22, 0x200(%rsp) vmovaps %ymm23, 0x1e0(%rsp) vmovaps %ymm24, 0x1c0(%rsp) vmovaps %ymm26, 0x1a0(%rsp) vmovaps %ymm27, 0x180(%rsp) vmovaps %ymm28, 0x160(%rsp) vmovaps %ymm29, 0x140(%rsp) vmovaps %ymm30, 0x120(%rsp) vmovss 0x2c0(%rsp,%rdi,4), %xmm0 vmovss 0x2e0(%rsp,%rdi,4), %xmm1 movq (%rsp), %rdx vmovss 0x20(%rdx), %xmm2 vmovss %xmm2, 0x10(%rsp) vmovss 0x300(%rsp,%rdi,4), %xmm2 vmovss %xmm2, 0x20(%rdx) vmovss 0x320(%rsp,%rdi,4), %xmm2 vmovss 0x340(%rsp,%rdi,4), %xmm3 vmovss 0x360(%rsp,%rdi,4), %xmm4 vmovss %xmm2, 0xf0(%rsp) vmovss %xmm3, 0xf4(%rsp) vmovss %xmm4, 0xf8(%rsp) vmovss %xmm0, 0xfc(%rsp) vmovss %xmm1, 0x100(%rsp) movl %ecx, 0x104(%rsp) movl %eax, 0x108(%rsp) movq 0x28(%rsp), %rcx movl (%rcx), %eax movl %eax, 0x10c(%rsp) movl 0x4(%rcx), %eax movl %eax, 0x110(%rsp) movl $0xffffffff, 0x14(%rsp) # imm = 0xFFFFFFFF leaq 0x14(%rsp), %rax movq %rax, 0xc0(%rsp) movq 0x18(%rbx), %rax movq %rax, 0xc8(%rsp) movq %rcx, 0xd0(%rsp) movq %rdx, 0xd8(%rsp) leaq 0xf0(%rsp), %rax movq %rax, 0xe0(%rsp) movl $0x1, 0xe8(%rsp) movq 0x40(%rbx), %rax testq %rax, %rax movq %rdi, 0x20(%rsp) je 0x1ed972f leaq 0xc0(%rsp), %rdi vzeroupper callq *%rax movq 0x20(%rsp), %rdi movq 0x18(%rsp), %rsi vpmovsxbd 0x8439c(%rip), %ymm31 # 0x1f5da70 vmovaps 0x120(%rsp), %ymm30 vmovaps 0x140(%rsp), %ymm29 vmovaps 0x160(%rsp), %ymm28 vmovaps 0x180(%rsp), %ymm27 vmovaps 0x1a0(%rsp), %ymm26 vbroadcastss 0x38ffe(%rip), %ymm25 # 0x1f12704 vmovaps 0x1c0(%rsp), %ymm24 vmovaps 0x1e0(%rsp), %ymm23 vmovaps 0x200(%rsp), %ymm22 movq 0xc0(%rsp), %rax cmpl $0x0, (%rax) je 0x1ed981a movq 0x10(%rsi), %rax testq %rax, %rax je 0x1ed97bf testb $0x2, (%rsi) jne 0x1ed9747 testb $0x40, 0x3e(%rbx) je 0x1ed9754 leaq 0xc0(%rsp), %rdi vzeroupper callq *%rax movq 0xc0(%rsp), %rax cmpl $0x0, (%rax) vmovaps 0x200(%rsp), %ymm22 vmovaps 0x1e0(%rsp), %ymm23 vmovaps 0x1c0(%rsp), %ymm24 vbroadcastss 0x38f83(%rip), %ymm25 # 0x1f12704 vmovaps 0x1a0(%rsp), %ymm26 vmovaps 0x180(%rsp), %ymm27 vmovaps 0x160(%rsp), %ymm28 vmovaps 0x140(%rsp), %ymm29 vmovaps 0x120(%rsp), %ymm30 vpmovsxbd 0x842bd(%rip), %ymm31 # 0x1f5da70 movq 0x18(%rsp), %rsi movq 0x20(%rsp), %rdi je 0x1ed981a movq 0xd8(%rsp), %rax movq 0xe0(%rsp), %rcx vmovss 
(%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1ed9829 movq (%rsp), %rax vmovss 0x10(%rsp), %xmm0 vmovss %xmm0, 0x20(%rax) movl $0x1, %eax shlxl %edi, %eax, %eax kmovd %eax, %k0 movzbl %r15b, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 movq (%rsp), %rax vmovaps 0x380(%rsp), %ymm1 vcmpleps 0x20(%rax){1to8}, %ymm1, %k1 kandb %k1, %k0, %k2 kmovd %k2, %r15d ktestb %k1, %k0 je 0x1ed98b2 kmovd %r15d, %k1 vbroadcastss 0x121ac(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm1, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %r15b, %al movzbl %al, %eax movzbl %r15b, %ecx cmovnel %eax, %ecx tzcntl %ecx, %edi testb %r15b, %r15b movq %r13, %rax movq %r14, %rcx jne 0x1ed95bc jmp 0x1ed9299 movq 0xb0(%rsp), %rax incq %rax cmpq 0xa8(%rsp), %rax movq 0xa0(%rsp), %rdx jne 0x1ed8e67 movq (%rsp), %rax vbroadcastss 0x20(%rax), %ymm27 movq 0x68(%rsp), %rsi movq 0x58(%rsp), %rdi movq 0x50(%rsp), %r8 movq 0x48(%rsp), %r9 movq 0x40(%rsp), %r10 movq 0x38(%rsp), %r11 movq 0x30(%rsp), %rbx vpbroadcastd 0x87c9e(%rip), %ymm7 # 0x1f615bc leaq 0x3e0(%rsp), %r15 vmovdqa 0x3a0(%rsp), %ymm6 movq 0x60(%rsp), %r12 jmp 0x1ed8a39 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::SubGridMBIntersector1Pluecker<8, true>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
                                                                          RayHit& __restrict__ ray,
                                                                          RayQueryContext* __restrict__ context)
{
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;

  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode) return;

  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);

  /* stack state */
  StackItemT<NodeRef> stack[stackSize];    // stack of nodes
  StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
  StackItemT<NodeRef>* stackEnd = stack+stackSize;
  stack[0].ptr  = bvh->root;
  stack[0].dist = neg_inf;
  if (bvh->root == BVH::emptyNode) return;

  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif

  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));

  /* load the ray into SIMD registers */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));

  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;

  /* pop loop */
  while (true) pop:
  {
    /* pop next node */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = NodeRef(stackPtr->ptr);

    /* if popped node is too far, pop next one */
    if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
      continue;

    /* downtraversal loop */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(normal.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }

      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;

      /* select next child and push other children */
      nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
    }

    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(normal.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
    tray.tfar = ray.tfar;

    /* push lazy node onto stack */
    if (unlikely(lazy_node)) {
      stackPtr->ptr  = lazy_node;
      stackPtr->dist = neg_inf;
      stackPtr++;
    }
  }
}
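The loop above is the standard stack-based BVH traversal: pop the nearest deferred node, drop it if a closer hit has already shortened the ray, descend into near children first while pushing far children for later, and intersect primitives at leaves. A minimal self-contained sketch of that pattern follows; ToyNode, ToyRay, and the two-wide tree are illustrative assumptions, not Embree's data structures (Embree's nodes are N-wide and the child ordering comes from SIMD hit masks, as the disassembly below shows).

#include <cfloat>
#include <cstdio>
#include <vector>

struct ToyRay  { float tfar = FLT_MAX; };      // shortened as hits are found

struct ToyNode {                                // toy 2-wide node, not Embree's
  bool  leaf = false;
  float dist[2] = { FLT_MAX, FLT_MAX };         // entry distance per child
  const ToyNode* child[2] = { nullptr, nullptr };
  int   primID = -1;                            // valid when leaf == true
};

struct StackItem { const ToyNode* node; float dist; };

void traverse(const ToyNode* root, ToyRay& ray)
{
  std::vector<StackItem> stack;
  stack.push_back({ root, -FLT_MAX });

  while (!stack.empty()) {
    StackItem top = stack.back();
    stack.pop_back();
    if (top.dist > ray.tfar) continue;          // culled by a closer hit

    const ToyNode* cur = top.node;
    while (!cur->leaf) {
      // visit the nearer child first, defer the farther one
      int nearIdx = cur->dist[0] <= cur->dist[1] ? 0 : 1;
      int farIdx  = 1 - nearIdx;
      if (cur->child[farIdx] && cur->dist[farIdx] <= ray.tfar)
        stack.push_back({ cur->child[farIdx], cur->dist[farIdx] });
      if (!cur->child[nearIdx] || cur->dist[nearIdx] > ray.tfar)
        goto pop;                               // no child hit: pop next node
      cur = cur->child[nearIdx];
    }
    // leaf: a real intersector would test primitives and shorten ray.tfar
    std::printf("intersect primitive %d\n", cur->primID);
  pop:;
  }
}

int main() {
  ToyNode leafA; leafA.leaf = true; leafA.primID = 7;
  ToyNode leafB; leafB.leaf = true; leafB.primID = 9;
  ToyNode root;
  root.dist[0] = 1.0f; root.dist[1] = 2.0f;
  root.child[0] = &leafA; root.child[1] = &leafB;
  ToyRay ray;
  traverse(&root, ray);                         // visits leaf 7, then leaf 9
  return 0;
}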
movq (%rdi), %rax cmpq $0x8, 0x70(%rax) je 0x1eda2b3 pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx andq $-0x20, %rsp subq $0x27a0, %rsp # imm = 0x27A0 movq 0x70(%rax), %rax movq %rax, 0x440(%rsp) movl $0x0, 0x448(%rsp) cmpq $0x8, %rax jne 0x1eda2b7 leaq -0x28(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq movq %rdx, %r14 movq %rsi, %r15 vmovaps 0x10(%rsi), %xmm0 vxorps %xmm27, %xmm27, %xmm27 vmaxss 0xc(%rsi), %xmm27, %xmm1 vmaxss 0x20(%rsi), %xmm27, %xmm2 vandps 0x46be4(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec4 vcmpltps 0x16cfd(%rip){1to4}, %xmm3, %k1 # 0x1ef0fe8 vbroadcastss 0x12420(%rip), %xmm3 # 0x1eec714 vdivps %xmm0, %xmm3, %xmm0 vbroadcastss 0x46c5e(%rip), %xmm0 {%k1} # 0x1f20f60 vmulps 0x45c04(%rip){1to4}, %xmm0, %xmm3 # 0x1f1ff10 vmulps 0x45bfe(%rip){1to4}, %xmm0, %xmm0 # 0x1f1ff14 vbroadcastss (%rsi), %ymm28 leaq 0x450(%rsp), %rax vbroadcastss 0x4(%rsi), %ymm29 vbroadcastss 0x8(%rsi), %ymm30 xorl %ecx, %ecx vucomiss %xmm27, %xmm3 setb %cl vbroadcastss %xmm3, %ymm31 vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3] vbroadcastsd %xmm4, %ymm7 vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0] vbroadcastss 0x46b82(%rip), %ymm6 # 0x1f20edc vpermps %ymm3, %ymm6, %ymm3 vbroadcastss %xmm0, %ymm8 vbroadcastss 0x38397(%rip), %ymm9 # 0x1f12704 vpermps %ymm0, %ymm9, %ymm9 vpermps %ymm0, %ymm6, %ymm0 vmovaps %ymm0, 0x2a0(%rsp) vmovaps %ymm8, %ymm6 shll $0x5, %ecx xorl %edx, %edx vucomiss %xmm27, %xmm4 vmovaps %ymm9, %ymm4 setb %dl shll $0x5, %edx orq $0x40, %rdx xorl %esi, %esi vucomiss %xmm27, %xmm5 vmovaps %ymm3, %ymm5 setb %sil shll $0x5, %esi orq $0x80, %rsi movq %rcx, 0x88(%rsp) xorq $0x20, %rcx movq %rcx, 0x70(%rsp) movq %rdx, 0x80(%rsp) xorq $0x20, %rdx movq %rdx, 0x68(%rsp) movq %rsi, 0x78(%rsp) xorq $0x20, %rsi movq %rsi, 0x60(%rsp) vbroadcastss %xmm1, %ymm0 vmovaps %ymm0, 0x280(%rsp) vbroadcastss %xmm2, %ymm0 vmovaps %ymm0, 0x200(%rsp) vpmovsxbd 0x83664(%rip), %ymm8 # 0x1f5da70 vpbroadcastd 0x871a7(%rip), %ymm9 # 0x1f615bc vmovaps %ymm28, 0x1c0(%rsp) vmovaps %ymm29, 0x1a0(%rsp) vmovaps %ymm30, 0x180(%rsp) vmovaps %ymm31, 0x160(%rsp) vmovaps %ymm4, 0x100(%rsp) movq %r14, 0xa8(%rsp) movq %r15, 0xa0(%rsp) vmovaps %ymm7, 0x260(%rsp) vmovaps %ymm3, 0x240(%rsp) vmovaps %ymm6, 0x220(%rsp) vmovss 0x20(%r15), %xmm0 leaq 0x440(%rsp), %rcx cmpq %rcx, %rax je 0x1eda2a5 vmovss -0x8(%rax), %xmm1 addq $-0x10, %rax vucomiss %xmm0, %xmm1 ja 0x1eda46f movq %rax, 0x8(%rsp) movq (%rax), %rdx movq %rdx, %rsi testb $0x8, %sil movq 0x98(%rsp), %rax jne 0x1eda5c6 movq %rsi, %rax andq $-0x10, %rax vbroadcastss 0x1c(%r15), %ymm0 movq 0x88(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm1 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vsubps %ymm28, %ymm1, %ymm1 vmulps %ymm1, %ymm31, %ymm1 movq 0x80(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm2 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm7, %ymm2 movq 0x78(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm3 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vsubps %ymm30, %ymm3, %ymm3 vmulps %ymm3, %ymm5, %ymm3 vmaxps %ymm3, %ymm2, %ymm2 vmovaps 0x280(%rsp), %ymm3 vmaxps %ymm1, %ymm3, %ymm1 vmaxps %ymm2, %ymm1, %ymm10 movq 0x70(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm1 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem vsubps %ymm28, %ymm1, %ymm1 vmulps %ymm1, %ymm6, %ymm1 movq 0x68(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm2 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm2 # 
ymm2 = (ymm0 * ymm2) + mem vsubps %ymm29, %ymm2, %ymm2 vmulps %ymm2, %ymm4, %ymm2 movq 0x60(%rsp), %rcx vmovaps 0x100(%rax,%rcx), %ymm3 vfmadd213ps 0x40(%rax,%rcx), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem vsubps %ymm30, %ymm3, %ymm3 vmulps 0x2a0(%rsp), %ymm3, %ymm3 vminps %ymm3, %ymm2, %ymm2 vmovaps 0x200(%rsp), %ymm3 vminps %ymm1, %ymm3, %ymm1 vminps %ymm2, %ymm1, %ymm1 movl %esi, %ecx andl $0x7, %ecx cmpl $0x6, %ecx je 0x1eda646 vcmpleps %ymm1, %ymm10, %k0 kmovb %k0, %eax movq %rdx, %rsi testb $0x8, %sil movq %rax, 0x98(%rsp) jne 0x1eda63f testq %rax, %rax je 0x1eda662 andq $-0x10, %rdx vmovdqu (%rdx), %ymm0 vmovdqu 0x20(%rdx), %ymm1 vmovdqa %ymm8, %ymm2 vpternlogd $0xf8, %ymm9, %ymm10, %ymm2 kmovd %eax, %k1 vpcompressd %ymm2, %ymm2 {%k1} vmovdqa %ymm0, %ymm3 vpermt2q %ymm1, %ymm2, %ymm3 vmovq %xmm3, %rcx prefetcht0 (%rcx) prefetcht0 0x40(%rcx) prefetcht0 0x80(%rcx) movq %rcx, %rdx prefetcht0 0xc0(%rcx) movq %rax, %rcx xorl %eax, %eax blsrq %rcx, %rcx jne 0x1eda669 testl %eax, %eax je 0x1eda497 jmp 0x1eda9dd movl $0x6, %eax jmp 0x1eda632 vcmpleps %ymm1, %ymm10, %k1 vcmpgeps 0x1c0(%rax), %ymm0, %k1 {%k1} vcmpltps 0x1e0(%rax), %ymm0, %k0 {%k1} jmp 0x1eda5bf movl $0x4, %eax jmp 0x1eda632 vpshufd $0x55, %ymm2, %ymm3 # ymm3 = ymm2[1,1,1,1,5,5,5,5] vmovdqa %ymm0, %ymm4 vpermt2q %ymm1, %ymm3, %ymm4 vmovq %xmm4, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm2, %ymm4 vpmaxsd %ymm3, %ymm2, %ymm3 blsrq %rcx, %rcx jne 0x1eda6de vpermi2q %ymm1, %ymm0, %ymm4 vmovq %xmm4, %rdx vpermt2q %ymm1, %ymm3, %ymm0 movq 0x8(%rsp), %rcx vmovq %xmm0, (%rcx) vpermd %ymm10, %ymm3, %ymm0 vmovd %xmm0, 0x8(%rcx) addq $0x10, %rcx movq %rcx, 0x8(%rsp) vmovaps 0x100(%rsp), %ymm4 jmp 0x1eda632 vmovdqa %ymm10, %ymm13 vmovaps %ymm6, %ymm10 vmovaps %ymm5, %ymm9 vpshufd $0xaa, %ymm2, %ymm6 # ymm6 = ymm2[2,2,2,2,6,6,6,6] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm6, %ymm5 vmovq %xmm5, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm6, %ymm4, %ymm5 vpmaxsd %ymm6, %ymm4, %ymm6 vpminsd %ymm6, %ymm3, %ymm4 vpmaxsd %ymm6, %ymm3, %ymm6 blsrq %rcx, %rcx jne 0x1eda799 vpermi2q %ymm1, %ymm0, %ymm5 vmovq %xmm5, %rdx vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq 0x8(%rsp), %rcx vmovq %xmm2, (%rcx) vpermd %ymm13, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rcx) vpermt2q %ymm1, %ymm4, %ymm0 vmovq %xmm0, 0x10(%rcx) vpermd %ymm13, %ymm4, %ymm0 vmovd %xmm0, 0x18(%rcx) addq $0x20, %rcx movq %rcx, 0x8(%rsp) vmovaps %ymm9, %ymm5 vmovaps %ymm10, %ymm6 vmovdqa %ymm13, %ymm10 vmovaps 0x100(%rsp), %ymm4 vpbroadcastd 0x86e28(%rip), %ymm9 # 0x1f615bc jmp 0x1eda632 vmovdqa %ymm8, %ymm11 vmovaps %ymm7, %ymm8 vpshufd $0xff, %ymm2, %ymm3 # ymm3 = ymm2[3,3,3,3,7,7,7,7] vmovdqa %ymm0, %ymm7 vpermt2q %ymm1, %ymm3, %ymm7 vmovq %xmm7, %rdx prefetcht0 (%rdx) prefetcht0 0x40(%rdx) prefetcht0 0x80(%rdx) prefetcht0 0xc0(%rdx) vpminsd %ymm3, %ymm5, %ymm7 vpmaxsd %ymm3, %ymm5, %ymm5 vpminsd %ymm5, %ymm4, %ymm3 vpmaxsd %ymm5, %ymm4, %ymm4 vpminsd %ymm4, %ymm6, %ymm5 vpmaxsd %ymm4, %ymm6, %ymm6 blsrq %rcx, %rcx jne 0x1eda89d vpermi2q %ymm1, %ymm0, %ymm7 vmovq %xmm7, %rdx vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm6, %ymm2 movq 0x8(%rsp), %rcx vmovq %xmm2, (%rcx) vpermd %ymm13, %ymm6, %ymm2 vmovd %xmm2, 0x8(%rcx) vmovdqa %ymm0, %ymm2 vpermt2q %ymm1, %ymm5, %ymm2 vmovq %xmm2, 0x10(%rcx) vpermd %ymm13, %ymm5, %ymm2 vmovd %xmm2, 0x18(%rcx) vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, 0x20(%rcx) vpermd %ymm13, %ymm3, %ymm0 vmovd %xmm0, 0x28(%rcx) addq $0x30, %rcx movq 
%rcx, 0x8(%rsp) vxorps %xmm27, %xmm27, %xmm27 vmovaps 0x1c0(%rsp), %ymm28 vmovaps 0x1a0(%rsp), %ymm29 vmovaps 0x180(%rsp), %ymm30 vmovaps 0x160(%rsp), %ymm31 vmovaps %ymm8, %ymm7 vmovaps %ymm9, %ymm5 vmovaps %ymm10, %ymm6 vmovdqa %ymm13, %ymm10 vmovaps 0x100(%rsp), %ymm4 vmovdqa %ymm11, %ymm8 jmp 0x1eda78b valignd $0x3, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[3,4,5,6,7,0,1,2] vpbroadcastd 0x46613(%rip), %xmm2 # 0x1f20ec0 vpmovsxbd 0x86d6a(%rip), %ymm12 # 0x1f61620 vpermt2d %ymm7, %ymm12, %ymm2 vpmovsxbd 0x86d63(%rip), %ymm7 # 0x1f61628 vpermt2d %ymm3, %ymm7, %ymm2 vpermt2d %ymm5, %ymm7, %ymm2 vpmovsxbd 0x86d56(%rip), %ymm3 # 0x1f61630 vpermt2d %ymm6, %ymm3, %ymm2 movq %rcx, %rdx vmovdqa %ymm2, %ymm3 vpbroadcastd 0x37e14(%rip), %ymm2 # 0x1f12704 vpermd %ymm4, %ymm2, %ymm2 valignd $0x1, %ymm4, %ymm4, %ymm4 # ymm4 = ymm4[1,2,3,4,5,6,7,0] vmovdqa %ymm0, %ymm5 vpermt2q %ymm1, %ymm4, %ymm5 vmovq %xmm5, %rsi prefetcht0 (%rsi) prefetcht0 0x40(%rsi) prefetcht0 0x80(%rsi) prefetcht0 0xc0(%rsi) blsrq %rdx, %rdx vpcmpnltd %ymm3, %ymm2, %k0 vpmaxsd %ymm3, %ymm2, %ymm2 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm3, %ymm3, %ymm2 {%k1} # ymm2 {%k1} = ymm3[7,0,1,2,3,4,5,6] jne 0x1eda8e3 popcntq %rcx, %rcx addq $0x3, %rcx vmovdqa %ymm2, %ymm3 vpermi2q %ymm1, %ymm0, %ymm3 movq 0x8(%rsp), %rdx vmovq %xmm3, (%rdx) vpermd %ymm13, %ymm2, %ymm3 vmovd %xmm3, 0x8(%rdx) valignd $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,2,3,4,5,6,7,0] addq $0x10, %rdx movq %rdx, 0x8(%rsp) vmovdqa %ymm3, %ymm2 decq %rcx jne 0x1eda94d vpermt2q %ymm1, %ymm3, %ymm0 vmovq %xmm0, %rdx vxorps %xmm27, %xmm27, %xmm27 vmovaps 0x1c0(%rsp), %ymm28 vmovaps 0x1a0(%rsp), %ymm29 vmovaps 0x180(%rsp), %ymm30 vmovaps 0x160(%rsp), %ymm31 vmovaps %ymm8, %ymm7 vmovaps %ymm9, %ymm5 vmovaps %ymm10, %ymm6 vmovaps 0x100(%rsp), %ymm4 vmovdqa %ymm11, %ymm8 vpbroadcastd 0x86be9(%rip), %ymm9 # 0x1f615bc vmovdqa %ymm13, %ymm10 jmp 0x1eda632 cmpl $0x6, %eax jne 0x1edb8ae vmovdqa %ymm10, 0x400(%rsp) movl %edx, %eax andl $0xf, %eax addq $-0x8, %rax movq %rax, 0x58(%rsp) je 0x1edb856 andq $-0x10, %rdx xorl %eax, %eax movq %rdx, 0x48(%rsp) movq %rax, 0x90(%rsp) imulq $0xe0, %rax, %rax vmovss 0x1c(%r15), %xmm0 vsubss 0xd0(%rdx,%rax), %xmm0, %xmm0 vmulss 0xd4(%rdx,%rax), %xmm0, %xmm0 vmovq 0x40(%rdx,%rax), %xmm1 vmovq 0x48(%rdx,%rax), %xmm2 vpcmpleub %xmm2, %xmm1, %k0 vpmovzxbd %xmm1, %ymm1 # ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero vbroadcastss 0x7c(%rdx,%rax), %ymm3 vcvtdq2ps %ymm1, %ymm4 vbroadcastss 0x70(%rdx,%rax), %ymm5 vfmadd213ps %ymm5, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + ymm5 vpmovzxbd 0x88(%rdx,%rax), %ymm1 vcvtdq2ps %ymm1, %ymm1 vbroadcastss 0xc4(%rdx,%rax), %ymm6 vbroadcastss 0xb8(%rdx,%rax), %ymm7 vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7 vbroadcastss %xmm0, %ymm0 vsubps %ymm4, %ymm1, %ymm1 vfmadd213ps %ymm4, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + ymm4 vpmovzxbd %xmm2, %ymm2 # ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero vcvtdq2ps %ymm2, %ymm2 vfmadd213ps %ymm5, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm5 vpmovzxbd 0x90(%rdx,%rax), %ymm3 vcvtdq2ps %ymm3, %ymm3 vfmadd213ps %ymm7, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm7 vsubps %ymm2, %ymm3, %ymm3 vfmadd213ps %ymm2, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm2 vpmovzxbd 0x50(%rdx,%rax), %ymm2 vbroadcastss 
0x80(%rdx,%rax), %ymm4 vbroadcastss 0x74(%rdx,%rax), %ymm5 vcvtdq2ps %ymm2, %ymm2 vfmadd213ps %ymm5, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm2) + ymm5 vpmovzxbd 0x98(%rdx,%rax), %ymm6 vbroadcastss 0xc8(%rdx,%rax), %ymm7 vcvtdq2ps %ymm6, %ymm6 vbroadcastss 0xbc(%rdx,%rax), %ymm8 vfmadd213ps %ymm8, %ymm7, %ymm6 # ymm6 = (ymm7 * ymm6) + ymm8 vsubps %ymm2, %ymm6, %ymm6 vfmadd213ps %ymm2, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm6) + ymm2 vpmovzxbd 0x58(%rdx,%rax), %ymm2 vcvtdq2ps %ymm2, %ymm2 vfmadd213ps %ymm5, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm2) + ymm5 vpmovzxbd 0xa0(%rdx,%rax), %ymm4 vcvtdq2ps %ymm4, %ymm4 vfmadd213ps %ymm8, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm4) + ymm8 vsubps %ymm2, %ymm4, %ymm4 vfmadd213ps %ymm2, %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + ymm2 vpmovzxbd 0x60(%rdx,%rax), %ymm2 vcvtdq2ps %ymm2, %ymm2 vbroadcastss 0x84(%rdx,%rax), %ymm5 vbroadcastss 0x78(%rdx,%rax), %ymm7 vfmadd213ps %ymm7, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm7 vpmovzxbd 0xa8(%rdx,%rax), %ymm8 vbroadcastss 0xcc(%rdx,%rax), %ymm9 vbroadcastss 0xc0(%rdx,%rax), %ymm10 vcvtdq2ps %ymm8, %ymm8 vfmadd213ps %ymm10, %ymm9, %ymm8 # ymm8 = (ymm9 * ymm8) + ymm10 vsubps %ymm2, %ymm8, %ymm8 vfmadd213ps %ymm2, %ymm0, %ymm8 # ymm8 = (ymm0 * ymm8) + ymm2 vpmovzxbd 0x68(%rdx,%rax), %ymm2 vcvtdq2ps %ymm2, %ymm2 vfmadd213ps %ymm7, %ymm5, %ymm2 # ymm2 = (ymm5 * ymm2) + ymm7 movq %rax, 0x18(%rsp) vpmovzxbd 0xb0(%rdx,%rax), %ymm5 vcvtdq2ps %ymm5, %ymm5 vfmadd213ps %ymm10, %ymm9, %ymm5 # ymm5 = (ymm9 * ymm5) + ymm10 vsubps %ymm2, %ymm5, %ymm5 vfmadd213ps %ymm2, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm5) + ymm2 vsubps %ymm28, %ymm1, %ymm0 vmulps %ymm0, %ymm31, %ymm0 vsubps %ymm29, %ymm6, %ymm1 vmulps 0x260(%rsp), %ymm1, %ymm1 vsubps %ymm30, %ymm8, %ymm2 vmulps 0x240(%rsp), %ymm2, %ymm2 vsubps %ymm28, %ymm3, %ymm3 vmulps 0x220(%rsp), %ymm3, %ymm3 vsubps %ymm29, %ymm4, %ymm4 vmulps 0x100(%rsp), %ymm4, %ymm4 vsubps %ymm30, %ymm5, %ymm5 vmulps 0x2a0(%rsp), %ymm5, %ymm5 vpminsd %ymm3, %ymm0, %ymm6 vpmaxsd %ymm3, %ymm0, %ymm0 vpminsd %ymm4, %ymm1, %ymm3 vpmaxsd %ymm3, %ymm6, %ymm3 vpmaxsd %ymm4, %ymm1, %ymm1 vpminsd %ymm1, %ymm0, %ymm0 vpminsd %ymm5, %ymm2, %ymm1 vpmaxsd %ymm5, %ymm2, %ymm2 vpmaxsd 0x280(%rsp), %ymm1, %ymm1 vpmaxsd %ymm1, %ymm3, %ymm1 vpminsd 0x200(%rsp), %ymm2, %ymm2 vpminsd %ymm2, %ymm0, %ymm0 vpcmpled %ymm0, %ymm1, %k1 vmovdqa %ymm1, 0x420(%rsp) ktestb %k0, %k1 je 0x1edb83b movq 0x18(%rsp), %rax addq 0x48(%rsp), %rax movq %rax, 0x18(%rsp) kandb %k0, %k1, %k0 kmovd %k0, %eax movzbl %al, %ecx tzcntq %rcx, %rax vmovss 0x420(%rsp,%rax,4), %xmm1 vmovss 0x20(%r15), %xmm0 vucomiss %xmm0, %xmm1 ja 0x1edb56a movq %rcx, 0xc8(%rsp) movq 0x18(%rsp), %rcx movzwl (%rcx,%rax,8), %r8d movl %r8d, 0x10(%rsp) movzwl 0x2(%rcx,%rax,8), %r9d movl %r9d, 0x1e0(%rsp) movl 0xd8(%rcx), %edx movq %rdx, 0x30(%rsp) movl 0x4(%rcx,%rax,8), %ecx movq (%r14), %rax movq %rax, 0xc0(%rsp) movq 0x1e8(%rax), %rax movq (%rax,%rdx,8), %rax movq 0x58(%rax), %rsi movq 0x68(%rax), %rdi movq %rcx, 0x50(%rsp) imulq %rcx, %rdi vmovss 0x1c(%r15), %xmm1 vmovss 0x28(%rax), %xmm2 vmovss 0x2c(%rax), %xmm3 vmovss 0x30(%rax), %xmm4 vsubss %xmm3, %xmm1, %xmm1 vsubss %xmm3, %xmm4, %xmm3 vdivss %xmm3, %xmm1, %xmm1 vmulss %xmm1, %xmm2, %xmm1 vroundss $0x9, %xmm1, %xmm1, %xmm3 vaddss 0x15c8c(%rip), %xmm2, %xmm2 # 0x1ef09cc vminss %xmm2, %xmm3, %xmm2 vmaxss %xmm2, %xmm27, %xmm2 movl $0x7fff, %edx # imm = 0x7FFF andl %edx, %r8d movl (%rsi,%rdi), %ecx movl %r8d, 0x40(%rsp) addl %r8d, %ecx movl %r9d, %r8d andl %edx, %r8d movq %rsi, 0xb8(%rsp) movq %rdi, 0xb0(%rsp) movl 0x4(%rsi,%rdi), %r9d movl %r9d, %r15d 
movl %r8d, 0x3c(%rsp) imull %r8d, %r15d addl %ecx, %r15d vcvttss2si %xmm2, %ecx movslq %ecx, %rcx movq 0xe0(%rax), %rdx movq %rdx, 0x28(%rsp) imulq $0x38, %rcx, %r13 movq 0x10(%rdx,%r13), %rbx movq %rbx, %rax imulq %r15, %rax movq (%rdx,%r13), %r12 vmovups (%r12,%rax), %xmm3 leaq 0x1(%r15), %rax movq 0x48(%rdx,%r13), %r10 movq %r10, %rcx imulq %rax, %rcx movq %rcx, 0x20(%rsp) imulq %rbx, %rax vmovups (%r12,%rax), %xmm4 leaq (%r15,%r9), %rdx movq %rdx, %rax imulq %rbx, %rax vmovups (%r12,%rax), %xmm5 leaq (%r15,%r9), %rdi incq %rdi movq %rdi, %rax imulq %rbx, %rax vmovups (%r12,%rax), %xmm6 movq %r10, %r11 imulq %r15, %r11 movq %r10, %r14 imulq %rdx, %r14 movq %r10, %r8 imulq %rdi, %r8 xorl %ecx, %ecx cmpw $0x0, 0x10(%rsp) setns %cl leaq (%r15,%rcx), %rax incq %rax movq %r10, %r15 imulq %rax, %r15 imulq %rbx, %rax vmovups (%r12,%rax), %xmm7 addq %rdi, %rcx movq %rcx, %rax imulq %rbx, %rax vmovups (%r12,%rax), %xmm8 movq %r10, %rsi imulq %rcx, %rsi cmpw $0x0, 0x1e0(%rsp) movl $0x0, %eax cmovnsq %r9, %rax addq %rax, %rdx movq %r10, %r9 imulq %rdx, %r9 imulq %rbx, %rdx vmovups (%r12,%rdx), %xmm9 addq %rax, %rdi movq %r10, %rdx imulq %rdi, %rdx imulq %rbx, %rdi vmovups (%r12,%rdi), %xmm10 addq %rcx, %rax imulq %rax, %rbx vmovups (%r12,%rbx), %xmm11 movq 0x28(%rsp), %rcx movq 0x38(%rcx,%r13), %rcx vmovups (%rcx,%r11), %xmm12 movq 0x20(%rsp), %rdi vmovups (%rcx,%rdi), %xmm13 vmovups (%rcx,%r14), %xmm14 movq 0xa8(%rsp), %r14 vmovups (%rcx,%r8), %xmm15 vmovups (%rcx,%r15), %xmm16 movq 0xa0(%rsp), %r15 vmovups (%rcx,%rsi), %xmm17 movq 0x30(%rsp), %rsi vmovups (%rcx,%r9), %xmm18 vmovups (%rcx,%rdx), %xmm19 imulq %r10, %rax vmovups (%rcx,%rax), %xmm20 vsubss %xmm2, %xmm1, %xmm1 vbroadcastss %xmm1, %xmm1 vsubps %xmm3, %xmm12, %xmm2 vfmadd213ps %xmm3, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm3 vsubps %xmm4, %xmm13, %xmm3 vfmadd213ps %xmm4, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm4 vsubps %xmm5, %xmm14, %xmm4 vfmadd213ps %xmm5, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm4) + xmm5 vsubps %xmm6, %xmm15, %xmm5 vfmadd213ps %xmm6, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm6 vsubps %xmm7, %xmm16, %xmm6 vfmadd213ps %xmm7, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + xmm7 vsubps %xmm8, %xmm17, %xmm7 vfmadd213ps %xmm8, %xmm1, %xmm7 # xmm7 = (xmm1 * xmm7) + xmm8 vsubps %xmm9, %xmm18, %xmm8 vfmadd213ps %xmm9, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + xmm9 vsubps %xmm10, %xmm19, %xmm9 vfmadd213ps %xmm10, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm10 vsubps %xmm11, %xmm20, %xmm10 vfmadd213ps %xmm11, %xmm1, %xmm10 # xmm10 = (xmm1 * xmm10) + xmm11 vunpcklps %xmm7, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm7[0],xmm3[1],xmm7[1] vunpckhps %xmm7, %xmm3, %xmm11 # xmm11 = xmm3[2],xmm7[2],xmm3[3],xmm7[3] vunpcklps %xmm5, %xmm6, %xmm12 # xmm12 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] vunpckhps %xmm5, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm5[2],xmm6[3],xmm5[3] vunpcklps %xmm6, %xmm11, %xmm6 # xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1] vunpcklps %xmm12, %xmm1, %xmm11 # xmm11 = xmm1[0],xmm12[0],xmm1[1],xmm12[1] vunpckhps %xmm12, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm12[2],xmm1[3],xmm12[3] vunpcklps %xmm9, %xmm4, %xmm12 # xmm12 = xmm4[0],xmm9[0],xmm4[1],xmm9[1] vunpckhps %xmm9, %xmm4, %xmm13 # xmm13 = xmm4[2],xmm9[2],xmm4[3],xmm9[3] vunpcklps %xmm8, %xmm5, %xmm14 # xmm14 = xmm5[0],xmm8[0],xmm5[1],xmm8[1] vunpckhps %xmm8, %xmm5, %xmm8 # xmm8 = xmm5[2],xmm8[2],xmm5[3],xmm8[3] vunpcklps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1] vunpcklps %xmm14, %xmm12, %xmm13 # xmm13 = xmm12[0],xmm14[0],xmm12[1],xmm14[1] vunpckhps %xmm14, %xmm12, %xmm12 # xmm12 = 
xmm12[2],xmm14[2],xmm12[3],xmm14[3] vinsertf128 $0x1, %xmm9, %ymm4, %ymm4 vinsertf128 $0x1, %xmm7, %ymm3, %ymm3 vunpcklps %ymm4, %ymm3, %ymm7 # ymm7 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5] vinsertf128 $0x1, %xmm10, %ymm5, %ymm9 vinsertf128 $0x1, %xmm5, %ymm2, %ymm2 vunpcklps %ymm9, %ymm2, %ymm5 # ymm5 = ymm2[0],ymm9[0],ymm2[1],ymm9[1],ymm2[4],ymm9[4],ymm2[5],ymm9[5] vunpcklps %ymm7, %ymm5, %ymm10 # ymm10 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5] vunpckhps %ymm7, %ymm5, %ymm5 # ymm5 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7] vunpckhps %ymm4, %ymm3, %ymm3 # ymm3 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7] vunpckhps %ymm9, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm9[2],ymm2[3],ymm9[3],ymm2[6],ymm9[6],ymm2[7],ymm9[7] vunpcklps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] vinsertf128 $0x1, %xmm11, %ymm11, %ymm3 vinsertf128 $0x1, %xmm1, %ymm1, %ymm1 vinsertf128 $0x1, %xmm6, %ymm6, %ymm4 vinsertf128 $0x1, %xmm13, %ymm13, %ymm11 vinsertf128 $0x1, %xmm12, %ymm12, %ymm13 vinsertf128 $0x1, %xmm8, %ymm8, %ymm14 vbroadcastss (%r15), %ymm15 vbroadcastss 0x4(%r15), %ymm16 vbroadcastss 0x8(%r15), %ymm17 vbroadcastss 0x10(%r15), %ymm7 leaq 0x7(%rsp), %rax movq %rax, 0x320(%rsp) vbroadcastss 0x14(%r15), %ymm9 vbroadcastss 0x18(%r15), %ymm12 vsubps %ymm15, %ymm10, %ymm6 vsubps %ymm16, %ymm5, %ymm8 vsubps %ymm17, %ymm2, %ymm10 vsubps %ymm15, %ymm3, %ymm5 vsubps %ymm16, %ymm1, %ymm1 vsubps %ymm17, %ymm4, %ymm20 vsubps %ymm15, %ymm11, %ymm21 vsubps %ymm16, %ymm13, %ymm22 vsubps %ymm17, %ymm14, %ymm23 vsubps %ymm6, %ymm21, %ymm13 vsubps %ymm8, %ymm22, %ymm16 vsubps %ymm10, %ymm23, %ymm15 vsubps %ymm5, %ymm6, %ymm17 vsubps %ymm1, %ymm8, %ymm19 vsubps %ymm20, %ymm10, %ymm18 vsubps %ymm21, %ymm5, %ymm3 vsubps %ymm22, %ymm1, %ymm2 vsubps %ymm23, %ymm20, %ymm4 vaddps %ymm6, %ymm21, %ymm11 vaddps %ymm8, %ymm22, %ymm14 vaddps %ymm10, %ymm23, %ymm24 vmulps %ymm15, %ymm14, %ymm25 vfmsub231ps %ymm24, %ymm16, %ymm25 # ymm25 = (ymm16 * ymm24) - ymm25 vmulps %ymm13, %ymm24, %ymm24 vfmsub231ps %ymm11, %ymm15, %ymm24 # ymm24 = (ymm15 * ymm11) - ymm24 vmulps %ymm16, %ymm11, %ymm11 vfmsub231ps %ymm14, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm14) - ymm11 vmulps %ymm11, %ymm12, %ymm11 vfmadd231ps %ymm24, %ymm9, %ymm11 # ymm11 = (ymm9 * ymm24) + ymm11 vfmadd231ps %ymm25, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm25) + ymm11 vaddps %ymm5, %ymm6, %ymm14 vaddps %ymm1, %ymm8, %ymm24 vaddps %ymm20, %ymm10, %ymm25 vmulps %ymm18, %ymm24, %ymm26 vfmsub231ps %ymm25, %ymm19, %ymm26 # ymm26 = (ymm19 * ymm25) - ymm26 vmulps %ymm17, %ymm25, %ymm25 vfmsub231ps %ymm14, %ymm18, %ymm25 # ymm25 = (ymm18 * ymm14) - ymm25 vmulps %ymm19, %ymm14, %ymm14 vfmsub231ps %ymm24, %ymm17, %ymm14 # ymm14 = (ymm17 * ymm24) - ymm14 vmulps %ymm14, %ymm12, %ymm14 vfmadd231ps %ymm25, %ymm9, %ymm14 # ymm14 = (ymm9 * ymm25) + ymm14 vfmadd231ps %ymm26, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm26) + ymm14 vaddps %ymm21, %ymm5, %ymm5 vaddps %ymm22, %ymm1, %ymm1 vaddps %ymm23, %ymm20, %ymm20 vmulps %ymm4, %ymm1, %ymm21 vfmsub231ps %ymm20, %ymm2, %ymm21 # ymm21 = (ymm2 * ymm20) - ymm21 vmulps %ymm3, %ymm20, %ymm20 vfmsub231ps %ymm5, %ymm4, %ymm20 # ymm20 = (ymm4 * ymm5) - ymm20 vmulps %ymm2, %ymm5, %ymm5 vfmsub231ps %ymm1, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm1) - ymm5 vmulps %ymm5, %ymm12, %ymm22 vfmadd231ps %ymm20, %ymm9, %ymm22 # ymm22 = (ymm9 * ymm20) + ymm22 vfmadd231ps %ymm21, %ymm7, %ymm22 # ymm22 = (ymm7 * ymm21) + ymm22 vaddps %ymm14, %ymm11, %ymm1 
vaddps %ymm1, %ymm22, %ymm1 vandps 0x45d54(%rip){1to8}, %ymm1, %ymm5 # 0x1f20ec4 vmulps 0x45d52(%rip){1to8}, %ymm5, %ymm20 # 0x1f20ecc vminps %ymm14, %ymm11, %ymm21 vminps %ymm22, %ymm21, %ymm21 vxorps 0x45d30(%rip){1to8}, %ymm20, %ymm23 # 0x1f20ec0 vcmpnltps %ymm23, %ymm21, %k0 vmaxps %ymm14, %ymm11, %ymm21 vmaxps %ymm22, %ymm21, %ymm21 vcmpleps %ymm20, %ymm21, %k1 korb %k1, %k0, %k0 kortestb %k0, %k0 je 0x1edb562 vmulps %ymm19, %ymm15, %ymm20 vmulps %ymm18, %ymm13, %ymm21 vmulps %ymm17, %ymm16, %ymm22 vmulps %ymm2, %ymm18, %ymm23 vmulps %ymm4, %ymm17, %ymm24 vmulps %ymm3, %ymm19, %ymm25 vfmsub213ps %ymm20, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm16) - ymm20 vfmsub213ps %ymm21, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm15) - ymm21 vfmsub213ps %ymm22, %ymm19, %ymm13 # ymm13 = (ymm19 * ymm13) - ymm22 vfmsub213ps %ymm23, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm4) - ymm23 vfmsub213ps %ymm24, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) - ymm24 vfmsub213ps %ymm25, %ymm17, %ymm2 # ymm2 = (ymm17 * ymm2) - ymm25 vbroadcastss 0x45cba(%rip), %ymm19 # 0x1f20ec4 vandps %ymm19, %ymm20, %ymm17 vandps %ymm19, %ymm23, %ymm18 vcmpltps %ymm18, %ymm17, %k1 vandps %ymm19, %ymm21, %ymm17 vandps %ymm19, %ymm24, %ymm18 vcmpltps %ymm18, %ymm17, %k2 vandps %ymm19, %ymm22, %ymm17 vandps %ymm19, %ymm25, %ymm18 vcmpltps %ymm18, %ymm17, %k3 vmovaps %ymm16, %ymm4 {%k1} vmovaps %ymm15, %ymm3 {%k2} vmovaps %ymm13, %ymm2 {%k3} vmulps %ymm2, %ymm12, %ymm12 vfmadd213ps %ymm12, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm9) + ymm12 vfmadd213ps %ymm9, %ymm4, %ymm7 # ymm7 = (ymm4 * ymm7) + ymm9 vaddps %ymm7, %ymm7, %ymm7 vmulps %ymm2, %ymm10, %ymm9 vfmadd213ps %ymm9, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm8) + ymm9 vfmadd213ps %ymm8, %ymm4, %ymm6 # ymm6 = (ymm4 * ymm6) + ymm8 vaddps %ymm6, %ymm6, %ymm6 vrcp14ps %ymm7, %ymm8 vxorps 0x45c37(%rip){1to8}, %ymm7, %ymm9 # 0x1f20ec0 vmovaps %ymm8, %ymm10 vfnmadd213ps 0x1147c(%rip){1to8}, %ymm7, %ymm10 # ymm10 = -(ymm7 * ymm10) + mem vfmadd132ps %ymm8, %ymm8, %ymm10 # ymm10 = (ymm10 * ymm8) + ymm8 vmulps %ymm6, %ymm10, %ymm6 vbroadcastss %xmm0, %ymm0 vcmpleps %ymm0, %ymm6, %k1 vcmpgeps 0xc(%r15){1to8}, %ymm6, %k1 {%k1} vcmpneqps %ymm9, %ymm7, %k1 {%k1} kandb %k0, %k1, %k0 kortestb %k0, %k0 je 0x1edb562 vmovaps %ymm1, 0x300(%rsp) leaq 0x7(%rsp), %rax movq %rax, 0x320(%rsp) kmovb %k0, 0x328(%rsp) vmovaps %ymm6, 0x380(%rsp) vsubps %ymm14, %ymm1, %ymm0 vblendps $0xf0, %ymm0, %ymm11, %ymm6 # ymm6 = ymm11[0,1,2,3],ymm0[4,5,6,7] vsubps %ymm11, %ymm1, %ymm0 vblendps $0xf0, %ymm0, %ymm14, %ymm0 # ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7] vmovaps %ymm6, 0x2c0(%rsp) vmovaps %ymm0, 0x2e0(%rsp) vmovaps 0x8561e(%rip), %ymm7 # 0x1f60940 vmulps %ymm7, %ymm4, %ymm4 vmovaps %ymm4, 0x3a0(%rsp) vmulps %ymm7, %ymm3, %ymm3 vmovaps %ymm3, 0x3c0(%rsp) vmulps %ymm7, %ymm2, %ymm2 vmovaps %ymm2, 0x3e0(%rsp) movl 0x40(%rsp), %eax vpbroadcastd %eax, %ymm2 vpaddd 0x82165(%rip), %ymm2, %ymm2 # 0x1f5d4c0 movl 0x3c(%rsp), %eax vpbroadcastd %eax, %ymm3 vpaddd 0x82173(%rip), %ymm3, %ymm3 # 0x1f5d4e0 movq 0xb8(%rsp), %rcx movq 0xb0(%rsp), %rdx movzwl 0x8(%rcx,%rdx), %eax decl %eax vcvtsi2ss %eax, %xmm27, %xmm4 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm4, %xmm9, %xmm7 # xmm7 = xmm4[0],xmm9[1,2,3] vrcp14ss %xmm7, %xmm9, %xmm7 vmovss 0x15c57(%rip), %xmm10 # 0x1ef0ff8 vfnmadd213ss %xmm10, %xmm7, %xmm4 # xmm4 = -(xmm7 * xmm4) + xmm10 vmulss %xmm4, %xmm7, %xmm4 movzwl 0xa(%rcx,%rdx), %eax decl %eax vcvtsi2ss %eax, %xmm27, %xmm7 vmovss %xmm7, %xmm9, %xmm8 # xmm8 = xmm7[0],xmm9[1,2,3] vrcp14ss %xmm8, %xmm9, %xmm8 vfnmadd213ss %xmm10, %xmm8, %xmm7 # xmm7 = -(xmm8 * xmm7) + 
xmm10 vmulss %xmm7, %xmm8, %xmm7 vcvtdq2ps %ymm2, %ymm2 vmulps %ymm2, %ymm1, %ymm2 vaddps %ymm6, %ymm2, %ymm2 vbroadcastss %xmm4, %ymm4 vmulps %ymm4, %ymm2, %ymm2 vmovaps %ymm2, 0x2c0(%rsp) vcvtdq2ps %ymm3, %ymm3 vmulps %ymm3, %ymm1, %ymm3 vaddps %ymm0, %ymm3, %ymm0 vbroadcastss %xmm7, %ymm3 vmulps %ymm3, %ymm0, %ymm0 vmovaps %ymm0, 0x2e0(%rsp) movq 0xc0(%rsp), %rax movq 0x1e8(%rax), %rax movq (%rax,%rsi,8), %r12 movl 0x24(%r15), %eax testl %eax, 0x34(%r12) je 0x1edb562 movl 0x328(%rsp), %ebx vcmpnltps 0x15bae(%rip){1to8}, %ymm5, %k1 # 0x1ef0fe8 vrcp14ps %ymm1, %ymm3 vbroadcastss 0x112cb(%rip), %ymm4 # 0x1eec714 vfnmadd213ps %ymm4, %ymm3, %ymm1 # ymm1 = -(ymm3 * ymm1) + ymm4 vfmadd132ps %ymm3, %ymm3, %ymm1 {%k1} {z} # ymm1 {%k1} {z} = (ymm1 * ymm3) + ymm3 vmulps %ymm1, %ymm2, %ymm2 vminps %ymm4, %ymm2, %ymm2 vmovaps %ymm2, 0x340(%rsp) vmulps %ymm1, %ymm0, %ymm0 vminps %ymm4, %ymm0, %ymm0 vmovaps %ymm0, 0x360(%rsp) vmovaps 0x380(%rsp), %ymm5 kmovd %ebx, %k1 vbroadcastss 0x10594(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %bl, %al movzbl %al, %eax cmovel %ebx, %eax movzbl %al, %eax tzcntq %rax, %r13 movq 0x10(%r14), %rdi cmpq $0x0, 0x10(%rdi) jne 0x1edb57a cmpq $0x0, 0x40(%r12) jne 0x1edb57a vmovss 0x340(%rsp,%r13,4), %xmm0 vmovss 0x360(%rsp,%r13,4), %xmm1 vmovss 0x3a0(%rsp,%r13,4), %xmm2 vmovss 0x3c0(%rsp,%r13,4), %xmm3 vmovss 0x3e0(%rsp,%r13,4), %xmm4 vmovss 0x380(%rsp,%r13,4), %xmm5 vmovss %xmm5, 0x20(%r15) vmovss %xmm2, 0x30(%r15) vmovss %xmm3, 0x34(%r15) vmovss %xmm4, 0x38(%r15) vmovss %xmm0, 0x3c(%r15) vmovss %xmm1, 0x40(%r15) movq 0x50(%rsp), %rax movl %eax, 0x44(%r15) movl %esi, 0x48(%r15) movq 0x8(%r14), %rax movl (%rax), %ecx movl %ecx, 0x4c(%r15) movl 0x4(%rax), %eax movl %eax, 0x50(%r15) movq 0xc8(%rsp), %rcx blsrq %rcx, %rcx jne 0x1edac94 jmp 0x1edb83b movq 0x8(%r14), %rax movq %rax, 0x20(%rsp) vmovaps %ymm5, 0x1e0(%rsp) movq %rdi, 0x10(%rsp) vmovss 0x340(%rsp,%r13,4), %xmm0 vmovss 0x360(%rsp,%r13,4), %xmm1 vmovss 0x20(%r15), %xmm2 vmovss %xmm2, 0x28(%rsp) vmovss 0x380(%rsp,%r13,4), %xmm2 vmovss %xmm2, 0x20(%r15) vmovss 0x3a0(%rsp,%r13,4), %xmm2 vmovss 0x3c0(%rsp,%r13,4), %xmm3 vmovss 0x3e0(%rsp,%r13,4), %xmm4 vmovss %xmm2, 0x130(%rsp) vmovss %xmm3, 0x134(%rsp) vmovss %xmm4, 0x138(%rsp) vmovss %xmm0, 0x13c(%rsp) vmovss %xmm1, 0x140(%rsp) movq 0x50(%rsp), %rax movl %eax, 0x144(%rsp) movl %esi, 0x148(%rsp) movq 0x20(%rsp), %rcx movl (%rcx), %eax movl %eax, 0x14c(%rsp) movl 0x4(%rcx), %eax movl %eax, 0x150(%rsp) movl $0xffffffff, 0x44(%rsp) # imm = 0xFFFFFFFF leaq 0x44(%rsp), %rax movq %rax, 0xd0(%rsp) movq 0x18(%r12), %rax movq %rax, 0xd8(%rsp) movq %rcx, 0xe0(%rsp) movq %r15, 0xe8(%rsp) leaq 0x130(%rsp), %rax movq %rax, 0xf0(%rsp) movl $0x1, 0xf8(%rsp) movq 0x40(%r12), %rax testq %rax, %rax je 0x1edb6e5 leaq 0xd0(%rsp), %rdi vzeroupper callq *%rax movq 0x10(%rsp), %rdi vmovaps 0x1e0(%rsp), %ymm5 movq 0x30(%rsp), %rsi vmovaps 0x160(%rsp), %ymm31 vmovaps 0x180(%rsp), %ymm30 vmovaps 0x1a0(%rsp), %ymm29 vmovaps 0x1c0(%rsp), %ymm28 vxorps %xmm27, %xmm27, %xmm27 movq 0xd0(%rsp), %rax cmpl $0x0, (%rax) je 0x1edb7a9 movq 0x10(%rdi), %rax testq %rax, %rax je 0x1edb74e testb $0x2, (%rdi) jne 0x1edb6fb testb $0x40, 0x3e(%r12) je 0x1edb741 leaq 0xd0(%rsp), %rdi vzeroupper callq 
*%rax movq 0x10(%rsp), %rdi vmovaps 0x1e0(%rsp), %ymm5 movq 0x30(%rsp), %rsi vmovaps 0x160(%rsp), %ymm31 vmovaps 0x180(%rsp), %ymm30 vmovaps 0x1a0(%rsp), %ymm29 vmovaps 0x1c0(%rsp), %ymm28 vxorps %xmm27, %xmm27, %xmm27 movq 0xd0(%rsp), %rax cmpl $0x0, (%rax) je 0x1edb7a9 movq 0xe8(%rsp), %rax movq 0xf0(%rsp), %rcx vmovss (%rcx), %xmm0 vmovss %xmm0, 0x30(%rax) vmovss 0x4(%rcx), %xmm0 vmovss %xmm0, 0x34(%rax) vmovss 0x8(%rcx), %xmm0 vmovss %xmm0, 0x38(%rax) vmovss 0xc(%rcx), %xmm0 vmovss %xmm0, 0x3c(%rax) vmovss 0x10(%rcx), %xmm0 vmovss %xmm0, 0x40(%rax) movl 0x14(%rcx), %edx movl %edx, 0x44(%rax) movl 0x18(%rcx), %edx movl %edx, 0x48(%rax) movl 0x1c(%rcx), %edx movl %edx, 0x4c(%rax) movl 0x20(%rcx), %ecx movl %ecx, 0x50(%rax) jmp 0x1edb7b5 vmovss 0x28(%rsp), %xmm0 vmovss %xmm0, 0x20(%r15) movl $0x1, %eax shlxl %r13d, %eax, %eax kmovd %eax, %k0 movzbl %bl, %eax kmovd %eax, %k1 kandnb %k1, %k0, %k0 vcmpleps 0x20(%r15){1to8}, %ymm5, %k1 kandb %k1, %k0, %k2 kmovd %k2, %ebx ktestb %k1, %k0 je 0x1edb82e kmovd %ebx, %k1 vbroadcastss 0x1022f(%rip), %ymm0 # 0x1eeba20 vblendmps %ymm5, %ymm0, %ymm0 {%k1} vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6] vminps %ymm1, %ymm0, %ymm1 vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2] vminps %ymm2, %ymm1, %ymm1 vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1] vminps %ymm2, %ymm1, %ymm1 vcmpeqps %ymm1, %ymm0, %k0 kmovd %k0, %eax andb %bl, %al movzbl %al, %eax movzbl %bl, %ecx cmovnel %eax, %ecx tzcntl %ecx, %r13d testb %bl, %bl jne 0x1edb591 jmp 0x1edb562 movq 0x90(%rsp), %rax incq %rax cmpq 0x58(%rsp), %rax movq 0x48(%rsp), %rdx jne 0x1edaa0e vpbroadcastd 0x20(%r15), %ymm0 vmovdqa %ymm0, 0x200(%rsp) movq 0x8(%rsp), %rax vmovaps 0x260(%rsp), %ymm7 vmovaps 0x240(%rsp), %ymm5 vmovaps 0x220(%rsp), %ymm6 vmovaps 0x100(%rsp), %ymm4 vpmovsxbd 0x821d9(%rip), %ymm8 # 0x1f5da70 vpbroadcastd 0x85d1c(%rip), %ymm9 # 0x1f615bc vmovdqa 0x400(%rsp), %ymm10 jmp 0x1eda469 movq 0x8(%rsp), %rax jmp 0x1eda469
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::BVHNIntersector1<8, 16777232, true, embree::avx512::SubGridMBIntersector1Pluecker<8, true>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
bool BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::pointQuery(const Accel::Intersectors* This,
                                                                           PointQuery* query,
                                                                           PointQueryContext* context)
{
  return PointQueryDispatch<N, types, robust, PrimitiveIntersector1>::pointQuery(This, query, context);
}
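This entry point is only a forwarding shim: the template parameters pick the dispatch class at compile time, so the call resolves statically with no virtual dispatch. A minimal sketch of that shape, with made-up names standing in for PointQueryDispatch:

#include <cstdio>

// Policy class selected by template parameters; illustrative only.
template<bool robust>
struct Dispatch {
  static bool pointQuery(float radius) {
    std::printf("robust=%d radius=%f\n", robust, radius);
    return radius > 0.0f;
  }
};

// Thin forwarding wrapper, mirroring the shape of the snippet above.
template<bool robust>
struct Intersector {
  static bool pointQuery(float radius) {
    return Dispatch<robust>::pointQuery(radius);
  }
};

int main() {
  Intersector<true>::pointQuery(0.5f);  // resolves to Dispatch<true> at compile time
  return 0;
}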
subq $0x22c8, %rsp # imm = 0x22C8 movq (%rdi), %rax cmpq $0x8, 0x70(%rax) jne 0x1edb8d6 xorl %eax, %eax addq $0x22c8, %rsp # imm = 0x22C8 vzeroupper retq movq 0x70(%rax), %rax movq %rax, -0x80(%rsp) movl $0x0, -0x78(%rsp) vbroadcastss (%rsi), %ymm0 vbroadcastss 0x4(%rsi), %ymm1 vbroadcastss 0x8(%rsi), %ymm2 vbroadcastss 0x50(%rdx), %ymm10 vbroadcastss 0x54(%rdx), %ymm7 vbroadcastss 0x58(%rdx), %ymm9 cmpl $0x1, 0x18(%rdx) jne 0x1edb91b vmovss 0x10(%rsi), %xmm3 vmulss %xmm3, %xmm3, %xmm3 jmp 0x1edb926 vmovaps 0x50(%rdx), %xmm3 vdpps $0x7f, %xmm3, %xmm3, %xmm3 leaq -0x70(%rsp), %rax vsubps %ymm10, %ymm0, %ymm4 vaddps %ymm0, %ymm10, %ymm5 vsubps %ymm7, %ymm1, %ymm6 vaddps %ymm7, %ymm1, %ymm7 vsubps %ymm9, %ymm2, %ymm8 vaddps %ymm2, %ymm9, %ymm9 vmulss %xmm10, %xmm10, %xmm10 vbroadcastss %xmm10, %ymm10 leaq -0x80(%rsp), %rcx vpmovsxbd 0x82113(%rip), %ymm11 # 0x1f5da70 vpbroadcastd 0x85c56(%rip), %ymm12 # 0x1f615bc vpbroadcastd 0x45551(%rip), %xmm13 # 0x1f20ec0 vpmovsxbd 0x85ca8(%rip), %ymm14 # 0x1f61620 vpmovsxbd 0x85ca7(%rip), %ymm15 # 0x1f61628 vpmovsxbd 0x85ca5(%rip), %ymm16 # 0x1f61630 vpbroadcastd 0x36d6f(%rip), %ymm17 # 0x1f12704 cmpq %rcx, %rax je 0x1edb8c9 vmovss -0x8(%rax), %xmm19 addq $-0x10, %rax vucomiss %xmm3, %xmm19 ja 0x1edb995 movq (%rax), %r8 cmpl $0x1, 0x18(%rdx) jne 0x1edbb1e testb $0x8, %r8b jne 0x1edbaa1 movq %r8, %rdi andq $-0x10, %rdi vbroadcastss 0xc(%rsi), %ymm19 vmovaps 0x100(%rdi), %ymm20 vmovaps 0x120(%rdi), %ymm21 vmovaps 0x140(%rdi), %ymm18 vmovaps 0x160(%rdi), %ymm22 vfmadd213ps 0x40(%rdi), %ymm19, %ymm20 # ymm20 = (ymm19 * ymm20) + mem vfmadd213ps 0x80(%rdi), %ymm19, %ymm18 # ymm18 = (ymm19 * ymm18) + mem vmovaps 0x180(%rdi), %ymm23 vfmadd213ps 0xc0(%rdi), %ymm19, %ymm23 # ymm23 = (ymm19 * ymm23) + mem vfmadd213ps 0x60(%rdi), %ymm19, %ymm21 # ymm21 = (ymm19 * ymm21) + mem vfmadd213ps 0xa0(%rdi), %ymm19, %ymm22 # ymm22 = (ymm19 * ymm22) + mem vmovaps 0x1a0(%rdi), %ymm24 vfmadd213ps 0xe0(%rdi), %ymm19, %ymm24 # ymm24 = (ymm19 * ymm24) + mem vmaxps %ymm20, %ymm0, %ymm25 vminps %ymm21, %ymm25, %ymm25 vsubps %ymm0, %ymm25, %ymm25 vmaxps %ymm18, %ymm1, %ymm18 vminps %ymm22, %ymm18, %ymm18 vsubps %ymm1, %ymm18, %ymm18 vmaxps %ymm23, %ymm2, %ymm22 vminps %ymm24, %ymm22, %ymm22 vsubps %ymm2, %ymm22, %ymm22 vmulps %ymm25, %ymm25, %ymm23 vmulps %ymm18, %ymm18, %ymm18 vaddps %ymm18, %ymm23, %ymm18 vmulps %ymm22, %ymm22, %ymm22 vaddps %ymm22, %ymm18, %ymm18 vcmpleps %ymm10, %ymm18, %k1 vcmpleps %ymm21, %ymm20, %k0 {%k1} movl %r8d, %r9d andl $0x7, %r9d cmpl $0x6, %r9d je 0x1edbc67 kmovb %k0, %edi xorl %r9d, %r9d testb $0x8, %r8b jne 0x1edbb10 testq %rdi, %rdi je 0x1edbb10 andq $-0x10, %r8 vmovdqu64 (%r8), %ymm19 vmovdqu64 0x20(%r8), %ymm20 vmovdqa64 %ymm11, %ymm21 vpternlogd $0xf8, %ymm12, %ymm18, %ymm21 kmovd %edi, %k1 vpcompressd %ymm21, %ymm21 {%k1} vmovdqa64 %ymm19, %ymm22 vpermt2q %ymm20, %ymm21, %ymm22 vmovq %xmm22, %r8 prefetcht0 (%r8) prefetcht0 0x40(%r8) prefetcht0 0x80(%r8) prefetcht0 0xc0(%r8) blsrq %rdi, %r10 movb $0x1, %r9b jne 0x1edbc80 testb %r9b, %r9b jne 0x1edb9b4 jmp 0x1edb995 testb $0x8, %r8b jne 0x1edbaa1 movq %r8, %r9 andq $-0x10, %r9 vbroadcastss 0xc(%rsi), %ymm19 vmovaps 0x100(%r9), %ymm20 vmovaps 0x120(%r9), %ymm21 vmovaps 0x140(%r9), %ymm22 vmovaps 0x160(%r9), %ymm23 vfmadd213ps 0x40(%r9), %ymm19, %ymm20 # ymm20 = (ymm19 * ymm20) + mem vfmadd213ps 0x80(%r9), %ymm19, %ymm22 # ymm22 = (ymm19 * ymm22) + mem vmovaps 0x180(%r9), %ymm24 vfmadd213ps 0xc0(%r9), %ymm19, %ymm24 # ymm24 = (ymm19 * ymm24) + mem vfmadd213ps 0x60(%r9), %ymm19, %ymm21 # ymm21 = 
(ymm19 * ymm21) + mem vfmadd213ps 0xa0(%r9), %ymm19, %ymm23 # ymm23 = (ymm19 * ymm23) + mem vmovaps 0x1a0(%r9), %ymm25 vfmadd213ps 0xe0(%r9), %ymm19, %ymm25 # ymm25 = (ymm19 * ymm25) + mem vmaxps %ymm20, %ymm0, %ymm18 vminps %ymm21, %ymm18, %ymm18 vsubps %ymm0, %ymm18, %ymm18 vmaxps %ymm22, %ymm1, %ymm26 vminps %ymm23, %ymm26, %ymm26 vsubps %ymm1, %ymm26, %ymm26 vmaxps %ymm24, %ymm2, %ymm27 vminps %ymm25, %ymm27, %ymm27 vsubps %ymm2, %ymm27, %ymm27 vmulps %ymm18, %ymm18, %ymm18 vmulps %ymm26, %ymm26, %ymm26 vaddps %ymm26, %ymm18, %ymm18 vmulps %ymm27, %ymm27, %ymm26 vaddps %ymm26, %ymm18, %ymm18 vcmpleps %ymm21, %ymm20, %k0 kmovd %k0, %edi vcmpltps %ymm4, %ymm21, %k0 vcmpnleps %ymm5, %ymm20, %k1 vcmpltps %ymm6, %ymm23, %k2 vcmpnleps %ymm7, %ymm22, %k3 korb %k1, %k3, %k1 vcmpltps %ymm8, %ymm25, %k3 korb %k3, %k2, %k2 vcmpnleps %ymm9, %ymm24, %k3 korb %k0, %k3, %k0 korb %k0, %k1, %k0 korb %k2, %k0, %k0 knotb %k0, %k0 kmovd %k0, %r10d andb %dil, %r10b movzbl %r10b, %edi movl %r8d, %r10d andl $0x7, %r10d cmpl $0x6, %r10d jne 0x1edbaa1 vcmpltps 0x1e0(%r9), %ymm19, %k1 vcmpgeps 0x1c0(%r9), %ymm19, %k0 {%k1} kmovd %k0, %r9d andb %dil, %r9b movzbl %r9b, %edi jmp 0x1edbaa1 vcmpgeps 0x1c0(%rdi), %ymm19, %k1 vcmpltps 0x1e0(%rdi), %ymm19, %k1 {%k1} kandb %k0, %k1, %k0 jmp 0x1edba9d vpshufd $0x55, %ymm21, %ymm22 # ymm22 = ymm21[1,1,1,1,5,5,5,5] vmovdqa64 %ymm19, %ymm23 vpermt2q %ymm20, %ymm22, %ymm23 vmovq %xmm23, %r8 prefetcht0 (%r8) prefetcht0 0x40(%r8) prefetcht0 0x80(%r8) prefetcht0 0xc0(%r8) vpminsd %ymm22, %ymm21, %ymm23 vpmaxsd %ymm22, %ymm21, %ymm22 blsrq %r10, %r8 jne 0x1edbcf3 vpermi2q %ymm20, %ymm19, %ymm23 vmovq %xmm23, %r8 vpermt2q %ymm20, %ymm22, %ymm19 vmovq %xmm19, (%rax) vpermd %ymm18, %ymm22, %ymm19 vmovd %xmm19, 0x8(%rax) addq $0x10, %rax jmp 0x1edbb10 vpshufd $0xaa, %ymm21, %ymm25 # ymm25 = ymm21[2,2,2,2,6,6,6,6] vmovdqa64 %ymm19, %ymm24 vpermt2q %ymm20, %ymm25, %ymm24 vmovq %xmm24, %r10 prefetcht0 (%r10) prefetcht0 0x40(%r10) prefetcht0 0x80(%r10) prefetcht0 0xc0(%r10) vpminsd %ymm25, %ymm23, %ymm24 vpmaxsd %ymm25, %ymm23, %ymm25 vpminsd %ymm25, %ymm22, %ymm23 vpmaxsd %ymm25, %ymm22, %ymm25 blsrq %r8, %r8 jne 0x1edbd92 vpermi2q %ymm20, %ymm19, %ymm24 vmovq %xmm24, %r8 vmovdqa64 %ymm19, %ymm21 vpermt2q %ymm20, %ymm25, %ymm21 vmovq %xmm21, (%rax) vpermd %ymm18, %ymm25, %ymm21 vmovd %xmm21, 0x8(%rax) vpermt2q %ymm20, %ymm23, %ymm19 vmovq %xmm19, 0x10(%rax) vpermd %ymm18, %ymm23, %ymm19 vmovd %xmm19, 0x18(%rax) addq $0x20, %rax jmp 0x1edbb10 vpshufd $0xff, %ymm21, %ymm22 # ymm22 = ymm21[3,3,3,3,7,7,7,7] vmovdqa64 %ymm19, %ymm26 vpermt2q %ymm20, %ymm22, %ymm26 vmovq %xmm26, %r10 prefetcht0 (%r10) prefetcht0 0x40(%r10) prefetcht0 0x80(%r10) prefetcht0 0xc0(%r10) vpminsd %ymm22, %ymm24, %ymm26 vpmaxsd %ymm22, %ymm24, %ymm24 vpminsd %ymm24, %ymm23, %ymm22 vpmaxsd %ymm24, %ymm23, %ymm23 vpminsd %ymm23, %ymm25, %ymm24 vpmaxsd %ymm23, %ymm25, %ymm25 blsrq %r8, %r8 jne 0x1edbe5d vpermi2q %ymm20, %ymm19, %ymm26 vmovq %xmm26, %r8 vmovdqa64 %ymm19, %ymm21 vpermt2q %ymm20, %ymm25, %ymm21 vmovq %xmm21, (%rax) vpermd %ymm18, %ymm25, %ymm21 vmovd %xmm21, 0x8(%rax) vmovdqa64 %ymm19, %ymm21 vpermt2q %ymm20, %ymm24, %ymm21 vmovq %xmm21, 0x10(%rax) vpermd %ymm18, %ymm24, %ymm21 vmovd %xmm21, 0x18(%rax) vpermt2q %ymm20, %ymm22, %ymm19 vmovq %xmm19, 0x20(%rax) vpermd %ymm18, %ymm22, %ymm19 vmovd %xmm19, 0x28(%rax) addq $0x30, %rax jmp 0x1edbb10 valignd $0x3, %ymm21, %ymm21, %ymm23 # ymm23 = ymm21[3,4,5,6,7,0,1,2] vmovdqa64 %ymm13, %ymm21 vpermt2d %ymm26, %ymm14, %ymm21 vpermt2d %ymm22, %ymm15, %ymm21 
vpermt2d %ymm24, %ymm15, %ymm21 vpermt2d %ymm25, %ymm16, %ymm21 movq %r8, %r10 vmovdqa64 %ymm21, %ymm22 vpermd %ymm23, %ymm17, %ymm21 valignd $0x1, %ymm23, %ymm23, %ymm23 # ymm23 = ymm23[1,2,3,4,5,6,7,0] vmovdqa64 %ymm19, %ymm24 vpermt2q %ymm20, %ymm23, %ymm24 vmovq %xmm24, %r11 prefetcht0 (%r11) prefetcht0 0x40(%r11) prefetcht0 0x80(%r11) prefetcht0 0xc0(%r11) blsrq %r10, %r10 vpcmpnltd %ymm22, %ymm21, %k0 vpmaxsd %ymm22, %ymm21, %ymm21 kshiftlb $0x1, %k0, %k1 valignd $0x7, %ymm22, %ymm22, %ymm21 {%k1} # ymm21 {%k1} = ymm22[7,0,1,2,3,4,5,6] jne 0x1edbe85 popcntq %r8, %r8 addq $0x3, %r8 vmovdqa64 %ymm21, %ymm22 vpermi2q %ymm20, %ymm19, %ymm22 vmovq %xmm22, (%rax) vpermd %ymm18, %ymm21, %ymm22 vmovd %xmm22, 0x8(%rax) valignd $0x1, %ymm21, %ymm21, %ymm22 # ymm22 = ymm21[1,2,3,4,5,6,7,0] addq $0x10, %rax vmovdqa64 %ymm22, %ymm21 decq %r8 jne 0x1edbef3 vpermt2q %ymm20, %ymm22, %ymm19 vmovq %xmm19, %r8 jmp 0x1edbb10 nop
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
embree::avx512::InstanceIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstancePrimitive const&)
bool InstanceIntersector1MB::pointQuery(PointQuery* query, PointQueryContext* context, const InstancePrimitive& prim)
{
  const Instance* instance = prim.instance;
  const AffineSpace3fa local2world = instance->getLocal2World(query->time);
  const AffineSpace3fa world2local = instance->getWorld2Local(query->time);
  float similarityScale = 0.f;
  const bool similtude = context->query_type == POINT_QUERY_TYPE_SPHERE
                      && similarityTransform(world2local, &similarityScale);

  if (likely(instance_id_stack::push(context->userContext, prim.instID_, 0, world2local, local2world)))
  {
    PointQuery query_inst;
    query_inst.time   = query->time;
    query_inst.p      = xfmPoint(world2local, query->p);
    query_inst.radius = query->radius * similarityScale;

    PointQueryContext context_inst((Scene*)instance->object,
                                   context->query_ws,
                                   similtude ? POINT_QUERY_TYPE_SPHERE : POINT_QUERY_TYPE_AABB,
                                   context->func,
                                   context->userContext,
                                   similarityScale,
                                   context->userPtr);

    bool changed = instance->object->intersectors.pointQuery(&query_inst, &context_inst);
    instance_id_stack::pop(context->userContext);
    return changed;
  }
  return false;
}
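The key idea above: transform the query into the instance's local space, and if the world-to-local transform is a similarity (uniform scale times rotation), the query sphere stays a sphere, so only the radius needs scaling; otherwise the query must fall back to an AABB test. A small sketch of the two core ingredients below; Vec3, Xfm, the tolerance, and the (deliberately partial) similarity check are illustrative assumptions, not Embree's AffineSpace3fa or its similarityTransform:

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };
struct Xfm  {                    // toy linear part plus translation
  float m[3][3];                 // rows of the 3x3 linear part
  Vec3  p;                       // translation
};

// Apply the affine transform to a point.
Vec3 xfmPoint(const Xfm& a, const Vec3& v) {
  return { a.m[0][0]*v.x + a.m[0][1]*v.y + a.m[0][2]*v.z + a.p.x,
           a.m[1][0]*v.x + a.m[1][1]*v.y + a.m[1][2]*v.z + a.p.y,
           a.m[2][0]*v.x + a.m[2][1]*v.y + a.m[2][2]*v.z + a.p.z };
}

// A transform is a similarity if its columns are orthogonal and of equal
// length; then that common length is the uniform scale factor. (A full
// check would also test the third column; this sketch tests only two.)
bool similarityScaleOf(const Xfm& a, float* scale) {
  Vec3 c0 { a.m[0][0], a.m[1][0], a.m[2][0] };
  Vec3 c1 { a.m[0][1], a.m[1][1], a.m[2][1] };
  float l0    = c0.x*c0.x + c0.y*c0.y + c0.z*c0.z;
  float l1    = c1.x*c1.x + c1.y*c1.y + c1.z*c1.z;
  float dot01 = c0.x*c1.x + c0.y*c1.y + c0.z*c1.z;
  const float eps = 1e-5f;       // assumed tolerance, not Embree's
  if (std::fabs(l0 - l1) > eps || std::fabs(dot01) > eps) return false;
  *scale = std::sqrt(l0);
  return true;
}

int main() {
  Xfm world2local = {{{2,0,0},{0,2,0},{0,0,2}}, {1,0,0}};  // uniform scale 2 + shift
  Vec3 p = xfmPoint(world2local, {0,0,0});
  float s = 0.0f;
  bool similar = similarityScaleOf(world2local, &s);
  // similar == true, s == 2: the query radius would be multiplied by s
  std::printf("p=(%f,%f,%f) similar=%d scale=%f\n", p.x, p.y, p.z, similar, s);
  return 0;
}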
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x188, %rsp # imm = 0x188 movq %rdx, %r15 movq %rsi, %rbx movq %rdi, %r14 movq (%rdx), %r12 cmpl $0x1, 0x24(%r12) jne 0x1edf9cd movzbl 0x3d(%r12), %ecx shll $0x8, %ecx movq 0x60(%r12), %rax cmpl $0x100, %ecx # imm = 0x100 je 0x1edff34 vmovaps (%rax), %xmm18 vmovaps 0x10(%rax), %xmm19 vmovaps 0x20(%rax), %xmm20 vmovaps 0x30(%rax), %xmm21 jmp 0x1edfa96 vmovss 0xc(%r14), %xmm0 vmovss 0x28(%r12), %xmm1 vmovss 0x2c(%r12), %xmm2 vmovss 0x30(%r12), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0x10fc6(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmaxss %xmm2, %xmm1, %xmm2 vsubss %xmm2, %xmm0, %xmm13 vcvttss2si %xmm2, %r13d movzbl 0x3d(%r12), %eax shll $0x8, %eax movq 0x60(%r12), %rcx movq %r13, %rbp shlq $0x6, %rbp addq %rcx, %rbp incl %r13d shlq $0x6, %r13 addq %rcx, %r13 cmpl $0x100, %eax # imm = 0x100 je 0x1ee00b0 vmovss 0xccc5(%rip), %xmm0 # 0x1eec714 vsubss %xmm13, %xmm0, %xmm0 vbroadcastss %xmm13, %xmm1 vmulps (%r13), %xmm1, %xmm18 vbroadcastss %xmm0, %xmm0 vfmadd231ps (%rbp), %xmm0, %xmm18 # xmm18 = (xmm0 * mem) + xmm18 vmulps 0x10(%r13), %xmm1, %xmm19 vfmadd231ps 0x10(%rbp), %xmm0, %xmm19 # xmm19 = (xmm0 * mem) + xmm19 vmulps 0x20(%r13), %xmm1, %xmm20 vfmadd231ps 0x20(%rbp), %xmm0, %xmm20 # xmm20 = (xmm0 * mem) + xmm20 vmulps 0x30(%r13), %xmm1, %xmm21 vfmadd231ps 0x30(%rbp), %xmm0, %xmm21 # xmm21 = (xmm0 * mem) + xmm21 cmpl $0x1, 0x24(%r12) jne 0x1edfac8 vmovaps 0x70(%r12), %xmm6 vmovaps 0x80(%r12), %xmm8 vmovaps 0x90(%r12), %xmm7 vmovaps 0xa0(%r12), %xmm9 jmp 0x1edfc1d vmovss 0xc(%r14), %xmm0 vmovss 0x28(%r12), %xmm1 vmovss 0x2c(%r12), %xmm2 vmovss 0x30(%r12), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0x10ecb(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm2 vxorps %xmm1, %xmm1, %xmm1 vmaxss %xmm2, %xmm1, %xmm2 vsubss %xmm2, %xmm0, %xmm14 vcvttss2si %xmm2, %r13d movzbl 0x3d(%r12), %eax shll $0x8, %eax movq 0x60(%r12), %rcx movq %r13, %rbp shlq $0x6, %rbp addq %rcx, %rbp incl %r13d shlq $0x6, %r13 addq %rcx, %r13 cmpl $0x100, %eax # imm = 0x100 je 0x1ee01a8 vmovss 0xcbca(%rip), %xmm0 # 0x1eec714 vsubss %xmm14, %xmm0, %xmm1 vbroadcastss %xmm14, %xmm2 vmulps (%r13), %xmm2, %xmm0 vbroadcastss %xmm1, %xmm4 vfmadd231ps (%rbp), %xmm4, %xmm0 # xmm0 = (xmm4 * mem) + xmm0 vmulps 0x10(%r13), %xmm2, %xmm1 vfmadd231ps 0x10(%rbp), %xmm4, %xmm1 # xmm1 = (xmm4 * mem) + xmm1 vmulps 0x20(%r13), %xmm2, %xmm3 vfmadd231ps 0x20(%rbp), %xmm4, %xmm3 # xmm3 = (xmm4 * mem) + xmm3 vmulps 0x30(%r13), %xmm2, %xmm2 vfmadd231ps 0x30(%rbp), %xmm4, %xmm2 # xmm2 = (xmm4 * mem) + xmm2 vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3] vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3] vmulps %xmm5, %xmm3, %xmm6 vfmsub231ps %xmm4, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm4) - xmm6 vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,2,0,3] vmulps %xmm4, %xmm0, %xmm4 vfmsub231ps %xmm3, %xmm8, %xmm4 # xmm4 = (xmm8 * xmm3) - xmm4 vmulps %xmm1, %xmm8, %xmm1 vfmsub231ps %xmm5, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm5) - xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,2,0,3] vunpcklps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] vunpcklps %xmm1, %xmm6, %xmm1 # xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1] vinsertps 
$0x4a, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1],zero,xmm4[2],zero vxorps %xmm6, %xmm6, %xmm6 vmovss %xmm4, %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[1,2,3] vunpcklps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] vunpcklps %xmm5, %xmm3, %xmm4 # xmm4 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] vunpckhps %xmm5, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3] vdpps $0x7f, %xmm7, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 vdivps %xmm0, %xmm4, %xmm6 vdivps %xmm0, %xmm3, %xmm8 vdivps %xmm0, %xmm1, %xmm7 vbroadcastss %xmm2, %xmm0 vshufps $0x55, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,1,1,1] vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2] vmulps %xmm7, %xmm2, %xmm2 vfmadd231ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm1) + xmm2 vfmadd231ps %xmm0, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm0) + xmm2 vxorps 0x412a3(%rip){1to4}, %xmm2, %xmm9 # 0x1f20ec0 cmpl $0x1, 0x18(%rbx) jne 0x1edfc88 vdpps $0x7f, %xmm8, %xmm6, %xmm0 vandps 0x41291(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec4 vxorps %xmm0, %xmm0, %xmm0 vucomiss 0xcad1(%rip), %xmm1 # 0x1eec710 ja 0x1edfc79 vdpps $0x7f, %xmm7, %xmm6, %xmm1 vandps 0x41273(%rip){1to4}, %xmm1, %xmm1 # 0x1f20ec4 vucomiss 0xcab7(%rip), %xmm1 # 0x1eec710 ja 0x1edfc79 vdpps $0x7f, %xmm7, %xmm8, %xmm1 vandps 0x41259(%rip){1to4}, %xmm1, %xmm1 # 0x1f20ec4 vucomiss 0xca9d(%rip), %xmm1 # 0x1eec710 jbe 0x1edfe3b xorl %ebp, %ebp movzbl %bpl, %ecx movl $0x2, %eax subl %ecx, %eax jmp 0x1edfc91 movl $0x2, %eax vxorps %xmm0, %xmm0, %xmm0 movq 0x28(%rbx), %rcx movl 0x8(%r15), %edx movl 0x88(%rcx), %esi movl %edx, 0x80(%rcx,%rsi,4) movl $0x0, 0x84(%rcx,%rsi,4) shlq $0x6, %rsi vmovups %xmm6, (%rcx,%rsi) vmovups %xmm8, 0x10(%rcx,%rsi) vmovups %xmm7, 0x20(%rcx,%rsi) vmovups %xmm9, 0x30(%rcx,%rsi) vmovups %xmm18, 0x40(%rcx,%rsi) vmovups %xmm19, 0x50(%rcx,%rsi) vmovups %xmm20, 0x60(%rcx,%rsi) vmovups %xmm21, 0x70(%rcx,%rsi) incl 0x88(%rcx) vmovss 0xc(%r14), %xmm1 vmovss %xmm1, 0xdc(%rsp) vfmadd132ps 0x8(%r14){1to4}, %xmm9, %xmm7 # xmm7 = (xmm7 * mem) + xmm9 vfmadd231ps 0x4(%r14){1to4}, %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7 vfmadd231ps (%r14){1to4}, %xmm6, %xmm7 # xmm7 = (xmm6 * mem) + xmm7 vmovlps %xmm7, 0xd0(%rsp) vextractps $0x2, %xmm7, 0xd8(%rsp) vmulss 0x10(%r14), %xmm0, %xmm1 vmovss %xmm1, 0xe0(%rsp) movq 0x58(%r12), %rcx movq 0x10(%rbx), %rdx movq 0x38(%rbx), %rsi movq %rcx, 0x110(%rsp) movq $0x0, 0x118(%rsp) movq %rdx, 0x120(%rsp) movl %eax, 0x128(%rsp) vmovdqa 0x20(%rbx), %xmm2 vmovdqa %xmm2, 0x130(%rsp) vmovss %xmm0, 0x140(%rsp) movq %rsi, 0x148(%rsp) movq $-0x1, 0x150(%rsp) vbroadcastss 0x10(%rdx), %xmm1 vmovaps %xmm1, 0x160(%rsp) vmovss 0x10(%rdx), %xmm1 cmpl $0x2, %eax jne 0x1edfdd1 vucomiss 0xbc6c(%rip), %xmm1 # 0x1eeba20 jae 0x1edfdca vpextrq $0x1, %xmm2, %rsi movl 0x88(%rsi), %edi testl %edi, %edi jne 0x1ee0c91 vbroadcastss %xmm1, %xmm0 jmp 0x1edfdda vmulss %xmm1, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 leaq 0x110(%rsp), %rdx vmovaps %xmm0, 0x50(%rdx) movq 0x58(%r12), %rax leaq 0x58(%rax), %rdi leaq 0xd0(%rsp), %rsi callq *0x88(%rax) movq 0x28(%rbx), %rcx movl 0x88(%rcx), %edx decl %edx movl %edx, 0x88(%rcx) movl $0xffffffff, %esi # imm = 0xFFFFFFFF movl %esi, 0x80(%rcx,%rdx,4) movl 0x88(%rcx), %edx movl %esi, 0x84(%rcx,%rdx,4) addq $0x188, %rsp # imm = 0x188 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq vdpps $0x7f, %xmm6, %xmm6, %xmm1 vdpps $0x7f, %xmm8, %xmm8, %xmm2 vsubss %xmm2, %xmm1, %xmm3 vbroadcastss 0x41070(%rip), %xmm4 # 0x1f20ec4 vandps %xmm4, %xmm3, %xmm3 xorl %ebp, %ebp vucomiss 0xc8ae(%rip), %xmm3 # 0x1eec710 ja 0x1edfc7b vdpps $0x7f, %xmm7, %xmm7, %xmm3 
vsubss %xmm3, %xmm1, %xmm5 vandps %xmm4, %xmm5, %xmm4 vucomiss 0xc892(%rip), %xmm4 # 0x1eec710 ja 0x1edfc7b vsubss %xmm3, %xmm2, %xmm2 vandps 0x41032(%rip){1to4}, %xmm2, %xmm2 # 0x1f20ec4 vucomiss 0xc876(%rip), %xmm2 # 0x1eec710 setbe %bpl ja 0x1edfc7b movb $0x1, %bpl vucomiss %xmm0, %xmm1 jb 0x1edfeb6 vsqrtss %xmm1, %xmm1, %xmm0 jmp 0x1edfc7b vmovaps %xmm1, %xmm0 vmovaps %xmm18, 0x80(%rsp) vmovaps %xmm19, 0x70(%rsp) vmovaps %xmm20, 0x60(%rsp) vmovaps %xmm21, 0x50(%rsp) vmovaps %xmm6, 0x40(%rsp) vmovaps %xmm7, 0x30(%rsp) vmovaps %xmm8, 0x20(%rsp) vmovaps %xmm9, 0x10(%rsp) callq 0x6aa20 vmovaps 0x10(%rsp), %xmm9 vmovaps 0x20(%rsp), %xmm8 vmovaps 0x30(%rsp), %xmm7 vmovaps 0x40(%rsp), %xmm6 vmovaps 0x50(%rsp), %xmm21 vmovaps 0x60(%rsp), %xmm20 vmovaps 0x70(%rsp), %xmm19 vmovaps 0x80(%rsp), %xmm18 jmp 0x1edfc7b vmovss 0x3c(%rax), %xmm1 vmovss 0xc(%rax), %xmm2 vmovss 0x1c(%rax), %xmm3 vmovss 0x2c(%rax), %xmm4 vmovaps (%rax), %xmm0 vxorps %xmm18, %xmm18, %xmm18 vshufps $0xe9, %xmm18, %xmm0, %xmm0 # xmm0 = xmm0[1,2],xmm18[2,3] vblendps $0x4, 0x10(%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[2],xmm0[3] vmulss %xmm2, %xmm2, %xmm5 vmovaps %xmm1, %xmm6 vfmadd213ss %xmm5, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + xmm5 vfnmadd231ss %xmm3, %xmm3, %xmm6 # xmm6 = -(xmm3 * xmm3) + xmm6 vfnmadd231ss %xmm4, %xmm4, %xmm6 # xmm6 = -(xmm4 * xmm4) + xmm6 vmulss %xmm4, %xmm1, %xmm7 vmovaps %xmm3, %xmm8 vfmadd213ss %xmm7, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm7 vaddss %xmm8, %xmm8, %xmm8 vmulss %xmm3, %xmm1, %xmm9 vmovaps %xmm4, %xmm10 vfmsub213ss %xmm9, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) - xmm9 vaddss %xmm10, %xmm10, %xmm10 vfmsub231ss %xmm3, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm3) - xmm7 vaddss %xmm7, %xmm7, %xmm7 vfmsub231ss %xmm1, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm1) - xmm5 vmovaps %xmm3, %xmm11 vfmadd213ss %xmm5, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) + xmm5 vfnmadd231ss %xmm4, %xmm4, %xmm11 # xmm11 = -(xmm4 * xmm4) + xmm11 vmulss %xmm2, %xmm1, %xmm1 vmovaps %xmm4, %xmm12 vfmadd213ss %xmm1, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm12) + xmm1 vaddss %xmm12, %xmm12, %xmm12 vfmadd213ss %xmm9, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + xmm9 vaddss %xmm2, %xmm2, %xmm2 vfmsub231ss %xmm4, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm4) - xmm1 vaddss %xmm1, %xmm1, %xmm1 vfnmadd231ss %xmm3, %xmm3, %xmm5 # xmm5 = -(xmm3 * xmm3) + xmm5 vfmadd231ss %xmm4, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm4) + xmm5 vbroadcastss %xmm6, %xmm3 vbroadcastss %xmm8, %xmm4 vbroadcastss %xmm10, %xmm6 vmovaps 0xc704(%rip), %xmm8 # 0x1eec700 vmulps %xmm6, %xmm8, %xmm6 vmovsd 0xc6e8(%rip), %xmm9 # 0x1eec6f0 vfmadd231ps %xmm4, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm4) + xmm6 vmovss 0xc6ff(%rip), %xmm4 # 0x1eec714 vfmadd231ps %xmm3, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm3) + xmm6 vbroadcastss %xmm7, %xmm3 vbroadcastss %xmm11, %xmm7 vbroadcastss %xmm12, %xmm10 vmulps %xmm8, %xmm10, %xmm10 vfmadd231ps %xmm7, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm7) + xmm10 vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10 vbroadcastss %xmm2, %xmm2 vbroadcastss %xmm1, %xmm1 vbroadcastss %xmm5, %xmm3 vmulps %xmm3, %xmm8, %xmm3 vfmadd231ps %xmm1, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm1) + xmm3 vfmadd231ps %xmm2, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm2) + xmm3 vaddps %xmm18, %xmm0, %xmm0 vmulps %xmm18, %xmm3, %xmm19 vfmadd213ps %xmm19, %xmm10, %xmm18 # xmm18 = (xmm10 * xmm18) + xmm19 vfmadd231ps (%rax){1to4}, %xmm6, %xmm18 # xmm18 = (xmm6 * mem) + xmm18 vfmadd231ps 0x14(%rax){1to4}, %xmm10, %xmm19 # xmm19 = (xmm10 * mem) + xmm19 vfmadd231ps 0x10(%rax){1to4}, %xmm6, %xmm19 # xmm19 = (xmm6 * mem) + xmm19 vmulps 
0x28(%rax){1to4}, %xmm3, %xmm20 vfmadd231ps 0x24(%rax){1to4}, %xmm10, %xmm20 # xmm20 = (xmm10 * mem) + xmm20 vfmadd231ps 0x20(%rax){1to4}, %xmm6, %xmm20 # xmm20 = (xmm6 * mem) + xmm20 vmulps 0x38(%rax){1to4}, %xmm3, %xmm1 vfmadd231ps 0x34(%rax){1to4}, %xmm10, %xmm1 # xmm1 = (xmm10 * mem) + xmm1 vfmadd231ps 0x30(%rax){1to4}, %xmm6, %xmm1 # xmm1 = (xmm6 * mem) + xmm1 vaddps %xmm1, %xmm0, %xmm21 jmp 0x1edfa96 vmovss 0x3c(%rbp), %xmm15 vmovss 0xc(%rbp), %xmm14 vmovss 0x1c(%rbp), %xmm12 vmovss 0x2c(%rbp), %xmm11 vmovss 0x3c(%r13), %xmm20 vmovss 0xc(%r13), %xmm19 vmovss 0x1c(%r13), %xmm17 vmovss 0x2c(%r13), %xmm16 vmulss %xmm19, %xmm14, %xmm0 vfmadd231ss %xmm20, %xmm15, %xmm0 # xmm0 = (xmm15 * xmm20) + xmm0 vfmadd231ss %xmm17, %xmm12, %xmm0 # xmm0 = (xmm12 * xmm17) + xmm0 vfmadd231ss %xmm16, %xmm11, %xmm0 # xmm0 = (xmm11 * xmm16) + xmm0 vbroadcastss 0x40dbf(%rip), %xmm7 # 0x1f20ec0 vxorps %xmm7, %xmm0, %xmm2 vucomiss %xmm0, %xmm2 seta %al vxorps %xmm7, %xmm20, %xmm3 vxorps %xmm7, %xmm19, %xmm4 vxorps %xmm7, %xmm17, %xmm5 vxorps %xmm7, %xmm16, %xmm6 kmovd %eax, %k1 vmovss %xmm3, %xmm20, %xmm20 {%k1} vmovss %xmm4, %xmm19, %xmm19 {%k1} vmovss %xmm5, %xmm17, %xmm17 {%k1} vmovss %xmm6, %xmm16, %xmm16 {%k1} vmaxss %xmm0, %xmm2, %xmm21 vandps 0x40d74(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vmovss 0x10828(%rip), %xmm2 # 0x1ef0980 vfmadd213ss 0x10823(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x1081e(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x10819(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x10814(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x1080f(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vmovss 0xc585(%rip), %xmm22 # 0x1eec714 vsubss %xmm4, %xmm22, %xmm0 vucomiss %xmm1, %xmm0 jb 0x1ee02a2 vsqrtss %xmm0, %xmm0, %xmm0 jmp 0x1ee0370 vmovss 0x3c(%rbp), %xmm16 vmovss 0xc(%rbp), %xmm15 vmovss 0x1c(%rbp), %xmm13 vmovss 0x2c(%rbp), %xmm12 vmovss 0x3c(%r13), %xmm24 vmovss 0xc(%r13), %xmm23 vmovss 0x1c(%r13), %xmm22 vmovss 0x2c(%r13), %xmm17 vmulss %xmm23, %xmm15, %xmm0 vfmadd231ss %xmm24, %xmm16, %xmm0 # xmm0 = (xmm16 * xmm24) + xmm0 vfmadd231ss %xmm22, %xmm13, %xmm0 # xmm0 = (xmm13 * xmm22) + xmm0 vfmadd231ss %xmm17, %xmm12, %xmm0 # xmm0 = (xmm12 * xmm17) + xmm0 vbroadcastss 0x40cc5(%rip), %xmm7 # 0x1f20ec0 vxorps %xmm7, %xmm0, %xmm2 vucomiss %xmm0, %xmm2 seta %al vxorps %xmm7, %xmm24, %xmm3 vxorps %xmm7, %xmm23, %xmm4 vxorps %xmm7, %xmm22, %xmm5 vxorps %xmm7, %xmm17, %xmm6 kmovd %eax, %k1 vmovss %xmm3, %xmm24, %xmm24 {%k1} vmovss %xmm4, %xmm23, %xmm23 {%k1} vmovss %xmm5, %xmm22, %xmm22 {%k1} vmovss %xmm6, %xmm17, %xmm17 {%k1} vmaxss %xmm0, %xmm2, %xmm25 vandps 0x40c7a(%rip){1to4}, %xmm0, %xmm4 # 0x1f20ec4 vmovss 0x1072e(%rip), %xmm2 # 0x1ef0980 vfmadd213ss 0x10729(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x10724(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x1071f(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x1071a(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vfmadd213ss 0x10715(%rip), %xmm4, %xmm2 # xmm2 = (xmm4 * xmm2) + mem vmovss 0xc48b(%rip), %xmm26 # 0x1eec714 vsubss %xmm4, %xmm26, %xmm0 vucomiss %xmm1, %xmm0 jb 0x1ee0782 vsqrtss %xmm0, %xmm0, %xmm0 jmp 0x1ee0888 vmovaps %xmm13, 0x80(%rsp) vmovss %xmm11, 0x70(%rsp) vmovss %xmm12, 0x60(%rsp) vmovss %xmm14, 0x50(%rsp) vmovss %xmm15, 0x40(%rsp) vmovaps %xmm16, 0x30(%rsp) vmovaps %xmm17, 0x20(%rsp) vmovaps %xmm19, 0x10(%rsp) vmovaps %xmm20, 0xc0(%rsp) vmovss %xmm21, 0xb0(%rsp) vmovaps %xmm4, 0xa0(%rsp) vmovss %xmm2, 
0x90(%rsp) callq 0x6aa20 vmovss 0x90(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm4 vbroadcastss 0x40ba3(%rip), %xmm7 # 0x1f20ec0 vmovss 0xc3ed(%rip), %xmm22 # 0x1eec714 vmovss 0xb0(%rsp), %xmm21 vmovaps 0xc0(%rsp), %xmm20 vmovaps 0x10(%rsp), %xmm19 vmovaps 0x20(%rsp), %xmm17 vmovaps 0x30(%rsp), %xmm16 vmovss 0x40(%rsp), %xmm15 vmovss 0x50(%rsp), %xmm14 vmovss 0x60(%rsp), %xmm12 vmovss 0x70(%rsp), %xmm11 vmovaps 0x80(%rsp), %xmm13 vmovss 0x10620(%rip), %xmm1 # 0x1ef0998 vmulss %xmm2, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm0, %xmm2, %xmm0 vxorps %xmm7, %xmm0, %xmm3 vcmpltss %xmm2, %xmm21, %k1 vmovss %xmm3, %xmm0, %xmm0 {%k1} vsubss %xmm0, %xmm1, %xmm0 vcmpltss %xmm4, %xmm22, %k1 vmovss 0x105ee(%rip), %xmm0 {%k1} # 0x1ef099c vmulss %xmm0, %xmm13, %xmm0 vmulss 0x105e6(%rip), %xmm0, %xmm2 # 0x1ef09a0 vroundss $0x9, %xmm2, %xmm2, %xmm2 vcvttss2si %xmm2, %eax vfnmadd213ss %xmm0, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm0 kmovd %eax, %k1 andl $0x3, %eax cmpl $0x2, %eax setae %cl decl %eax cmpl $0x2, %eax setb %al vmulss %xmm1, %xmm1, %xmm0 vmovss 0x105ba(%rip), %xmm2 # 0x1ef09a4 vfmadd213ss 0x105b5(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vmovss 0x105b1(%rip), %xmm5 # 0x1ef09ac vfmadd213ss 0x105ac(%rip), %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + mem vfmadd213ss 0x105a7(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ss 0x105a2(%rip), %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + mem vfmadd213ss 0x1059d(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ss 0x10598(%rip), %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + mem vfmadd213ss 0x10593(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vmovss 0xc2e3(%rip), %xmm4 # 0x1eec71c vfmadd213ss %xmm4, %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + xmm4 vfmadd213ss %xmm22, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm22 vfmadd213ss %xmm22, %xmm0, %xmm5 # xmm5 = (xmm0 * xmm5) + xmm22 vmulss %xmm2, %xmm1, %xmm0 vmovaps %xmm0, %xmm1 vmovss %xmm5, %xmm1, %xmm1 {%k1} vmovss %xmm0, %xmm5, %xmm5 {%k1} vxorps %xmm7, %xmm1, %xmm0 kmovd %ecx, %k1 vmovss %xmm0, %xmm1, %xmm1 {%k1} vxorps %xmm7, %xmm5, %xmm0 kmovd %eax, %k1 vmovss %xmm0, %xmm5, %xmm5 {%k1} vmovaps %xmm15, %xmm0 vfmsub213ss %xmm20, %xmm21, %xmm0 # xmm0 = (xmm21 * xmm0) - xmm20 vmovaps %xmm14, %xmm2 vfmsub213ss %xmm19, %xmm21, %xmm2 # xmm2 = (xmm21 * xmm2) - xmm19 vmovaps %xmm12, %xmm3 vfmsub213ss %xmm17, %xmm21, %xmm3 # xmm3 = (xmm21 * xmm3) - xmm17 vmovaps %xmm11, %xmm6 vfmsub213ss %xmm16, %xmm21, %xmm6 # xmm6 = (xmm21 * xmm6) - xmm16 vmulss %xmm2, %xmm2, %xmm7 vfmadd231ss %xmm0, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm0) + xmm7 vfmadd231ss %xmm3, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm3) + xmm7 vfmadd231ss %xmm6, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm6) + xmm7 vxorps %xmm18, %xmm18, %xmm18 vmovss %xmm7, %xmm18, %xmm8 # xmm8 = xmm7[0],xmm18[1,2,3] vrsqrt14ss %xmm8, %xmm18, %xmm8 vmovss 0xc249(%rip), %xmm9 # 0x1eec718 vmulss %xmm9, %xmm8, %xmm10 vmulss %xmm4, %xmm7, %xmm7 vmulss %xmm7, %xmm8, %xmm7 vmulss %xmm8, %xmm8, %xmm8 vmulss %xmm7, %xmm8, %xmm7 vaddss %xmm7, %xmm10, %xmm7 vmulss %xmm7, %xmm0, %xmm0 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm7, %xmm3, %xmm8 vmulss %xmm7, %xmm6, %xmm6 vmulss %xmm1, %xmm0, %xmm3 vfmsub231ss %xmm15, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm15) - xmm3 vmulss %xmm1, %xmm2, %xmm2 vfmsub231ss %xmm14, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm14) - xmm2 vmulss %xmm1, %xmm8, %xmm0 vfmsub231ss %xmm12, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm12) - xmm0 vmulss %xmm1, %xmm6, %xmm1 vfmsub231ss %xmm5, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm5) - xmm1 vsubss %xmm13, %xmm22, %xmm5 vmulss %xmm20, %xmm13, 
%xmm6 vfmadd231ss %xmm15, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm15) + xmm6 vmulss %xmm19, %xmm13, %xmm7 vfmadd231ss %xmm14, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm14) + xmm7 vmulss %xmm17, %xmm13, %xmm8 vfmadd231ss %xmm12, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm12) + xmm8 vmulss %xmm16, %xmm13, %xmm10 vfmadd231ss %xmm11, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm11) + xmm10 vmulss %xmm7, %xmm7, %xmm11 vfmadd231ss %xmm6, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm6) + xmm11 vfmadd231ss %xmm8, %xmm8, %xmm11 # xmm11 = (xmm8 * xmm8) + xmm11 vfmadd231ss %xmm10, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm10) + xmm11 vmovss %xmm11, %xmm18, %xmm12 # xmm12 = xmm11[0],xmm18[1,2,3] vrsqrt14ss %xmm12, %xmm18, %xmm12 vmulss %xmm9, %xmm12, %xmm9 vmulss %xmm4, %xmm11, %xmm4 vmulss %xmm4, %xmm12, %xmm4 vmulss %xmm12, %xmm12, %xmm11 vmulss %xmm4, %xmm11, %xmm4 vaddss %xmm4, %xmm9, %xmm4 vmulss %xmm4, %xmm6, %xmm6 vmulss %xmm4, %xmm7, %xmm7 vmulss %xmm4, %xmm8, %xmm8 vmulss %xmm4, %xmm10, %xmm4 vucomiss 0x10426(%rip), %xmm21 # 0x1ef09c8 seta %al kmovd %eax, %k1 vmovss %xmm6, %xmm3, %xmm3 {%k1} vmovss %xmm7, %xmm2, %xmm2 {%k1} vmovss %xmm8, %xmm0, %xmm0 {%k1} vmovss %xmm4, %xmm1, %xmm1 {%k1} vbroadcastss %xmm13, %xmm4 vmulps (%r13), %xmm4, %xmm7 vbroadcastss %xmm5, %xmm8 vfmadd231ps (%rbp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7 vmulps 0x10(%r13), %xmm4, %xmm6 vfmadd231ps 0x10(%rbp), %xmm8, %xmm6 # xmm6 = (xmm8 * mem) + xmm6 vmulps 0x20(%r13), %xmm4, %xmm5 vfmadd231ps 0x20(%rbp), %xmm8, %xmm5 # xmm5 = (xmm8 * mem) + xmm5 vmulps 0x30(%r13), %xmm4, %xmm4 vfmadd231ps 0x30(%rbp), %xmm8, %xmm4 # xmm4 = (xmm8 * mem) + xmm4 vshufps $0xe9, %xmm18, %xmm7, %xmm8 # xmm8 = xmm7[1,2],xmm18[2,3] vblendps $0x4, %xmm6, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm6[2],xmm8[3] vmulss %xmm2, %xmm2, %xmm9 vmovaps %xmm3, %xmm10 vfmadd213ss %xmm9, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm10) + xmm9 vfnmadd231ss %xmm0, %xmm0, %xmm10 # xmm10 = -(xmm0 * xmm0) + xmm10 vfnmadd231ss %xmm1, %xmm1, %xmm10 # xmm10 = -(xmm1 * xmm1) + xmm10 vmulss %xmm1, %xmm3, %xmm11 vmovaps %xmm0, %xmm12 vfmadd213ss %xmm11, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm11 vaddss %xmm12, %xmm12, %xmm12 vmulss %xmm0, %xmm3, %xmm13 vmovaps %xmm1, %xmm14 vfmsub213ss %xmm13, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) - xmm13 vaddss %xmm14, %xmm14, %xmm14 vfmsub231ss %xmm0, %xmm2, %xmm11 # xmm11 = (xmm2 * xmm0) - xmm11 vaddss %xmm11, %xmm11, %xmm11 vfmsub231ss %xmm3, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm3) - xmm9 vmovaps %xmm0, %xmm15 vfmadd213ss %xmm9, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm15) + xmm9 vfnmadd231ss %xmm1, %xmm1, %xmm15 # xmm15 = -(xmm1 * xmm1) + xmm15 vmulss %xmm2, %xmm3, %xmm3 vmovaps %xmm1, %xmm16 vfmadd213ss %xmm3, %xmm0, %xmm16 # xmm16 = (xmm0 * xmm16) + xmm3 vaddss %xmm16, %xmm16, %xmm16 vfmadd213ss %xmm13, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm13 vaddss %xmm2, %xmm2, %xmm2 vfmsub231ss %xmm1, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm1) - xmm3 vaddss %xmm3, %xmm3, %xmm3 vfnmadd231ss %xmm0, %xmm0, %xmm9 # xmm9 = -(xmm0 * xmm0) + xmm9 vfmadd231ss %xmm1, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm1) + xmm9 vbroadcastss %xmm10, %xmm0 vbroadcastss %xmm12, %xmm1 vmovaps 0xc05c(%rip), %xmm10 # 0x1eec700 vbroadcastss %xmm14, %xmm12 vmulps %xmm10, %xmm12, %xmm12 vmovsd 0xc03a(%rip), %xmm13 # 0x1eec6f0 vmovss 0xc056(%rip), %xmm14 # 0x1eec714 vfmadd231ps %xmm1, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm1) + xmm12 vfmadd231ps %xmm0, %xmm14, %xmm12 # xmm12 = (xmm14 * xmm0) + xmm12 vbroadcastss %xmm11, %xmm0 vbroadcastss %xmm15, %xmm1 vbroadcastss %xmm16, %xmm11 vmulps %xmm10, %xmm11, %xmm11 vfmadd231ps %xmm1, %xmm13, %xmm11 # xmm11 = 
(xmm13 * xmm1) + xmm11 vfmadd231ps %xmm0, %xmm14, %xmm11 # xmm11 = (xmm14 * xmm0) + xmm11 vbroadcastss %xmm2, %xmm0 vbroadcastss %xmm3, %xmm1 vbroadcastss %xmm9, %xmm2 vmulps %xmm2, %xmm10, %xmm2 vfmadd231ps %xmm1, %xmm13, %xmm2 # xmm2 = (xmm13 * xmm1) + xmm2 vfmadd231ps %xmm0, %xmm14, %xmm2 # xmm2 = (xmm14 * xmm0) + xmm2 vaddps %xmm18, %xmm8, %xmm0 vbroadcastss %xmm7, %xmm1 vmulps %xmm18, %xmm2, %xmm3 vfmadd213ps %xmm3, %xmm11, %xmm18 # xmm18 = (xmm11 * xmm18) + xmm3 vfmadd231ps %xmm1, %xmm12, %xmm18 # xmm18 = (xmm12 * xmm1) + xmm18 vbroadcastss %xmm6, %xmm1 vshufps $0x55, %xmm6, %xmm6, %xmm19 # xmm19 = xmm6[1,1,1,1] vfmadd213ps %xmm3, %xmm11, %xmm19 # xmm19 = (xmm11 * xmm19) + xmm3 vfmadd231ps %xmm1, %xmm12, %xmm19 # xmm19 = (xmm12 * xmm1) + xmm19 vbroadcastss %xmm5, %xmm1 vshufps $0x55, %xmm5, %xmm5, %xmm3 # xmm3 = xmm5[1,1,1,1] vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2] vmulps %xmm2, %xmm5, %xmm20 vfmadd231ps %xmm3, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm3) + xmm20 vfmadd231ps %xmm1, %xmm12, %xmm20 # xmm20 = (xmm12 * xmm1) + xmm20 vbroadcastss %xmm4, %xmm1 vshufps $0x55, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,1,1,1] vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2] vmulps %xmm2, %xmm4, %xmm2 vfmadd231ps %xmm3, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm3) + xmm2 vfmadd231ps %xmm1, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm1) + xmm2 vaddps %xmm2, %xmm0, %xmm21 jmp 0x1edfa96 vmovaps %xmm18, 0x80(%rsp) vmovaps %xmm19, 0x70(%rsp) vmovaps %xmm20, 0x60(%rsp) vmovaps %xmm21, 0x50(%rsp) vmovaps %xmm14, 0x40(%rsp) vmovss %xmm12, 0x30(%rsp) vmovss %xmm13, 0x20(%rsp) vmovss %xmm15, 0x10(%rsp) vmovss %xmm16, 0xc0(%rsp) vmovaps %xmm17, 0xb0(%rsp) vmovaps %xmm22, 0xa0(%rsp) vmovaps %xmm23, 0x90(%rsp) vmovaps %xmm24, 0x100(%rsp) vmovss %xmm25, 0xc(%rsp) vmovaps %xmm4, 0xf0(%rsp) vmovss %xmm2, 0x8(%rsp) callq 0x6aa20 vmovss 0x8(%rsp), %xmm2 vmovaps 0xf0(%rsp), %xmm4 vbroadcastss 0x406aa(%rip), %xmm7 # 0x1f20ec0 vmovss 0xbef4(%rip), %xmm26 # 0x1eec714 vmovss 0xc(%rsp), %xmm25 vmovaps 0x100(%rsp), %xmm24 vmovaps 0x90(%rsp), %xmm23 vmovaps 0xa0(%rsp), %xmm22 vmovaps 0xb0(%rsp), %xmm17 vmovss 0xc0(%rsp), %xmm16 vmovss 0x10(%rsp), %xmm15 vmovss 0x20(%rsp), %xmm13 vmovss 0x30(%rsp), %xmm12 vmovaps 0x40(%rsp), %xmm14 vmovaps 0x50(%rsp), %xmm21 vmovaps 0x60(%rsp), %xmm20 vmovaps 0x70(%rsp), %xmm19 vmovaps 0x80(%rsp), %xmm18 vmovss 0x10108(%rip), %xmm1 # 0x1ef0998 vmulss %xmm2, %xmm0, %xmm0 vsubss %xmm0, %xmm1, %xmm0 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm0, %xmm2, %xmm0 vxorps %xmm7, %xmm0, %xmm3 vcmpltss %xmm2, %xmm25, %k1 vmovss %xmm3, %xmm0, %xmm0 {%k1} vsubss %xmm0, %xmm1, %xmm0 vcmpltss %xmm4, %xmm26, %k1 vmovss 0x100d6(%rip), %xmm0 {%k1} # 0x1ef099c vmulss %xmm0, %xmm14, %xmm0 vmulss 0x100ce(%rip), %xmm0, %xmm2 # 0x1ef09a0 vroundss $0x9, %xmm2, %xmm2, %xmm2 vcvttss2si %xmm2, %eax vfnmadd213ss %xmm0, %xmm2, %xmm1 # xmm1 = -(xmm2 * xmm1) + xmm0 kmovd %eax, %k1 andl $0x3, %eax cmpl $0x2, %eax setae %cl decl %eax cmpl $0x2, %eax setb %al vmulss %xmm1, %xmm1, %xmm0 vmovss 0x100a2(%rip), %xmm2 # 0x1ef09a4 vfmadd213ss 0x1009d(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vmovss 0x10099(%rip), %xmm6 # 0x1ef09ac vfmadd213ss 0x10094(%rip), %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + mem vfmadd213ss 0x1008f(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ss 0x1008a(%rip), %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + mem vfmadd213ss 0x10085(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem vfmadd213ss 0x10080(%rip), %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + mem vfmadd213ss 0x1007b(%rip), %xmm0, %xmm2 # xmm2 = (xmm0 * 
xmm2) + mem vmovss 0xbdcb(%rip), %xmm5 # 0x1eec71c vfmadd213ss %xmm5, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm5 vfmadd213ss %xmm26, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm26 vfmadd213ss %xmm26, %xmm0, %xmm6 # xmm6 = (xmm0 * xmm6) + xmm26 vmulss %xmm2, %xmm1, %xmm0 vmovaps %xmm0, %xmm2 vmovss %xmm6, %xmm2, %xmm2 {%k1} vmovss %xmm0, %xmm6, %xmm6 {%k1} vxorps %xmm7, %xmm2, %xmm0 kmovd %ecx, %k1 vmovss %xmm0, %xmm2, %xmm2 {%k1} vxorps %xmm7, %xmm6, %xmm0 kmovd %eax, %k1 vmovss %xmm0, %xmm6, %xmm6 {%k1} vmovaps %xmm16, %xmm1 vfmsub213ss %xmm24, %xmm25, %xmm1 # xmm1 = (xmm25 * xmm1) - xmm24 vmovaps %xmm15, %xmm3 vfmsub213ss %xmm23, %xmm25, %xmm3 # xmm3 = (xmm25 * xmm3) - xmm23 vmovaps %xmm13, %xmm4 vfmsub213ss %xmm22, %xmm25, %xmm4 # xmm4 = (xmm25 * xmm4) - xmm22 vmovaps %xmm12, %xmm7 vfmsub213ss %xmm17, %xmm25, %xmm7 # xmm7 = (xmm25 * xmm7) - xmm17 vmulss %xmm3, %xmm3, %xmm8 vfmadd231ss %xmm1, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm1) + xmm8 vfmadd231ss %xmm4, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm4) + xmm8 vfmadd231ss %xmm7, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm7) + xmm8 vxorps %xmm0, %xmm0, %xmm0 vmovss %xmm8, %xmm0, %xmm9 # xmm9 = xmm8[0],xmm0[1,2,3] vrsqrt14ss %xmm9, %xmm0, %xmm9 vmovss 0xbd32(%rip), %xmm10 # 0x1eec718 vmulss %xmm10, %xmm9, %xmm11 vmulss %xmm5, %xmm8, %xmm8 vmulss %xmm8, %xmm9, %xmm8 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm8, %xmm9, %xmm8 vaddss %xmm8, %xmm11, %xmm8 vmulss %xmm1, %xmm8, %xmm1 vmulss %xmm3, %xmm8, %xmm3 vmulss %xmm4, %xmm8, %xmm9 vmulss %xmm7, %xmm8, %xmm7 vmulss %xmm2, %xmm1, %xmm4 vfmsub231ss %xmm16, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm16) - xmm4 vmulss %xmm2, %xmm3, %xmm3 vfmsub231ss %xmm15, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm15) - xmm3 vmulss %xmm2, %xmm9, %xmm1 vfmsub231ss %xmm13, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm13) - xmm1 vmulss %xmm2, %xmm7, %xmm2 vfmsub231ss %xmm6, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm6) - xmm2 vsubss %xmm14, %xmm26, %xmm6 vmulss %xmm24, %xmm14, %xmm7 vfmadd231ss %xmm16, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm16) + xmm7 vmulss %xmm23, %xmm14, %xmm8 vfmadd231ss %xmm15, %xmm6, %xmm8 # xmm8 = (xmm6 * xmm15) + xmm8 vmulss %xmm22, %xmm14, %xmm9 vfmadd231ss %xmm13, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm13) + xmm9 vmulss %xmm17, %xmm14, %xmm11 vfmadd231ss %xmm12, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm12) + xmm11 vmulss %xmm8, %xmm8, %xmm12 vfmadd231ss %xmm7, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm7) + xmm12 vfmadd231ss %xmm9, %xmm9, %xmm12 # xmm12 = (xmm9 * xmm9) + xmm12 vfmadd231ss %xmm11, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm11) + xmm12 vmovss %xmm12, %xmm0, %xmm13 # xmm13 = xmm12[0],xmm0[1,2,3] vrsqrt14ss %xmm13, %xmm0, %xmm13 vmulss %xmm10, %xmm13, %xmm10 vmulss %xmm5, %xmm12, %xmm5 vmulss %xmm5, %xmm13, %xmm5 vmulss %xmm13, %xmm13, %xmm12 vmulss %xmm5, %xmm12, %xmm5 vaddss %xmm5, %xmm10, %xmm5 vmulss %xmm5, %xmm7, %xmm7 vmulss %xmm5, %xmm8, %xmm8 vmulss %xmm5, %xmm9, %xmm9 vmulss %xmm5, %xmm11, %xmm5 vucomiss 0xff0a(%rip), %xmm25 # 0x1ef09c8 seta %al kmovd %eax, %k1 vmovss %xmm7, %xmm4, %xmm4 {%k1} vmovss %xmm8, %xmm3, %xmm3 {%k1} vmovss %xmm9, %xmm1, %xmm1 {%k1} vmovss %xmm5, %xmm2, %xmm2 {%k1} vbroadcastss %xmm14, %xmm5 vmulps (%r13), %xmm5, %xmm8 vbroadcastss %xmm6, %xmm9 vfmadd231ps (%rbp), %xmm9, %xmm8 # xmm8 = (xmm9 * mem) + xmm8 vmulps 0x10(%r13), %xmm5, %xmm7 vfmadd231ps 0x10(%rbp), %xmm9, %xmm7 # xmm7 = (xmm9 * mem) + xmm7 vmulps 0x20(%r13), %xmm5, %xmm6 vfmadd231ps 0x20(%rbp), %xmm9, %xmm6 # xmm6 = (xmm9 * mem) + xmm6 vmulps 0x30(%r13), %xmm5, %xmm5 vfmadd231ps 0x30(%rbp), %xmm9, %xmm5 # xmm5 = (xmm9 * mem) + xmm5 vshufps $0xe9, %xmm0, %xmm8, %xmm9 # xmm9 = 
xmm8[1,2],xmm0[2,3] vblendps $0x4, %xmm7, %xmm9, %xmm9 # xmm9 = xmm9[0,1],xmm7[2],xmm9[3] vmulss %xmm3, %xmm3, %xmm10 vmovaps %xmm4, %xmm11 vfmadd213ss %xmm10, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm11) + xmm10 vfnmadd231ss %xmm1, %xmm1, %xmm11 # xmm11 = -(xmm1 * xmm1) + xmm11 vfnmadd231ss %xmm2, %xmm2, %xmm11 # xmm11 = -(xmm2 * xmm2) + xmm11 vmulss %xmm2, %xmm4, %xmm12 vmovaps %xmm1, %xmm13 vfmadd213ss %xmm12, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm12 vaddss %xmm13, %xmm13, %xmm13 vmulss %xmm1, %xmm4, %xmm14 vmovaps %xmm2, %xmm15 vfmsub213ss %xmm14, %xmm3, %xmm15 # xmm15 = (xmm3 * xmm15) - xmm14 vaddss %xmm15, %xmm15, %xmm15 vfmsub231ss %xmm1, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm1) - xmm12 vaddss %xmm12, %xmm12, %xmm12 vfmsub231ss %xmm4, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm4) - xmm10 vmovaps %xmm1, %xmm16 vfmadd213ss %xmm10, %xmm1, %xmm16 # xmm16 = (xmm1 * xmm16) + xmm10 vfnmadd231ss %xmm2, %xmm2, %xmm16 # xmm16 = -(xmm2 * xmm2) + xmm16 vmulss %xmm3, %xmm4, %xmm4 vmovaps %xmm2, %xmm17 vfmadd213ss %xmm4, %xmm1, %xmm17 # xmm17 = (xmm1 * xmm17) + xmm4 vaddss %xmm17, %xmm17, %xmm17 vfmadd213ss %xmm14, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm14 vaddss %xmm3, %xmm3, %xmm3 vfmsub231ss %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) - xmm4 vaddss %xmm4, %xmm4, %xmm4 vfnmadd231ss %xmm1, %xmm1, %xmm10 # xmm10 = -(xmm1 * xmm1) + xmm10 vfmadd231ss %xmm2, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm2) + xmm10 vbroadcastss %xmm11, %xmm1 vbroadcastss %xmm13, %xmm2 vmovaps 0xbb3e(%rip), %xmm11 # 0x1eec700 vbroadcastss %xmm15, %xmm13 vmulps %xmm11, %xmm13, %xmm13 vmovsd 0xbb1c(%rip), %xmm14 # 0x1eec6f0 vmovss 0xbb38(%rip), %xmm15 # 0x1eec714 vfmadd231ps %xmm2, %xmm14, %xmm13 # xmm13 = (xmm14 * xmm2) + xmm13 vfmadd231ps %xmm1, %xmm15, %xmm13 # xmm13 = (xmm15 * xmm1) + xmm13 vbroadcastss %xmm12, %xmm1 vbroadcastss %xmm16, %xmm2 vbroadcastss %xmm17, %xmm12 vmulps %xmm11, %xmm12, %xmm12 vfmadd231ps %xmm2, %xmm14, %xmm12 # xmm12 = (xmm14 * xmm2) + xmm12 vfmadd231ps %xmm1, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm1) + xmm12 vbroadcastss %xmm3, %xmm1 vbroadcastss %xmm4, %xmm2 vbroadcastss %xmm10, %xmm3 vmulps %xmm3, %xmm11, %xmm4 vfmadd231ps %xmm2, %xmm14, %xmm4 # xmm4 = (xmm14 * xmm2) + xmm4 vfmadd231ps %xmm1, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm1) + xmm4 vaddps %xmm0, %xmm9, %xmm2 vbroadcastss %xmm8, %xmm1 vmulps %xmm0, %xmm4, %xmm3 vfmadd213ps %xmm3, %xmm12, %xmm0 # xmm0 = (xmm12 * xmm0) + xmm3 vfmadd231ps %xmm1, %xmm13, %xmm0 # xmm0 = (xmm13 * xmm1) + xmm0 vbroadcastss %xmm7, %xmm8 vshufps $0x55, %xmm7, %xmm7, %xmm1 # xmm1 = xmm7[1,1,1,1] vfmadd213ps %xmm3, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm1) + xmm3 vfmadd231ps %xmm8, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm8) + xmm1 vbroadcastss %xmm6, %xmm7 vshufps $0x55, %xmm6, %xmm6, %xmm8 # xmm8 = xmm6[1,1,1,1] vshufps $0xaa, %xmm6, %xmm6, %xmm3 # xmm3 = xmm6[2,2,2,2] vmulps %xmm4, %xmm3, %xmm3 vfmadd231ps %xmm8, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm8) + xmm3 vfmadd231ps %xmm7, %xmm13, %xmm3 # xmm3 = (xmm13 * xmm7) + xmm3 vbroadcastss %xmm5, %xmm6 vshufps $0x55, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,1,1,1] vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2] vmulps %xmm4, %xmm5, %xmm4 vfmadd231ps %xmm7, %xmm12, %xmm4 # xmm4 = (xmm12 * xmm7) + xmm4 vfmadd231ps %xmm6, %xmm13, %xmm4 # xmm4 = (xmm13 * xmm6) + xmm4 vaddps %xmm4, %xmm2, %xmm2 jmp 0x1edfb89 leaq 0x170(%rsp), %r14 vmovaps %xmm1, %xmm0 movq %r14, %rdx callq 0x8bb1c3 vmovaps (%r14), %xmm0 jmp 0x1edfdda nop
/embree[P]embree/kernels/geometry/instance_intersector.cpp
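An idiom worth noting before the next entry: both listings in this dump normalize quaternions without vsqrtss/vdivss, using vrsqrt14ss to get a ~14-bit reciprocal square-root estimate of the squared length and then one Newton-Raphson step to sharpen it. A minimal sketch of that pattern follows; the 1.5f/-0.5f constants are the textbook refinement values and are an assumption here, since the binary only shows two rip-relative constant loads (0x1eec718/0x1eec71c).

#include <immintrin.h>  // AVX-512F intrinsics; compile with -mavx512f

// One Newton-Raphson refinement of the hardware reciprocal-sqrt estimate,
// mirroring the vrsqrt14ss / vmulss / vaddss sequences in the listings.
static inline float refined_rsqrt(float a)
{
  const __m128 va = _mm_set_ss(a);
  const float  r  = _mm_cvtss_f32(_mm_rsqrt14_ss(_mm_setzero_ps(), va)); // ~14-bit estimate
  return 1.5f*r - 0.5f*a*r*r*r;  // == r*(1.5f - 0.5f*a*r*r), one NR step
}

Normalizing a quaternion q then reduces to scaling its four components by refined_rsqrt(dot(q,q)), which is exactly the run of four vmulss instructions that follows each refinement in the surrounding disassembly.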
embree::avx512::InstanceArrayIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&)
bool InstanceArrayIntersector1MB::pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& prim)
{
  const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
  Accel* object = instance->getObject(prim.primID_);
  if (!object) return false;

  const AffineSpace3fa local2world = instance->getLocal2World(prim.primID_, query->time);
  const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_, query->time);
  float similarityScale = 0.f;
  const bool similtude = context->query_type == POINT_QUERY_TYPE_SPHERE && similarityTransform(world2local, &similarityScale);

  if (likely(instance_id_stack::push(context->userContext, prim.instID_, prim.primID_, world2local, local2world)))
  {
    PointQuery query_inst;
    query_inst.time   = query->time;
    query_inst.p      = xfmPoint(world2local, query->p);
    query_inst.radius = query->radius * similarityScale;
    PointQueryContext context_inst((Scene*)object, context->query_ws,
                                   similtude ? POINT_QUERY_TYPE_SPHERE : POINT_QUERY_TYPE_AABB,
                                   context->func, context->userContext, similarityScale, context->userPtr);
    bool changed = object->intersectors.pointQuery(&query_inst, &context_inst);
    instance_id_stack::pop(context->userContext);
    return changed;
  }
  return false;
}
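The similarity check above is what makes the radius rescaling legal: a sphere maps to a sphere only under a similarity transform (rotation, translation, uniform scale), so similarityTransform() must succeed for the query to stay POINT_QUERY_TYPE_SPHERE, and anything else is demoted to an AABB query. The sketch below illustrates the instance-space mapping and the motion-blur keyframe blend that getLocal2World/getWorld2Local perform in the disassembly that follows; the types and names are simplified stand-ins for Embree's Vec3fa/AffineSpace3fa, not Embree API.

// Simplified stand-ins for Embree's Vec3fa / AffineSpace3fa; illustrative only.
struct Vec3    { float x, y, z; };
struct Affine3 { Vec3 vx, vy, vz, p; };  // linear part as columns + translation

// p' = L*p + t, matching xfmPoint(world2local, query->p) in the source above.
static Vec3 xfmPointSketch(const Affine3& m, const Vec3& v)
{
  return { m.vx.x*v.x + m.vy.x*v.y + m.vz.x*v.z + m.p.x,
           m.vx.y*v.x + m.vy.y*v.y + m.vz.y*v.z + m.p.y,
           m.vx.z*v.x + m.vy.z*v.y + m.vz.z*v.z + m.p.z };
}

struct QuerySketch { Vec3 p; float radius, time; };

// Map a world-space query into instance space; scaling the radius by a single
// factor is exact only for similarity transforms, hence the check above.
static QuerySketch toInstanceSpace(const QuerySketch& q, const Affine3& world2local,
                                   float similarityScale)
{
  return { xfmPointSketch(world2local, q.p), q.radius * similarityScale, q.time };
}

// Keyframe blend behind the time-varying instance transform for the matrix
// formats, as in the (1-f)*T0 + f*T1 fmadd sequences of the listing below.
// The 0xB001-tagged path (quaternion decomposition) interpolates differently,
// which is where the quaternion and sin/cos polynomial code in the
// disassembly comes from.
static Affine3 lerpTransform(const Affine3& t0, const Affine3& t1, float f)
{
  auto lerp3 = [f](const Vec3& a, const Vec3& b) -> Vec3 {
    return { (1.0f-f)*a.x + f*b.x, (1.0f-f)*a.y + f*b.y, (1.0f-f)*a.z + f*b.z };
  };
  return { lerp3(t0.vx, t1.vx), lerp3(t0.vy, t1.vy),
           lerp3(t0.vz, t1.vz), lerp3(t0.p, t1.p) };
}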
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x208, %rsp # imm = 0x208 movq %rdx, %r15 movq %rsi, %rbx movq %rdi, %r14 movq (%rsi), %rcx movl (%rdx), %eax movl 0x4(%rdx), %edx movq 0x1e8(%rcx), %rcx movq (%rcx,%rdx,8), %r13 movq 0x58(%r13), %r12 testq %r12, %r12 jne 0x1ee1f4c movq 0x90(%r13), %rcx movq 0xa0(%r13), %rdx imulq %rax, %rdx movl (%rcx,%rdx), %ecx movl $0xffffffff, %edx # imm = 0xFFFFFFFF cmpq %rdx, %rcx je 0x1ee1f49 movq 0x60(%r13), %rdx movq (%rdx,%rcx,8), %r12 jmp 0x1ee1f4c xorl %r12d, %r12d testq %r12, %r12 je 0x1ee2006 cmpl $0x1, 0x24(%r13) jne 0x1ee200d movzbl 0x3d(%r13), %esi shll $0x8, %esi movq 0x88(%r13), %rcx movl 0x20(%rcx), %edx cmpl $0x100, %esi # imm = 0x100 je 0x1ee30fd cmpl $0x9243, %edx # imm = 0x9243 jg 0x1ee210b cmpl $0x9134, %edx # imm = 0x9134 je 0x1ee2196 cmpl $0x9234, %edx # imm = 0x9234 jne 0x1ee2671 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x4(%rdx,%rax), %xmm0 vmovss (%rdx,%rax), %xmm18 vmovaps 0x7eec1(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rdx,%rax), %xmm2 vmovss 0xc(%rdx,%rax), %xmm19 vpermt2ps %xmm0, %xmm1, %xmm18 vpermt2ps %xmm2, %xmm1, %xmm19 vmovsd 0x1c(%rdx,%rax), %xmm0 vmovss 0x18(%rdx,%rax), %xmm20 vpermt2ps %xmm0, %xmm1, %xmm20 vmovsd 0x28(%rdx,%rax), %xmm0 vmovss 0x24(%rdx,%rax), %xmm21 vpermt2ps %xmm0, %xmm1, %xmm21 jmp 0x1ee2671 xorl %eax, %eax jmp 0x1ee2ff2 vmovss 0xc(%r14), %xmm0 vmovss 0x28(%r13), %xmm1 vmovss 0x2c(%r13), %xmm2 vmovss 0x30(%r13), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0xe989(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm1, %xmm2, %xmm1 vsubss %xmm1, %xmm0, %xmm11 vcvttss2si %xmm1, %ecx movzbl 0x3d(%r13), %r8d shll $0x8, %r8d movq 0x88(%r13), %rdx imulq $0x38, %rcx, %rdi leaq (%rdx,%rdi), %rsi movl 0x20(%rdx,%rdi), %edi cmpl $0x100, %r8d # imm = 0x100 je 0x1ee317d cmpl $0x9243, %edi # imm = 0x9243 jg 0x1ee214f cmpl $0x9134, %edi # imm = 0x9134 je 0x1ee21fe cmpl $0x9234, %edi # imm = 0x9234 jne 0x1ee242d movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x4(%rdi,%rsi), %xmm0 vmovss (%rdi,%rsi), %xmm18 vmovaps 0x7edbc(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm19 vpermt2ps %xmm0, %xmm1, %xmm18 vpermt2ps %xmm2, %xmm1, %xmm19 vmovsd 0x1c(%rdi,%rsi), %xmm0 vmovss 0x18(%rdi,%rsi), %xmm20 vpermt2ps %xmm0, %xmm1, %xmm20 vmovsd 0x28(%rdi,%rsi), %xmm0 vmovss 0x24(%rdi,%rsi), %xmm21 vpermt2ps %xmm0, %xmm1, %xmm21 jmp 0x1ee242d cmpl $0xb001, %edx # imm = 0xB001 je 0x1ee2269 cmpl $0x9244, %edx # imm = 0x9244 jne 0x1ee2671 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovaps (%rdx,%rax), %xmm18 vmovaps 0x10(%rdx,%rax), %xmm19 vmovaps 0x20(%rdx,%rax), %xmm20 vmovaps 0x30(%rdx,%rax), %xmm21 jmp 0x1ee2671 cmpl $0xb001, %edi # imm = 0xB001 je 0x1ee234c cmpl $0x9244, %edi # imm = 0x9244 jne 0x1ee242d movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovaps (%rdi,%rsi), %xmm18 vmovaps 0x10(%rdi,%rsi), %xmm19 vmovaps 0x20(%rdi,%rsi), %xmm20 vmovaps 0x30(%rdi,%rsi), %xmm21 jmp 0x1ee242d movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovss (%rdx,%rax), %xmm0 vmovss 0x4(%rdx,%rax), %xmm1 vmovss 0x8(%rdx,%rax), %xmm2 vmovss 0xc(%rdx,%rax), %xmm3 vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm18 # xmm18 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps 
$0x28, 0x24(%rdx,%rax), %xmm0, %xmm19 # xmm19 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm20 # xmm20 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm21 # xmm21 = xmm0[0,1],mem[0],zero jmp 0x1ee2671 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovss (%rdi,%rsi), %xmm0 vmovss 0x4(%rdi,%rsi), %xmm1 vmovss 0x8(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm3 vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm18 # xmm18 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm19 # xmm19 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm20 # xmm20 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm21 # xmm21 = xmm0[0,1],mem[0],zero jmp 0x1ee242d movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x10(%rdx,%rax), %xmm0 vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdx,%rax), %xmm1 vmovaps 0x7ec03(%rip), %xmm2 # 0x1f60e90 vmovss (%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rdx,%rax), %xmm3 vmovsd 0x1c(%rdx,%rax), %xmm5 vmovss 0x24(%rdx,%rax), %xmm6 vmovss 0x28(%rdx,%rax), %xmm7 vmovss 0x2c(%rdx,%rax), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdx,%rax), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0xa426(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0xa422(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm21 # xmm21 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm18 # xmm18 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm20 # xmm20 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm19 # xmm19 = xmm0[0,1,2],xmm1[0] jmp 0x1ee2671 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x10(%rdi,%rsi), %xmm0 vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdi,%rsi), %xmm1 vmovaps 0x7eb1d(%rip), %xmm2 # 0x1f60e90 vmovss (%rdi,%rsi), %xmm3 vmovss 0xc(%rdi,%rsi), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rdi,%rsi), %xmm3 vmovsd 0x1c(%rdi,%rsi), %xmm5 vmovss 0x24(%rdi,%rsi), %xmm6 vmovss 0x28(%rdi,%rsi), %xmm7 vmovss 0x2c(%rdi,%rsi), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdi,%rsi), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 
# xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0xa340(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0xa33c(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm21 # xmm21 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm18 # xmm18 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm20 # xmm20 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdi,%rsi), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm19 # xmm19 = xmm0[0,1,2],xmm1[0] incl %ecx imulq $0x38, %rcx, %rcx leaq (%rdx,%rcx), %rsi movl 0x20(%rdx,%rcx), %ecx cmpl $0x9243, %ecx # imm = 0x9243 jg 0x1ee24b7 cmpl $0x9134, %ecx # imm = 0x9134 je 0x1ee24f3 cmpl $0x9234, %ecx # imm = 0x9234 jne 0x1ee2632 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x4(%rcx,%rax), %xmm1 vmovss (%rcx,%rax), %xmm0 vmovaps 0x7ea0a(%rip), %xmm4 # 0x1f60e80 vmovsd 0x10(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm3 vpermt2ps %xmm1, %xmm4, %xmm0 vpermt2ps %xmm2, %xmm4, %xmm3 vmovsd 0x1c(%rcx,%rax), %xmm1 vmovss 0x18(%rcx,%rax), %xmm2 vpermt2ps %xmm1, %xmm4, %xmm2 vmovsd 0x28(%rcx,%rax), %xmm5 vmovss 0x24(%rcx,%rax), %xmm1 vpermt2ps %xmm5, %xmm4, %xmm1 jmp 0x1ee2632 cmpl $0xb001, %ecx # imm = 0xB001 je 0x1ee2557 cmpl $0x9244, %ecx # imm = 0x9244 jne 0x1ee2632 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovaps (%rcx,%rax), %xmm0 vmovaps 0x10(%rcx,%rax), %xmm3 vmovaps 0x20(%rcx,%rax), %xmm2 vmovaps 0x30(%rcx,%rax), %xmm1 jmp 0x1ee2632 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovss (%rcx,%rax), %xmm0 vmovss 0x4(%rcx,%rax), %xmm1 vmovss 0x8(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm4 vinsertps $0x1c, 0x10(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rcx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rcx,%rax), %xmm1, %xmm3 # xmm3 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rcx,%rax), %xmm2, %xmm1 # xmm1 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rcx,%rax), %xmm1, %xmm2 # xmm2 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rcx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero jmp 0x1ee2632 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x10(%rcx,%rax), %xmm0 vinsertps $0x20, 0x8(%rcx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rcx,%rax), %xmm0 vmovaps 0x7e915(%rip), %xmm1 # 0x1f60e90 vmovss (%rcx,%rax), %xmm3 vmovss 0xc(%rcx,%rax), %xmm4 vpermt2ps %xmm3, %xmm1, %xmm0 vmovss 0x18(%rcx,%rax), %xmm3 vmovsd 0x1c(%rcx,%rax), %xmm5 vmovss 0x24(%rcx,%rax), %xmm6 vmovss 0x28(%rcx,%rax), %xmm7 vmovss 0x2c(%rcx,%rax), %xmm8 vpermt2ps %xmm3, %xmm1, %xmm5 vmovss 0x30(%rcx,%rax), %xmm3 vmulss %xmm7, %xmm7, %xmm1 vfmadd231ss %xmm6, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm6) + xmm1 vfmadd231ss %xmm8, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm8) + xmm1 vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm1, %xmm9, %xmm10 # xmm10 = xmm1[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0xa138(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0xa134(%rip), 
%xmm1, %xmm1 # 0x1eec71c vmulss %xmm1, %xmm9, %xmm1 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm1, %xmm9, %xmm1 vaddss %xmm1, %xmm10, %xmm9 vmulss %xmm6, %xmm9, %xmm1 vinsertps $0x30, %xmm1, %xmm5, %xmm1 # xmm1 = xmm5[0,1,2],xmm1[0] vmulss %xmm7, %xmm9, %xmm5 vinsertps $0x30, %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm5[0] vmulss %xmm9, %xmm8, %xmm5 vmulss %xmm3, %xmm9, %xmm3 vinsertps $0x30, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm3[0] vinsertps $0x10, 0x4(%rcx,%rax), %xmm4, %xmm3 # xmm3 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rcx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3] vinsertps $0x30, %xmm5, %xmm3, %xmm3 # xmm3 = xmm3[0,1,2],xmm5[0] vmovss 0xa0da(%rip), %xmm4 # 0x1eec714 vsubss %xmm11, %xmm4, %xmm4 vbroadcastss %xmm11, %xmm5 vmulps %xmm0, %xmm5, %xmm0 vbroadcastss %xmm4, %xmm4 vfmadd213ps %xmm0, %xmm4, %xmm18 # xmm18 = (xmm4 * xmm18) + xmm0 vmulps %xmm3, %xmm5, %xmm0 vfmadd213ps %xmm0, %xmm4, %xmm19 # xmm19 = (xmm4 * xmm19) + xmm0 vmulps %xmm2, %xmm5, %xmm0 vfmadd213ps %xmm0, %xmm4, %xmm20 # xmm20 = (xmm4 * xmm20) + xmm0 vmulps %xmm1, %xmm5, %xmm0 vfmadd213ps %xmm0, %xmm4, %xmm21 # xmm21 = (xmm4 * xmm21) + xmm0 movl (%r15), %eax cmpl $0x1, 0x24(%r13) jne 0x1ee271d movzbl 0x3d(%r13), %esi shll $0x8, %esi movq 0x88(%r13), %rcx movl 0x20(%rcx), %edx cmpl $0x100, %esi # imm = 0x100 je 0x1ee320b cmpl $0x9243, %edx # imm = 0x9243 jg 0x1ee2813 cmpl $0x9134, %edx # imm = 0x9134 je 0x1ee288e cmpl $0x9234, %edx # imm = 0x9234 jne 0x1ee2d5a movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x4(%rdx,%rax), %xmm2 vmovss (%rdx,%rax), %xmm0 vmovaps 0x7e7a4(%rip), %xmm4 # 0x1f60e80 vmovsd 0x10(%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm1 vpermt2ps %xmm2, %xmm4, %xmm0 vpermt2ps %xmm3, %xmm4, %xmm1 vmovsd 0x1c(%rdx,%rax), %xmm3 vmovss 0x18(%rdx,%rax), %xmm2 vpermt2ps %xmm3, %xmm4, %xmm2 vmovsd 0x28(%rdx,%rax), %xmm5 vmovss 0x24(%rdx,%rax), %xmm3 vpermt2ps %xmm5, %xmm4, %xmm3 jmp 0x1ee2d5a vmovss 0xc(%r14), %xmm0 vmovss 0x28(%r13), %xmm1 vmovss 0x2c(%r13), %xmm2 vmovss 0x30(%r13), %xmm3 vsubss %xmm2, %xmm0, %xmm0 vsubss %xmm2, %xmm3, %xmm2 vdivss %xmm2, %xmm0, %xmm0 vmulss %xmm0, %xmm1, %xmm0 vroundss $0x9, %xmm0, %xmm0, %xmm2 vaddss 0xe279(%rip), %xmm1, %xmm1 # 0x1ef09cc vminss %xmm1, %xmm2, %xmm1 vxorps %xmm2, %xmm2, %xmm2 vmaxss %xmm1, %xmm2, %xmm1 vsubss %xmm1, %xmm0, %xmm15 vcvttss2si %xmm1, %ecx movzbl 0x3d(%r13), %r8d shll $0x8, %r8d movq 0x88(%r13), %rdx imulq $0x38, %rcx, %rdi leaq (%rdx,%rdi), %rsi movl 0x20(%rdx,%rdi), %edi cmpl $0x100, %r8d # imm = 0x100 je 0x1ee328b cmpl $0x9243, %edi # imm = 0x9243 jg 0x1ee284f cmpl $0x9134, %edi # imm = 0x9134 je 0x1ee28f2 cmpl $0x9234, %edi # imm = 0x9234 jne 0x1ee2b17 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x4(%rdi,%rsi), %xmm2 vmovss (%rdi,%rsi), %xmm0 vmovaps 0x7e6ae(%rip), %xmm4 # 0x1f60e80 vmovsd 0x10(%rdi,%rsi), %xmm3 vmovss 0xc(%rdi,%rsi), %xmm1 vpermt2ps %xmm2, %xmm4, %xmm0 vpermt2ps %xmm3, %xmm4, %xmm1 vmovsd 0x1c(%rdi,%rsi), %xmm3 vmovss 0x18(%rdi,%rsi), %xmm2 vpermt2ps %xmm3, %xmm4, %xmm2 vmovsd 0x28(%rdi,%rsi), %xmm5 vmovss 0x24(%rdi,%rsi), %xmm3 vpermt2ps %xmm5, %xmm4, %xmm3 jmp 0x1ee2b17 cmpl $0xb001, %edx # imm = 0xB001 je 0x1ee2959 cmpl $0x9244, %edx # imm = 0x9244 jne 0x1ee2d5a movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovaps (%rdx,%rax), %xmm0 vmovaps 0x10(%rdx,%rax), %xmm1 vmovaps 0x20(%rdx,%rax), %xmm2 vmovaps 0x30(%rdx,%rax), %xmm3 jmp 0x1ee2d5a cmpl $0xb001, %edi # imm = 0xB001 je 0x1ee2a39 cmpl $0x9244, %edi # imm = 0x9244 jne 0x1ee2b17 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq 
%rax, %rsi vmovaps (%rdi,%rsi), %xmm0 vmovaps 0x10(%rdi,%rsi), %xmm1 vmovaps 0x20(%rdi,%rsi), %xmm2 vmovaps 0x30(%rdi,%rsi), %xmm3 jmp 0x1ee2b17 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovss (%rdx,%rax), %xmm0 vmovss 0x4(%rdx,%rax), %xmm1 vmovss 0x8(%rdx,%rax), %xmm2 vmovss 0xc(%rdx,%rax), %xmm3 vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdx,%rax), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],zero jmp 0x1ee2d5a movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovss (%rdi,%rsi), %xmm0 vmovss 0x4(%rdi,%rsi), %xmm1 vmovss 0x8(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm3 vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],zero jmp 0x1ee2b17 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x10(%rdx,%rax), %xmm0 vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdx,%rax), %xmm0 vmovaps 0x7e513(%rip), %xmm2 # 0x1f60e90 vmovss (%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm0 vmovss 0x18(%rdx,%rax), %xmm3 vmovsd 0x1c(%rdx,%rax), %xmm5 vmovss 0x24(%rdx,%rax), %xmm6 vmovss 0x28(%rdx,%rax), %xmm7 vmovss 0x2c(%rdx,%rax), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdx,%rax), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x9d36(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x9d32(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm9 vmulss %xmm6, %xmm9, %xmm3 vinsertps $0x30, %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[0,1,2],xmm3[0] vmulss %xmm7, %xmm9, %xmm5 vinsertps $0x30, %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm5[0] vmulss %xmm9, %xmm8, %xmm5 vmulss %xmm2, %xmm9, %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm2 # xmm2 = xmm1[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdx,%rax), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3] vinsertps $0x30, %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0] jmp 0x1ee2d5a movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x10(%rdi,%rsi), %xmm0 vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3] vmovsd 
0x34(%rdi,%rsi), %xmm0 vmovaps 0x7e430(%rip), %xmm2 # 0x1f60e90 vmovss (%rdi,%rsi), %xmm3 vmovss 0xc(%rdi,%rsi), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm0 vmovss 0x18(%rdi,%rsi), %xmm3 vmovsd 0x1c(%rdi,%rsi), %xmm5 vmovss 0x24(%rdi,%rsi), %xmm6 vmovss 0x28(%rdi,%rsi), %xmm7 vmovss 0x2c(%rdi,%rsi), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdi,%rsi), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x9c53(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x9c4f(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm9 vmulss %xmm6, %xmm9, %xmm3 vinsertps $0x30, %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[0,1,2],xmm3[0] vmulss %xmm7, %xmm9, %xmm5 vinsertps $0x30, %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm5[0] vmulss %xmm9, %xmm8, %xmm5 vmulss %xmm2, %xmm9, %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm2 # xmm2 = xmm1[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdi,%rsi), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3] vinsertps $0x30, %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0] incl %ecx imulq $0x38, %rcx, %rcx leaq (%rdx,%rcx), %rsi movl 0x20(%rdx,%rcx), %ecx cmpl $0x9243, %ecx # imm = 0x9243 jg 0x1ee2ba1 cmpl $0x9134, %ecx # imm = 0x9134 je 0x1ee2bdd cmpl $0x9234, %ecx # imm = 0x9234 jne 0x1ee2d1f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x4(%rcx,%rax), %xmm5 vmovss (%rcx,%rax), %xmm4 vmovaps 0x7e320(%rip), %xmm8 # 0x1f60e80 vmovsd 0x10(%rcx,%rax), %xmm6 vmovss 0xc(%rcx,%rax), %xmm7 vpermt2ps %xmm5, %xmm8, %xmm4 vpermt2ps %xmm6, %xmm8, %xmm7 vmovsd 0x1c(%rcx,%rax), %xmm5 vmovss 0x18(%rcx,%rax), %xmm6 vpermt2ps %xmm5, %xmm8, %xmm6 vmovsd 0x28(%rcx,%rax), %xmm9 vmovss 0x24(%rcx,%rax), %xmm5 vpermt2ps %xmm9, %xmm8, %xmm5 jmp 0x1ee2d1f cmpl $0xb001, %ecx # imm = 0xB001 je 0x1ee2c41 cmpl $0x9244, %ecx # imm = 0x9244 jne 0x1ee2d1f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovaps (%rcx,%rax), %xmm4 vmovaps 0x10(%rcx,%rax), %xmm7 vmovaps 0x20(%rcx,%rax), %xmm6 vmovaps 0x30(%rcx,%rax), %xmm5 jmp 0x1ee2d1f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovss (%rcx,%rax), %xmm4 vmovss 0x4(%rcx,%rax), %xmm5 vmovss 0x8(%rcx,%rax), %xmm6 vmovss 0xc(%rcx,%rax), %xmm8 vinsertps $0x1c, 0x10(%rcx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rcx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rcx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rcx,%rax), %xmm5, %xmm7 # xmm7 = xmm5[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rcx,%rax), %xmm6, %xmm5 # xmm5 = xmm6[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rcx,%rax), %xmm5, %xmm6 # xmm6 = xmm5[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm8, %xmm5 # xmm5 = xmm8[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rcx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0,1],mem[0],zero jmp 0x1ee2d1f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x10(%rcx,%rax), %xmm4 vinsertps $0x20, 0x8(%rcx,%rax), %xmm4, %xmm6 # xmm6 = xmm4[0,1],mem[0],xmm4[3] vmovsd 0x34(%rcx,%rax), %xmm4 vmovaps 0x7e22b(%rip), %xmm5 # 0x1f60e90 vmovss (%rcx,%rax), %xmm7 vmovss 0xc(%rcx,%rax), %xmm8 vpermt2ps %xmm7, %xmm5, %xmm4 vmovss 0x18(%rcx,%rax), %xmm7 vmovsd 0x1c(%rcx,%rax), %xmm9 vmovss 
0x24(%rcx,%rax), %xmm10 vmovss 0x28(%rcx,%rax), %xmm11 vmovss 0x2c(%rcx,%rax), %xmm12 vpermt2ps %xmm7, %xmm5, %xmm9 vmovss 0x30(%rcx,%rax), %xmm7 vmulss %xmm11, %xmm11, %xmm5 vfmadd231ss %xmm10, %xmm10, %xmm5 # xmm5 = (xmm10 * xmm10) + xmm5 vfmadd231ss %xmm12, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm12) + xmm5 vfmadd231ss %xmm7, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm7) + xmm5 vxorps %xmm13, %xmm13, %xmm13 vmovss %xmm5, %xmm13, %xmm14 # xmm14 = xmm5[0],xmm13[1,2,3] vrsqrt14ss %xmm14, %xmm13, %xmm13 vmulss 0x9a4d(%rip), %xmm13, %xmm14 # 0x1eec718 vmulss 0x9a49(%rip), %xmm5, %xmm5 # 0x1eec71c vmulss %xmm5, %xmm13, %xmm5 vmulss %xmm13, %xmm13, %xmm13 vmulss %xmm5, %xmm13, %xmm5 vaddss %xmm5, %xmm14, %xmm13 vmulss %xmm13, %xmm10, %xmm5 vinsertps $0x30, %xmm5, %xmm9, %xmm5 # xmm5 = xmm9[0,1,2],xmm5[0] vmulss %xmm13, %xmm11, %xmm9 vinsertps $0x30, %xmm9, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm9[0] vmulss %xmm13, %xmm12, %xmm9 vmulss %xmm7, %xmm13, %xmm7 vinsertps $0x30, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm7[0] vinsertps $0x10, 0x4(%rcx,%rax), %xmm8, %xmm7 # xmm7 = xmm8[0],mem[0],xmm8[2,3] vinsertps $0x20, 0x3c(%rcx,%rax), %xmm7, %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3] vinsertps $0x30, %xmm9, %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],xmm9[0] vmovss 0x99ed(%rip), %xmm8 # 0x1eec714 vsubss %xmm15, %xmm8, %xmm8 vbroadcastss %xmm15, %xmm9 vmulps %xmm4, %xmm9, %xmm4 vbroadcastss %xmm8, %xmm8 vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4 vmulps %xmm7, %xmm9, %xmm4 vfmadd213ps %xmm4, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm4 vmulps %xmm6, %xmm9, %xmm4 vfmadd213ps %xmm4, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm4 vmulps %xmm5, %xmm9, %xmm4 vfmadd213ps %xmm4, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm3) + xmm4 vshufps $0xc9, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,2,0,3] vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3] vmulps %xmm5, %xmm2, %xmm6 vfmsub231ps %xmm4, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm4) - xmm6 vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3] vshufps $0xc9, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,2,0,3] vmulps %xmm4, %xmm0, %xmm4 vfmsub231ps %xmm2, %xmm8, %xmm4 # xmm4 = (xmm8 * xmm2) - xmm4 vmulps %xmm1, %xmm8, %xmm1 vfmsub231ps %xmm5, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm5) - xmm1 vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3] vunpcklps %xmm2, %xmm7, %xmm2 # xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1] vunpcklps %xmm1, %xmm6, %xmm1 # xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1] vinsertps $0x4a, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1],zero,xmm4[2],zero vxorps %xmm6, %xmm6, %xmm6 vmovss %xmm4, %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[1,2,3] vunpcklps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] vunpcklps %xmm5, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] vunpckhps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3] vdpps $0x7f, %xmm7, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 vdivps %xmm0, %xmm4, %xmm6 vdivps %xmm0, %xmm2, %xmm8 vdivps %xmm0, %xmm1, %xmm7 vbroadcastss %xmm3, %xmm0 vshufps $0x55, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[1,1,1,1] vshufps $0xaa, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[2,2,2,2] vmulps %xmm7, %xmm2, %xmm2 vfmadd231ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm1) + xmm2 vfmadd231ps %xmm0, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm0) + xmm2 vxorps 0x3e0d2(%rip){1to4}, %xmm2, %xmm9 # 0x1f20ec0 cmpl $0x1, 0x18(%rbx) jne 0x1ee2e59 vdpps $0x7f, %xmm8, %xmm6, %xmm0 vandps 0x3e0c0(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec4 vxorps %xmm0, %xmm0, %xmm0 vucomiss 0x9900(%rip), %xmm1 # 0x1eec710 ja 0x1ee2e4a vdpps $0x7f, %xmm7, %xmm6, %xmm1 vandps 0x3e0a2(%rip){1to4}, %xmm1, %xmm1 # 
0x1f20ec4 vucomiss 0x98e6(%rip), %xmm1 # 0x1eec710 ja 0x1ee2e4a vdpps $0x7f, %xmm7, %xmm8, %xmm1 vandps 0x3e088(%rip){1to4}, %xmm1, %xmm1 # 0x1f20ec4 vucomiss 0x98cc(%rip), %xmm1 # 0x1eec710 jbe 0x1ee3004 xorl %ebp, %ebp movzbl %bpl, %ecx movl $0x2, %eax subl %ecx, %eax jmp 0x1ee2e62 movl $0x2, %eax vxorps %xmm0, %xmm0, %xmm0 movq 0x28(%rbx), %rcx movl 0x88(%rcx), %edx vmovq (%r15), %xmm1 vprolq $0x20, %xmm1, %xmm1 vmovq %xmm1, 0x80(%rcx,%rdx,4) shlq $0x6, %rdx vmovups %xmm6, (%rcx,%rdx) vmovups %xmm8, 0x10(%rcx,%rdx) vmovups %xmm7, 0x20(%rcx,%rdx) vmovups %xmm9, 0x30(%rcx,%rdx) vmovups %xmm18, 0x40(%rcx,%rdx) vmovups %xmm19, 0x50(%rcx,%rdx) vmovups %xmm20, 0x60(%rcx,%rdx) vmovups %xmm21, 0x70(%rcx,%rdx) incl 0x88(%rcx) vmovss 0xc(%r14), %xmm1 vmovss %xmm1, 0x15c(%rsp) vfmadd132ps 0x8(%r14){1to4}, %xmm9, %xmm7 # xmm7 = (xmm7 * mem) + xmm9 vfmadd231ps 0x4(%r14){1to4}, %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7 vfmadd231ps (%r14){1to4}, %xmm6, %xmm7 # xmm7 = (xmm6 * mem) + xmm7 vmovlps %xmm7, 0x150(%rsp) vextractps $0x2, %xmm7, 0x158(%rsp) vmulss 0x10(%r14), %xmm0, %xmm1 vmovss %xmm1, 0x160(%rsp) movq 0x10(%rbx), %rcx movq 0x38(%rbx), %rdx movq %r12, 0x190(%rsp) movq $0x0, 0x198(%rsp) movq %rcx, 0x1a0(%rsp) movl %eax, 0x1a8(%rsp) vmovdqa 0x20(%rbx), %xmm2 vmovdqa %xmm2, 0x1b0(%rsp) vmovss %xmm0, 0x1c0(%rsp) movq %rdx, 0x1c8(%rsp) movq $-0x1, 0x1d0(%rsp) vbroadcastss 0x10(%rcx), %xmm1 vmovaps %xmm1, 0x1e0(%rsp) vmovss 0x10(%rcx), %xmm1 cmpl $0x2, %eax jne 0x1ee2f9c vucomiss 0x8aa1(%rip), %xmm1 # 0x1eeba20 jae 0x1ee2f95 vpextrq $0x1, %xmm2, %rsi movl 0x88(%rsi), %edi testl %edi, %edi jne 0x1ee3434 vbroadcastss %xmm1, %xmm0 jmp 0x1ee2fa5 vmulss %xmm1, %xmm0, %xmm0 vbroadcastss %xmm0, %xmm0 leaq 0x190(%rsp), %rdx vmovaps %xmm0, 0x50(%rdx) leaq 0x58(%r12), %rdi leaq 0x150(%rsp), %rsi callq *0x88(%r12) movq 0x28(%rbx), %rcx movl 0x88(%rcx), %edx decl %edx movl %edx, 0x88(%rcx) movl $0xffffffff, %esi # imm = 0xFFFFFFFF movl %esi, 0x80(%rcx,%rdx,4) movl 0x88(%rcx), %edx movl %esi, 0x84(%rcx,%rdx,4) addq $0x208, %rsp # imm = 0x208 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq vdpps $0x7f, %xmm6, %xmm6, %xmm1 vdpps $0x7f, %xmm8, %xmm8, %xmm2 vsubss %xmm2, %xmm1, %xmm3 vbroadcastss 0x3dea7(%rip), %xmm4 # 0x1f20ec4 vandps %xmm4, %xmm3, %xmm3 xorl %ebp, %ebp vucomiss 0x96e5(%rip), %xmm3 # 0x1eec710 ja 0x1ee2e4c vdpps $0x7f, %xmm7, %xmm7, %xmm3 vsubss %xmm3, %xmm1, %xmm5 vandps %xmm4, %xmm5, %xmm4 vucomiss 0x96c9(%rip), %xmm4 # 0x1eec710 ja 0x1ee2e4c vsubss %xmm3, %xmm2, %xmm2 vandps 0x3de69(%rip){1to4}, %xmm2, %xmm2 # 0x1f20ec4 vucomiss 0x96ad(%rip), %xmm2 # 0x1eec710 setbe %bpl ja 0x1ee2e4c movb $0x1, %bpl vucomiss %xmm0, %xmm1 jb 0x1ee307f vsqrtss %xmm1, %xmm1, %xmm0 jmp 0x1ee2e4c vmovaps %xmm1, %xmm0 vmovaps %xmm18, 0x80(%rsp) vmovaps %xmm19, 0x70(%rsp) vmovaps %xmm20, 0x60(%rsp) vmovaps %xmm21, 0x50(%rsp) vmovaps %xmm6, 0x40(%rsp) vmovaps %xmm7, 0x30(%rsp) vmovaps %xmm8, 0x20(%rsp) vmovaps %xmm9, 0x10(%rsp) callq 0x6aa20 vmovaps 0x10(%rsp), %xmm9 vmovaps 0x20(%rsp), %xmm8 vmovaps 0x30(%rsp), %xmm7 vmovaps 0x40(%rsp), %xmm6 vmovaps 0x50(%rsp), %xmm21 vmovaps 0x60(%rsp), %xmm20 vmovaps 0x70(%rsp), %xmm19 vmovaps 0x80(%rsp), %xmm18 jmp 0x1ee2e4c cmpl $0x9243, %edx # imm = 0x9243 jg 0x1ee3338 cmpl $0x9134, %edx # imm = 0x9134 je 0x1ee3452 cmpl $0x9234, %edx # imm = 0x9234 jne 0x1ee36c6 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x4(%rdx,%rax), %xmm1 vmovss (%rdx,%rax), %xmm0 vmovaps 0x7dd44(%rip), %xmm4 # 0x1f60e80 vmovsd 0x10(%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm2 
vpermt2ps %xmm1, %xmm4, %xmm0 vpermt2ps %xmm3, %xmm4, %xmm2 vmovsd 0x1c(%rdx,%rax), %xmm1 vmovss 0x18(%rdx,%rax), %xmm3 vpermt2ps %xmm1, %xmm4, %xmm3 vmovsd 0x28(%rdx,%rax), %xmm5 vmovss 0x24(%rdx,%rax), %xmm1 vpermt2ps %xmm5, %xmm4, %xmm1 jmp 0x1ee36c6 cmpl $0x9243, %edi # imm = 0x9243 vmovaps %xmm11, 0x80(%rsp) jg 0x1ee3374 cmpl $0x9134, %edi # imm = 0x9134 je 0x1ee34b6 cmpl $0x9234, %edi # imm = 0x9234 jne 0x1ee3946 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x4(%rdi,%rsi), %xmm0 vmovss (%rdi,%rsi), %xmm14 vmovaps 0x7dcb8(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm11 vpermt2ps %xmm0, %xmm1, %xmm14 vpermt2ps %xmm2, %xmm1, %xmm11 vmovsd 0x1c(%rdi,%rsi), %xmm0 vmovss 0x18(%rdi,%rsi), %xmm16 vpermt2ps %xmm0, %xmm1, %xmm16 vmovsd 0x28(%rdi,%rsi), %xmm0 vmovss 0x24(%rdi,%rsi), %xmm15 vpermt2ps %xmm0, %xmm1, %xmm15 jmp 0x1ee3946 cmpl $0x9243, %edx # imm = 0x9243 jg 0x1ee33b5 cmpl $0x9134, %edx # imm = 0x9134 je 0x1ee351e cmpl $0x9234, %edx # imm = 0x9234 jne 0x1ee3af7 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x4(%rdx,%rax), %xmm0 vmovss (%rdx,%rax), %xmm1 vmovaps 0x7dc36(%rip), %xmm5 # 0x1f60e80 vmovsd 0x10(%rdx,%rax), %xmm2 vmovss 0xc(%rdx,%rax), %xmm4 vpermt2ps %xmm0, %xmm5, %xmm1 vpermt2ps %xmm2, %xmm5, %xmm4 vmovsd 0x1c(%rdx,%rax), %xmm0 vmovss 0x18(%rdx,%rax), %xmm2 vpermt2ps %xmm0, %xmm5, %xmm2 vmovsd 0x28(%rdx,%rax), %xmm0 vmovss 0x24(%rdx,%rax), %xmm3 vpermt2ps %xmm0, %xmm5, %xmm3 jmp 0x1ee3af7 cmpl $0x9243, %edi # imm = 0x9243 vmovaps %xmm18, 0x80(%rsp) vmovaps %xmm19, 0x70(%rsp) vmovaps %xmm20, 0x60(%rsp) vmovaps %xmm21, 0x50(%rsp) vmovaps %xmm15, 0x40(%rsp) jg 0x1ee33f1 cmpl $0x9134, %edi # imm = 0x9134 je 0x1ee3582 cmpl $0x9234, %edi # imm = 0x9234 jne 0x1ee3d6b movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x4(%rdi,%rsi), %xmm0 vmovss (%rdi,%rsi), %xmm16 vmovaps 0x7db8b(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm11 vpermt2ps %xmm0, %xmm1, %xmm16 vpermt2ps %xmm2, %xmm1, %xmm11 vmovsd 0x1c(%rdi,%rsi), %xmm0 vmovss 0x18(%rdi,%rsi), %xmm17 vpermt2ps %xmm0, %xmm1, %xmm17 vmovsd 0x28(%rdi,%rsi), %xmm0 vmovss 0x24(%rdi,%rsi), %xmm15 vpermt2ps %xmm0, %xmm1, %xmm15 jmp 0x1ee3d6b cmpl $0xb001, %edx # imm = 0xB001 je 0x1ee35eb cmpl $0x9244, %edx # imm = 0x9244 jne 0x1ee36c6 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovaps (%rdx,%rax), %xmm0 vmovaps 0x10(%rdx,%rax), %xmm2 vmovaps 0x20(%rdx,%rax), %xmm3 vmovaps 0x30(%rdx,%rax), %xmm1 jmp 0x1ee36c6 cmpl $0xb001, %edi # imm = 0xB001 je 0x1ee3868 cmpl $0x9244, %edi # imm = 0x9244 jne 0x1ee3946 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovaps (%rdi,%rsi), %xmm14 vmovaps 0x10(%rdi,%rsi), %xmm11 vmovaps 0x20(%rdi,%rsi), %xmm16 vmovaps 0x30(%rdi,%rsi), %xmm15 jmp 0x1ee3946 cmpl $0xb001, %edx # imm = 0xB001 je 0x1ee3a1c cmpl $0x9244, %edx # imm = 0x9244 jne 0x1ee3af7 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovaps (%rdx,%rax), %xmm1 vmovaps 0x10(%rdx,%rax), %xmm4 vmovaps 0x20(%rdx,%rax), %xmm2 vmovaps 0x30(%rdx,%rax), %xmm3 jmp 0x1ee3af7 cmpl $0xb001, %edi # imm = 0xB001 je 0x1ee3c8c cmpl $0x9244, %edi # imm = 0x9244 jne 0x1ee3d6b movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovaps (%rdi,%rsi), %xmm16 vmovaps 0x10(%rdi,%rsi), %xmm11 vmovaps 0x20(%rdi,%rsi), %xmm17 vmovaps 0x30(%rdi,%rsi), %xmm15 jmp 0x1ee3d6b leaq 0x1f0(%rsp), %r14 vmovaps %xmm1, %xmm0 movq %r14, %rdx callq 0x8d2a34 vmovaps (%r14), %xmm0 jmp 0x1ee2fa5 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovss (%rdx,%rax), %xmm0 vmovss 
0x4(%rdx,%rax), %xmm1 vmovss 0x8(%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm4 vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdx,%rax), %xmm1, %xmm2 # xmm2 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdx,%rax), %xmm3, %xmm1 # xmm1 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdx,%rax), %xmm1, %xmm3 # xmm3 = xmm1[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero jmp 0x1ee36c6 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovss (%rdi,%rsi), %xmm0 vmovss 0x4(%rdi,%rsi), %xmm1 vmovss 0x8(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm3 vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm14 # xmm14 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm11 # xmm11 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm16 # xmm16 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm15 # xmm15 = xmm0[0,1],mem[0],zero jmp 0x1ee3946 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovss (%rdx,%rax), %xmm0 vmovss 0x4(%rdx,%rax), %xmm2 vmovss 0x8(%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm5 vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm4 # xmm4 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm5, %xmm0 # xmm0 = xmm5[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm3 # xmm3 = xmm0[0,1],mem[0],zero jmp 0x1ee3af7 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovss (%rdi,%rsi), %xmm0 vmovss 0x4(%rdi,%rsi), %xmm1 vmovss 0x8(%rdi,%rsi), %xmm2 vmovss 0xc(%rdi,%rsi), %xmm3 vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm16 # xmm16 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm11 # xmm11 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm17 # xmm17 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm15 # xmm15 = xmm0[0,1],mem[0],zero jmp 0x1ee3d6b movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x10(%rdx,%rax), %xmm0 vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdx,%rax), %xmm0 vmovaps 0x7d881(%rip), %xmm1 # 0x1f60e90 vmovss (%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm4 vpermt2ps %xmm3, %xmm1, %xmm0 vmovss 0x18(%rdx,%rax), %xmm3 vmovsd 0x1c(%rdx,%rax), 
%xmm5 vmovss 0x24(%rdx,%rax), %xmm6 vmovss 0x28(%rdx,%rax), %xmm7 vmovss 0x2c(%rdx,%rax), %xmm8 vpermt2ps %xmm3, %xmm1, %xmm5 vmovss 0x30(%rdx,%rax), %xmm3 vmulss %xmm7, %xmm7, %xmm1 vfmadd231ss %xmm6, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm6) + xmm1 vfmadd231ss %xmm8, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm8) + xmm1 vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm1, %xmm9, %xmm10 # xmm10 = xmm1[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x90a4(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x90a0(%rip), %xmm1, %xmm1 # 0x1eec71c vmulss %xmm1, %xmm9, %xmm1 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm1, %xmm9, %xmm1 vaddss %xmm1, %xmm10, %xmm9 vmulss %xmm6, %xmm9, %xmm1 vinsertps $0x30, %xmm1, %xmm5, %xmm1 # xmm1 = xmm5[0,1,2],xmm1[0] vmulss %xmm7, %xmm9, %xmm5 vinsertps $0x30, %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm5[0] vmulss %xmm9, %xmm8, %xmm5 vmulss %xmm3, %xmm9, %xmm3 vinsertps $0x30, %xmm3, %xmm2, %xmm3 # xmm3 = xmm2[0,1,2],xmm3[0] vinsertps $0x10, 0x4(%rdx,%rax), %xmm4, %xmm2 # xmm2 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdx,%rax), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3] vinsertps $0x30, %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0] vshufps $0xff, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[3,3,3,3] vshufps $0xff, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[3,3,3,3] vshufps $0xff, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[3,3,3,3] vshufps $0xff, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[3,3,3,3] vxorps %xmm18, %xmm18, %xmm18 vshufps $0xe9, %xmm18, %xmm0, %xmm4 # xmm4 = xmm0[1,2],xmm18[2,3] vblendps $0x4, %xmm2, %xmm4, %xmm4 # xmm4 = xmm4[0,1],xmm2[2],xmm4[3] vmulss %xmm6, %xmm6, %xmm9 vmovaps %xmm5, %xmm10 vfmadd213ss %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm10) + xmm9 vfnmadd231ss %xmm7, %xmm7, %xmm10 # xmm10 = -(xmm7 * xmm7) + xmm10 vfnmadd231ss %xmm8, %xmm8, %xmm10 # xmm10 = -(xmm8 * xmm8) + xmm10 vmulss %xmm5, %xmm8, %xmm11 vmovaps %xmm7, %xmm12 vfmadd213ss %xmm11, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm11 vaddss %xmm12, %xmm12, %xmm12 vmulss %xmm5, %xmm7, %xmm13 vmovaps %xmm8, %xmm14 vfmsub213ss %xmm13, %xmm6, %xmm14 # xmm14 = (xmm6 * xmm14) - xmm13 vaddss %xmm14, %xmm14, %xmm14 vfmsub231ss %xmm7, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm7) - xmm11 vaddss %xmm11, %xmm11, %xmm11 vfmsub231ss %xmm5, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm5) - xmm9 vmovaps %xmm7, %xmm15 vfmadd213ss %xmm9, %xmm7, %xmm15 # xmm15 = (xmm7 * xmm15) + xmm9 vfnmadd231ss %xmm8, %xmm8, %xmm15 # xmm15 = -(xmm8 * xmm8) + xmm15 vmulss %xmm5, %xmm6, %xmm5 vmovaps %xmm8, %xmm16 vfmadd213ss %xmm5, %xmm7, %xmm16 # xmm16 = (xmm7 * xmm16) + xmm5 vaddss %xmm16, %xmm16, %xmm16 vfmadd213ss %xmm13, %xmm8, %xmm6 # xmm6 = (xmm8 * xmm6) + xmm13 vaddss %xmm6, %xmm6, %xmm6 vfmsub231ss %xmm8, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm8) - xmm5 vaddss %xmm5, %xmm5, %xmm5 vfnmadd231ss %xmm7, %xmm7, %xmm9 # xmm9 = -(xmm7 * xmm7) + xmm9 vfmadd231ss %xmm8, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm8) + xmm9 vbroadcastss %xmm10, %xmm7 vbroadcastss %xmm12, %xmm8 vmovaps 0x8f76(%rip), %xmm10 # 0x1eec700 vbroadcastss %xmm14, %xmm12 vmulps %xmm10, %xmm12, %xmm12 vmovsd 0x8f54(%rip), %xmm13 # 0x1eec6f0 vmovss 0x8f70(%rip), %xmm14 # 0x1eec714 vfmadd231ps %xmm8, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm8) + xmm12 vfmadd231ps %xmm7, %xmm14, %xmm12 # xmm12 = (xmm14 * xmm7) + xmm12 vbroadcastss %xmm11, %xmm7 vbroadcastss %xmm15, %xmm8 vbroadcastss %xmm16, %xmm11 vmulps %xmm10, %xmm11, %xmm11 vfmadd231ps %xmm8, %xmm13, %xmm11 # xmm11 = (xmm13 * xmm8) + xmm11 vfmadd231ps %xmm7, %xmm14, %xmm11 # xmm11 = (xmm14 * xmm7) + xmm11 vbroadcastss %xmm6, 
%xmm6 vbroadcastss %xmm5, %xmm5 vbroadcastss %xmm9, %xmm7 vmulps %xmm7, %xmm10, %xmm7 vfmadd231ps %xmm5, %xmm13, %xmm7 # xmm7 = (xmm13 * xmm5) + xmm7 vfmadd231ps %xmm6, %xmm14, %xmm7 # xmm7 = (xmm14 * xmm6) + xmm7 vaddps %xmm18, %xmm4, %xmm4 vbroadcastss %xmm0, %xmm0 vmulps %xmm18, %xmm7, %xmm5 vfmadd213ps %xmm5, %xmm11, %xmm18 # xmm18 = (xmm11 * xmm18) + xmm5 vfmadd231ps %xmm0, %xmm12, %xmm18 # xmm18 = (xmm12 * xmm0) + xmm18 vbroadcastss %xmm2, %xmm0 vshufps $0x55, %xmm2, %xmm2, %xmm19 # xmm19 = xmm2[1,1,1,1] vfmadd213ps %xmm5, %xmm11, %xmm19 # xmm19 = (xmm11 * xmm19) + xmm5 vfmadd231ps %xmm0, %xmm12, %xmm19 # xmm19 = (xmm12 * xmm0) + xmm19 vbroadcastss %xmm3, %xmm0 vshufps $0x55, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[1,1,1,1] vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2] vmulps %xmm7, %xmm3, %xmm20 vfmadd231ps %xmm2, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm2) + xmm20 vfmadd231ps %xmm0, %xmm12, %xmm20 # xmm20 = (xmm12 * xmm0) + xmm20 vbroadcastss %xmm1, %xmm0 vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1] vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2] vmulps %xmm7, %xmm1, %xmm1 vfmadd231ps %xmm2, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm2) + xmm1 vfmadd231ps %xmm0, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm0) + xmm1 vaddps %xmm1, %xmm4, %xmm21 jmp 0x1ee2671 movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x10(%rdi,%rsi), %xmm0 vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdi,%rsi), %xmm1 vmovaps 0x7d601(%rip), %xmm2 # 0x1f60e90 vmovss (%rdi,%rsi), %xmm3 vmovss 0xc(%rdi,%rsi), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rdi,%rsi), %xmm3 vmovsd 0x1c(%rdi,%rsi), %xmm5 vmovss 0x24(%rdi,%rsi), %xmm6 vmovss 0x28(%rdi,%rsi), %xmm7 vmovss 0x2c(%rdi,%rsi), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdi,%rsi), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x8e24(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x8e20(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm15 # xmm15 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm14 # xmm14 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm16 # xmm16 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdi,%rsi), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm11 # xmm11 = xmm0[0,1,2],xmm1[0] incl %ecx imulq $0x38, %rcx, %rcx leaq (%rdx,%rcx), %rsi movl 0x20(%rdx,%rcx), %ecx cmpl $0x9243, %ecx # imm = 0x9243 jg 0x1ee39d8 cmpl $0x9134, %ecx # imm = 0x9134 je 0x1ee3e41 cmpl $0x9234, %ecx # imm = 0x9234 jne 0x1ee3f87 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x4(%rcx,%rax), %xmm0 vmovss (%rcx,%rax), %xmm17 vmovaps 0x7d4ef(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm19 vpermt2ps %xmm0, %xmm1, %xmm17 vpermt2ps %xmm2, %xmm1, %xmm19 vmovsd 0x1c(%rcx,%rax), %xmm0 vmovss 0x18(%rcx,%rax), %xmm20 vpermt2ps %xmm0, %xmm1, %xmm20 vmovsd 0x28(%rcx,%rax), %xmm0 vmovss 0x24(%rcx,%rax), %xmm21 vpermt2ps %xmm0, 
%xmm1, %xmm21 jmp 0x1ee3f87 cmpl $0xb001, %ecx # imm = 0xB001 je 0x1ee3ea9 cmpl $0x9244, %ecx # imm = 0x9244 jne 0x1ee3f87 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovaps (%rcx,%rax), %xmm17 vmovaps 0x10(%rcx,%rax), %xmm19 vmovaps 0x20(%rcx,%rax), %xmm20 vmovaps 0x30(%rcx,%rax), %xmm21 jmp 0x1ee3f87 movq (%rcx), %rdx imulq 0x10(%rcx), %rax vmovsd 0x10(%rdx,%rax), %xmm0 vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdx,%rax), %xmm1 vmovaps 0x7d450(%rip), %xmm2 # 0x1f60e90 vmovss (%rdx,%rax), %xmm3 vmovss 0xc(%rdx,%rax), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rdx,%rax), %xmm3 vmovsd 0x1c(%rdx,%rax), %xmm5 vmovss 0x24(%rdx,%rax), %xmm6 vmovss 0x28(%rdx,%rax), %xmm7 vmovss 0x2c(%rdx,%rax), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdx,%rax), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x8c73(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x8c6f(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm9 vmulss %xmm6, %xmm9, %xmm3 vinsertps $0x30, %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[0,1,2],xmm3[0] vmulss %xmm7, %xmm9, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0] vmulss %xmm9, %xmm8, %xmm5 vmulss %xmm2, %xmm9, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm2 # xmm2 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm5, %xmm0, %xmm4 # xmm4 = xmm0[0,1,2],xmm5[0] vshufps $0xff, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[3,3,3,3] vshufps $0xff, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[3,3,3,3] vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3] vshufps $0xff, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[3,3,3,3] vxorps %xmm0, %xmm0, %xmm0 vshufps $0xe9, %xmm0, %xmm1, %xmm5 # xmm5 = xmm1[1,2],xmm0[2,3] vblendps $0x4, %xmm4, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm4[2],xmm5[3] vmulss %xmm7, %xmm7, %xmm10 vmovaps %xmm6, %xmm11 vfmadd213ss %xmm10, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm11) + xmm10 vfnmadd231ss %xmm8, %xmm8, %xmm11 # xmm11 = -(xmm8 * xmm8) + xmm11 vfnmadd231ss %xmm9, %xmm9, %xmm11 # xmm11 = -(xmm9 * xmm9) + xmm11 vmulss %xmm6, %xmm9, %xmm12 vmovaps %xmm8, %xmm13 vfmadd213ss %xmm12, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm13) + xmm12 vaddss %xmm13, %xmm13, %xmm13 vmulss %xmm6, %xmm8, %xmm14 vmovaps %xmm9, %xmm15 vfmsub213ss %xmm14, %xmm7, %xmm15 # xmm15 = (xmm7 * xmm15) - xmm14 vaddss %xmm15, %xmm15, %xmm15 vfmsub231ss %xmm8, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm8) - xmm12 vaddss %xmm12, %xmm12, %xmm12 vfmsub231ss %xmm6, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm6) - xmm10 vmovaps %xmm8, %xmm16 vfmadd213ss %xmm10, %xmm8, %xmm16 # xmm16 = (xmm8 * xmm16) + xmm10 vfnmadd231ss %xmm9, %xmm9, %xmm16 # xmm16 = -(xmm9 * xmm9) + xmm16 vmulss %xmm7, %xmm6, %xmm6 vmovaps %xmm9, %xmm17 vfmadd213ss %xmm6, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm6 vaddss %xmm17, %xmm17, %xmm17 vfmadd213ss %xmm14, %xmm9, %xmm7 # xmm7 = (xmm9 * xmm7) + xmm14 vaddss %xmm7, %xmm7, %xmm7 vfmsub231ss %xmm9, %xmm8, %xmm6 # xmm6 = (xmm8 * xmm9) - xmm6 vaddss %xmm6, %xmm6, %xmm6 vfnmadd231ss %xmm8, %xmm8, %xmm10 # xmm10 = -(xmm8 * xmm8) + xmm10 vfmadd231ss %xmm9, %xmm9, 
%xmm10 # xmm10 = (xmm9 * xmm9) + xmm10 vbroadcastss %xmm11, %xmm8 vbroadcastss %xmm13, %xmm9 vmovaps 0x8b44(%rip), %xmm11 # 0x1eec700 vbroadcastss %xmm15, %xmm13 vmulps %xmm11, %xmm13, %xmm13 vmovsd 0x8b22(%rip), %xmm14 # 0x1eec6f0 vmovss 0x8b3e(%rip), %xmm15 # 0x1eec714 vfmadd231ps %xmm9, %xmm14, %xmm13 # xmm13 = (xmm14 * xmm9) + xmm13 vfmadd231ps %xmm8, %xmm15, %xmm13 # xmm13 = (xmm15 * xmm8) + xmm13 vbroadcastss %xmm12, %xmm8 vbroadcastss %xmm16, %xmm9 vbroadcastss %xmm17, %xmm12 vmulps %xmm11, %xmm12, %xmm12 vfmadd231ps %xmm9, %xmm14, %xmm12 # xmm12 = (xmm14 * xmm9) + xmm12 vfmadd231ps %xmm8, %xmm15, %xmm12 # xmm12 = (xmm15 * xmm8) + xmm12 vbroadcastss %xmm7, %xmm7 vbroadcastss %xmm6, %xmm6 vbroadcastss %xmm10, %xmm8 vmulps %xmm11, %xmm8, %xmm8 vfmadd231ps %xmm6, %xmm14, %xmm8 # xmm8 = (xmm14 * xmm6) + xmm8 vfmadd231ps %xmm7, %xmm15, %xmm8 # xmm8 = (xmm15 * xmm7) + xmm8 vaddps %xmm0, %xmm5, %xmm5 vbroadcastss %xmm1, %xmm1 vmulps %xmm0, %xmm8, %xmm6 vfmadd213ps %xmm6, %xmm12, %xmm0 # xmm0 = (xmm12 * xmm0) + xmm6 vfmadd231ps %xmm1, %xmm13, %xmm0 # xmm0 = (xmm13 * xmm1) + xmm0 vbroadcastss %xmm4, %xmm7 vshufps $0x55, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[1,1,1,1] vfmadd213ps %xmm6, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm1) + xmm6 vfmadd231ps %xmm7, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm7) + xmm1 vbroadcastss %xmm2, %xmm4 vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1] vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2] vmulps %xmm2, %xmm8, %xmm2 vfmadd231ps %xmm6, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm6) + xmm2 vfmadd231ps %xmm4, %xmm13, %xmm2 # xmm2 = (xmm13 * xmm4) + xmm2 vbroadcastss %xmm3, %xmm4 vshufps $0x55, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[1,1,1,1] vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2] vmulps %xmm3, %xmm8, %xmm3 vfmadd231ps %xmm6, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm6) + xmm3 vfmadd231ps %xmm4, %xmm13, %xmm3 # xmm3 = (xmm13 * xmm4) + xmm3 vaddps %xmm3, %xmm5, %xmm3 jmp 0x1ee2d5a movq (%rsi), %rdi movq 0x10(%rsi), %rsi imulq %rax, %rsi vmovsd 0x10(%rdi,%rsi), %xmm0 vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rdi,%rsi), %xmm1 vmovaps 0x7d1dd(%rip), %xmm2 # 0x1f60e90 vmovss (%rdi,%rsi), %xmm3 vmovss 0xc(%rdi,%rsi), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rdi,%rsi), %xmm3 vmovsd 0x1c(%rdi,%rsi), %xmm5 vmovss 0x24(%rdi,%rsi), %xmm6 vmovss 0x28(%rdi,%rsi), %xmm7 vmovss 0x2c(%rdi,%rsi), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rdi,%rsi), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x8a00(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x89fc(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm15 # xmm15 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm16 # xmm16 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm17 # xmm17 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rdi,%rsi), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm11 # xmm11 = xmm0[0,1,2],xmm1[0] incl 
%ecx imulq $0x38, %rcx, %rcx leaq (%rdx,%rcx), %rsi movl 0x20(%rdx,%rcx), %ecx cmpl $0x9243, %ecx # imm = 0x9243 jg 0x1ee3dfd cmpl $0x9134, %ecx # imm = 0x9134 je 0x1ee45e9 cmpl $0x9234, %ecx # imm = 0x9234 jne 0x1ee472f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x4(%rcx,%rax), %xmm0 vmovss (%rcx,%rax), %xmm18 vmovaps 0x7d0ca(%rip), %xmm1 # 0x1f60e80 vmovsd 0x10(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm21 vpermt2ps %xmm0, %xmm1, %xmm18 vpermt2ps %xmm2, %xmm1, %xmm21 vmovsd 0x1c(%rcx,%rax), %xmm0 vmovss 0x18(%rcx,%rax), %xmm20 vpermt2ps %xmm0, %xmm1, %xmm20 vmovsd 0x28(%rcx,%rax), %xmm0 vmovss 0x24(%rcx,%rax), %xmm19 vpermt2ps %xmm0, %xmm1, %xmm19 jmp 0x1ee472f cmpl $0xb001, %ecx # imm = 0xB001 je 0x1ee4651 cmpl $0x9244, %ecx # imm = 0x9244 jne 0x1ee472f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovaps (%rcx,%rax), %xmm18 vmovaps 0x10(%rcx,%rax), %xmm21 vmovaps 0x20(%rcx,%rax), %xmm20 vmovaps 0x30(%rcx,%rax), %xmm19 jmp 0x1ee472f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovss (%rcx,%rax), %xmm0 vmovss 0x4(%rcx,%rax), %xmm1 vmovss 0x8(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm3 vinsertps $0x1c, 0x10(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rcx,%rax), %xmm0, %xmm17 # xmm17 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rcx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rcx,%rax), %xmm0, %xmm19 # xmm19 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rcx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rcx,%rax), %xmm0, %xmm20 # xmm20 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rcx,%rax), %xmm0, %xmm21 # xmm21 = xmm0[0,1],mem[0],zero jmp 0x1ee3f87 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x10(%rcx,%rax), %xmm0 vinsertps $0x20, 0x8(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rcx,%rax), %xmm1 vmovaps 0x7cfc3(%rip), %xmm2 # 0x1f60e90 vmovss (%rcx,%rax), %xmm3 vmovss 0xc(%rcx,%rax), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rcx,%rax), %xmm3 vmovsd 0x1c(%rcx,%rax), %xmm5 vmovss 0x24(%rcx,%rax), %xmm6 vmovss 0x28(%rcx,%rax), %xmm7 vmovss 0x2c(%rcx,%rax), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rcx,%rax), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x87e6(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x87e2(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm21 # xmm21 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm17 # xmm17 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm20 # xmm20 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rcx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm19 # xmm19 = xmm0[0,1,2],xmm1[0] vshufps $0xff, %xmm15, %xmm15, %xmm23 # xmm23 = xmm15[3,3,3,3] vshufps $0xff, %xmm14, %xmm14, %xmm22 # xmm22 = xmm14[3,3,3,3] vmovaps %xmm11, 0x70(%rsp) vshufps $0xff, %xmm11, %xmm11, %xmm12 # xmm12 
= xmm11[3,3,3,3] vshufps $0xff, %xmm16, %xmm16, %xmm11 # xmm11 = xmm16[3,3,3,3] vshufps $0xff, %xmm21, %xmm21, %xmm27 # xmm27 = xmm21[3,3,3,3] vshufps $0xff, %xmm17, %xmm17, %xmm26 # xmm26 = xmm17[3,3,3,3] vshufps $0xff, %xmm19, %xmm19, %xmm25 # xmm25 = xmm19[3,3,3,3] vshufps $0xff, %xmm20, %xmm20, %xmm24 # xmm24 = xmm20[3,3,3,3] vmulss %xmm26, %xmm22, %xmm0 vfmadd231ss %xmm23, %xmm27, %xmm0 # xmm0 = (xmm27 * xmm23) + xmm0 vfmadd231ss %xmm12, %xmm25, %xmm0 # xmm0 = (xmm25 * xmm12) + xmm0 vfmadd231ss %xmm11, %xmm24, %xmm0 # xmm0 = (xmm24 * xmm11) + xmm0 vbroadcastss 0x3cedb(%rip), %xmm6 # 0x1f20ec0 vxorps %xmm6, %xmm0, %xmm1 vucomiss %xmm0, %xmm1 seta %al vxorps %xmm6, %xmm27, %xmm2 vxorps %xmm6, %xmm26, %xmm3 vxorps %xmm6, %xmm25, %xmm4 vxorps %xmm6, %xmm24, %xmm5 kmovd %eax, %k1 vmovss %xmm2, %xmm27, %xmm27 {%k1} vmovss %xmm3, %xmm26, %xmm26 {%k1} vmovss %xmm4, %xmm25, %xmm25 {%k1} vmovss %xmm5, %xmm24, %xmm24 {%k1} vandps 0x3ce96(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec4 vmovss 0xc94a(%rip), %xmm2 # 0x1ef0980 vfmadd213ss 0xc945(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc940(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc93b(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc936(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc931(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vmaxss %xmm0, %xmm1, %xmm28 vmovss 0x86a1(%rip), %xmm29 # 0x1eec714 vsubss %xmm3, %xmm29, %xmm0 vxorps %xmm4, %xmm4, %xmm4 vucomiss %xmm4, %xmm0 jb 0x1ee408c vsqrtss %xmm0, %xmm0, %xmm0 jmp 0x1ee41c8 vmovaps %xmm14, 0x60(%rsp) vmovaps %xmm15, 0x50(%rsp) vmovaps %xmm16, 0x40(%rsp) vmovaps %xmm17, 0x30(%rsp) vmovaps %xmm19, 0x20(%rsp) vmovaps %xmm20, 0x10(%rsp) vmovaps %xmm21, 0x140(%rsp) vmovaps %xmm11, 0x130(%rsp) vmovaps %xmm12, 0x120(%rsp) vmovaps %xmm22, 0x110(%rsp) vmovaps %xmm23, 0x100(%rsp) vmovaps %xmm24, 0xf0(%rsp) vmovaps %xmm25, 0xe0(%rsp) vmovaps %xmm26, 0xd0(%rsp) vmovaps %xmm27, 0xc0(%rsp) vmovss %xmm28, 0xb0(%rsp) vmovaps %xmm3, 0xa0(%rsp) vmovss %xmm2, 0x90(%rsp) callq 0x6aa20 vxorps %xmm4, %xmm4, %xmm4 vmovss 0x90(%rsp), %xmm2 vmovaps 0xa0(%rsp), %xmm3 vbroadcastss 0x3cd80(%rip), %xmm6 # 0x1f20ec0 vmovss 0x85ca(%rip), %xmm29 # 0x1eec714 vmovss 0xb0(%rsp), %xmm28 vmovaps 0xc0(%rsp), %xmm27 vmovaps 0xd0(%rsp), %xmm26 vmovaps 0xe0(%rsp), %xmm25 vmovaps 0xf0(%rsp), %xmm24 vmovaps 0x100(%rsp), %xmm23 vmovaps 0x110(%rsp), %xmm22 vmovaps 0x120(%rsp), %xmm12 vmovaps 0x130(%rsp), %xmm11 vmovaps 0x140(%rsp), %xmm21 vmovaps 0x10(%rsp), %xmm20 vmovaps 0x20(%rsp), %xmm19 vmovaps 0x30(%rsp), %xmm17 vmovaps 0x40(%rsp), %xmm16 vmovaps 0x50(%rsp), %xmm15 vmovaps 0x60(%rsp), %xmm14 vmulss %xmm2, %xmm0, %xmm1 vmovss 0xc7c4(%rip), %xmm0 # 0x1ef0998 vsubss %xmm1, %xmm0, %xmm1 vmaxss %xmm1, %xmm4, %xmm1 vxorps %xmm6, %xmm1, %xmm2 vcmpltss %xmm4, %xmm28, %k1 vmovss %xmm2, %xmm1, %xmm1 {%k1} vsubss %xmm1, %xmm0, %xmm1 vcmpltss %xmm3, %xmm29, %k1 vmovss 0xc79a(%rip), %xmm1 {%k1} # 0x1ef099c vmovaps 0x80(%rsp), %xmm13 vmulss %xmm1, %xmm13, %xmm1 vmulss 0xc789(%rip), %xmm1, %xmm2 # 0x1ef09a0 vroundss $0x9, %xmm2, %xmm2, %xmm2 vcvttss2si %xmm2, %eax vfnmadd213ss %xmm1, %xmm2, %xmm0 # xmm0 = -(xmm2 * xmm0) + xmm1 kmovd %eax, %k1 andl $0x3, %eax cmpl $0x2, %eax setae %cl decl %eax cmpl $0x2, %eax setb %al vmulss %xmm0, %xmm0, %xmm1 vmovss 0xc75d(%rip), %xmm2 # 0x1ef09a4 vfmadd213ss 0xc758(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vmovss 0xc754(%rip), %xmm5 # 0x1ef09ac vfmadd213ss 0xc74f(%rip), %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + mem vfmadd213ss 0xc74a(%rip), 
%xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vfmadd213ss 0xc745(%rip), %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + mem vfmadd213ss 0xc740(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vfmadd213ss 0xc73b(%rip), %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + mem vfmadd213ss 0xc736(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vmovss 0x8486(%rip), %xmm4 # 0x1eec71c vfmadd213ss %xmm4, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm4 vfmadd213ss %xmm29, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm29 vfmadd213ss %xmm29, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm29 vmulss %xmm2, %xmm0, %xmm0 vmovaps %xmm0, %xmm1 vmovss %xmm5, %xmm1, %xmm1 {%k1} vmovss %xmm0, %xmm5, %xmm5 {%k1} vxorps %xmm6, %xmm1, %xmm0 kmovd %ecx, %k1 vmovss %xmm0, %xmm1, %xmm1 {%k1} vxorps %xmm6, %xmm5, %xmm0 kmovd %eax, %k1 vmovss %xmm0, %xmm5, %xmm5 {%k1} vmovaps %xmm23, %xmm0 vfmsub213ss %xmm27, %xmm28, %xmm0 # xmm0 = (xmm28 * xmm0) - xmm27 vmovaps %xmm22, %xmm2 vfmsub213ss %xmm26, %xmm28, %xmm2 # xmm2 = (xmm28 * xmm2) - xmm26 vmovaps %xmm12, %xmm3 vfmsub213ss %xmm25, %xmm28, %xmm3 # xmm3 = (xmm28 * xmm3) - xmm25 vmovaps %xmm11, %xmm6 vfmsub213ss %xmm24, %xmm28, %xmm6 # xmm6 = (xmm28 * xmm6) - xmm24 vmulss %xmm2, %xmm2, %xmm7 vfmadd231ss %xmm0, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm0) + xmm7 vfmadd231ss %xmm3, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm3) + xmm7 vfmadd231ss %xmm6, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm6) + xmm7 vxorps %xmm18, %xmm18, %xmm18 vmovss %xmm7, %xmm18, %xmm8 # xmm8 = xmm7[0],xmm18[1,2,3] vrsqrt14ss %xmm8, %xmm18, %xmm8 vmovss 0x83e8(%rip), %xmm9 # 0x1eec718 vmulss %xmm9, %xmm8, %xmm10 vmulss %xmm4, %xmm7, %xmm7 vmulss %xmm7, %xmm8, %xmm7 vmulss %xmm8, %xmm8, %xmm8 vmulss %xmm7, %xmm8, %xmm7 vaddss %xmm7, %xmm10, %xmm7 vmulss %xmm7, %xmm0, %xmm0 vmulss %xmm7, %xmm2, %xmm2 vmulss %xmm7, %xmm3, %xmm8 vmulss %xmm7, %xmm6, %xmm6 vmulss %xmm1, %xmm0, %xmm3 vfmsub231ss %xmm23, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm23) - xmm3 vmulss %xmm1, %xmm2, %xmm2 vfmsub231ss %xmm22, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm22) - xmm2 vmulss %xmm1, %xmm8, %xmm0 vfmsub231ss %xmm12, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm12) - xmm0 vmulss %xmm1, %xmm6, %xmm1 vfmsub231ss %xmm5, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm5) - xmm1 vsubss %xmm13, %xmm29, %xmm5 vmulss %xmm27, %xmm13, %xmm6 vfmadd231ss %xmm23, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm23) + xmm6 vmulss %xmm26, %xmm13, %xmm7 vfmadd231ss %xmm22, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm22) + xmm7 vmulss %xmm25, %xmm13, %xmm8 vfmadd231ss %xmm12, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm12) + xmm8 vmulss %xmm24, %xmm13, %xmm10 vfmadd231ss %xmm11, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm11) + xmm10 vmulss %xmm7, %xmm7, %xmm11 vfmadd231ss %xmm6, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm6) + xmm11 vfmadd231ss %xmm8, %xmm8, %xmm11 # xmm11 = (xmm8 * xmm8) + xmm11 vfmadd231ss %xmm10, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm10) + xmm11 vmovss %xmm11, %xmm18, %xmm12 # xmm12 = xmm11[0],xmm18[1,2,3] vrsqrt14ss %xmm12, %xmm18, %xmm12 vmulss %xmm9, %xmm12, %xmm9 vmulss %xmm4, %xmm11, %xmm4 vmulss %xmm4, %xmm12, %xmm4 vmulss %xmm12, %xmm12, %xmm11 vmulss %xmm4, %xmm11, %xmm4 vaddss %xmm4, %xmm9, %xmm4 vmulss %xmm4, %xmm6, %xmm6 vmulss %xmm4, %xmm7, %xmm7 vmulss %xmm4, %xmm8, %xmm8 vmulss %xmm4, %xmm10, %xmm4 vucomiss 0xc5c1(%rip), %xmm28 # 0x1ef09c8 seta %al kmovd %eax, %k1 vmovss %xmm6, %xmm3, %xmm3 {%k1} vmovss %xmm7, %xmm2, %xmm2 {%k1} vmovss %xmm8, %xmm0, %xmm0 {%k1} vmovss %xmm4, %xmm1, %xmm1 {%k1} vbroadcastss %xmm13, %xmm4 vmulps %xmm17, %xmm4, %xmm6 vbroadcastss %xmm5, %xmm5 vfmadd213ps %xmm6, %xmm5, %xmm14 # xmm14 = (xmm5 * xmm14) + xmm6 vmulps %xmm19, %xmm4, %xmm6 
vmovaps 0x70(%rsp), %xmm13 vfmadd213ps %xmm6, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm6 vmulps %xmm20, %xmm4, %xmm6 vfmadd213ps %xmm6, %xmm5, %xmm16 # xmm16 = (xmm5 * xmm16) + xmm6 vmulps %xmm21, %xmm4, %xmm4 vfmadd213ps %xmm4, %xmm5, %xmm15 # xmm15 = (xmm5 * xmm15) + xmm4 vshufps $0xe9, %xmm18, %xmm14, %xmm4 # xmm4 = xmm14[1,2],xmm18[2,3] vblendps $0x4, %xmm13, %xmm4, %xmm4 # xmm4 = xmm4[0,1],xmm13[2],xmm4[3] vmulss %xmm2, %xmm2, %xmm5 vmovaps %xmm3, %xmm6 vfmadd213ss %xmm5, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm6) + xmm5 vfnmadd231ss %xmm0, %xmm0, %xmm6 # xmm6 = -(xmm0 * xmm0) + xmm6 vfnmadd231ss %xmm1, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm1) + xmm6 vmulss %xmm1, %xmm3, %xmm7 vmovaps %xmm0, %xmm8 vfmadd213ss %xmm7, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm7 vaddss %xmm8, %xmm8, %xmm8 vmulss %xmm0, %xmm3, %xmm9 vmovaps %xmm1, %xmm10 vfmsub213ss %xmm9, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm10) - xmm9 vaddss %xmm10, %xmm10, %xmm10 vfmsub231ss %xmm0, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm0) - xmm7 vaddss %xmm7, %xmm7, %xmm7 vfmsub231ss %xmm3, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm3) - xmm5 vmovaps %xmm0, %xmm11 vfmadd213ss %xmm5, %xmm0, %xmm11 # xmm11 = (xmm0 * xmm11) + xmm5 vfnmadd231ss %xmm1, %xmm1, %xmm11 # xmm11 = -(xmm1 * xmm1) + xmm11 vmulss %xmm2, %xmm3, %xmm3 vmovaps %xmm1, %xmm12 vfmadd213ss %xmm3, %xmm0, %xmm12 # xmm12 = (xmm0 * xmm12) + xmm3 vaddss %xmm12, %xmm12, %xmm12 vfmadd213ss %xmm9, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm9 vaddss %xmm2, %xmm2, %xmm2 vfmsub231ss %xmm1, %xmm0, %xmm3 # xmm3 = (xmm0 * xmm1) - xmm3 vaddss %xmm3, %xmm3, %xmm3 vfnmadd231ss %xmm0, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm0) + xmm5 vfmadd231ss %xmm1, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm1) + xmm5 vbroadcastss %xmm6, %xmm0 vbroadcastss %xmm8, %xmm1 vmovaps 0x81f9(%rip), %xmm6 # 0x1eec700 vbroadcastss %xmm10, %xmm8 vmulps %xmm6, %xmm8, %xmm8 vmovsd 0x81d8(%rip), %xmm9 # 0x1eec6f0 vmovss 0x81f4(%rip), %xmm10 # 0x1eec714 vfmadd231ps %xmm1, %xmm9, %xmm8 # xmm8 = (xmm9 * xmm1) + xmm8 vfmadd231ps %xmm0, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm0) + xmm8 vbroadcastss %xmm7, %xmm0 vbroadcastss %xmm11, %xmm1 vbroadcastss %xmm12, %xmm7 vmulps %xmm6, %xmm7, %xmm7 vfmadd231ps %xmm1, %xmm9, %xmm7 # xmm7 = (xmm9 * xmm1) + xmm7 vfmadd231ps %xmm0, %xmm10, %xmm7 # xmm7 = (xmm10 * xmm0) + xmm7 vbroadcastss %xmm2, %xmm0 vbroadcastss %xmm3, %xmm1 vbroadcastss %xmm5, %xmm2 vmulps %xmm6, %xmm2, %xmm2 vfmadd231ps %xmm1, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm1) + xmm2 vfmadd231ps %xmm0, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm0) + xmm2 vaddps %xmm18, %xmm4, %xmm0 vbroadcastss %xmm14, %xmm1 vmulps %xmm18, %xmm2, %xmm3 vfmadd213ps %xmm3, %xmm7, %xmm18 # xmm18 = (xmm7 * xmm18) + xmm3 vfmadd231ps %xmm1, %xmm8, %xmm18 # xmm18 = (xmm8 * xmm1) + xmm18 vbroadcastss %xmm13, %xmm1 vshufps $0x55, %xmm13, %xmm13, %xmm19 # xmm19 = xmm13[1,1,1,1] vfmadd213ps %xmm3, %xmm7, %xmm19 # xmm19 = (xmm7 * xmm19) + xmm3 vfmadd231ps %xmm1, %xmm8, %xmm19 # xmm19 = (xmm8 * xmm1) + xmm19 vbroadcastss %xmm16, %xmm1 vshufps $0x55, %xmm16, %xmm16, %xmm3 # xmm3 = xmm16[1,1,1,1] vshufps $0xaa, %xmm16, %xmm16, %xmm4 # xmm4 = xmm16[2,2,2,2] vmulps %xmm2, %xmm4, %xmm20 vfmadd231ps %xmm3, %xmm7, %xmm20 # xmm20 = (xmm7 * xmm3) + xmm20 vfmadd231ps %xmm1, %xmm8, %xmm20 # xmm20 = (xmm8 * xmm1) + xmm20 vbroadcastss %xmm15, %xmm1 vshufps $0x55, %xmm15, %xmm15, %xmm3 # xmm3 = xmm15[1,1,1,1] vshufps $0xaa, %xmm15, %xmm15, %xmm4 # xmm4 = xmm15[2,2,2,2] vmulps %xmm2, %xmm4, %xmm2 vfmadd231ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm3) + xmm2 vfmadd231ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm1) + xmm2 vaddps 
%xmm2, %xmm0, %xmm21 jmp 0x1ee2671 movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovss (%rcx,%rax), %xmm0 vmovss 0x4(%rcx,%rax), %xmm1 vmovss 0x8(%rcx,%rax), %xmm2 vmovss 0xc(%rcx,%rax), %xmm3 vinsertps $0x1c, 0x10(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero vinsertps $0x28, 0x20(%rcx,%rax), %xmm0, %xmm18 # xmm18 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x14(%rcx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero vinsertps $0x28, 0x24(%rcx,%rax), %xmm0, %xmm21 # xmm21 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x18(%rcx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero vinsertps $0x28, 0x28(%rcx,%rax), %xmm0, %xmm20 # xmm20 = xmm0[0,1],mem[0],zero vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero vinsertps $0x28, 0x2c(%rcx,%rax), %xmm0, %xmm19 # xmm19 = xmm0[0,1],mem[0],zero jmp 0x1ee472f movq (%rsi), %rcx imulq 0x10(%rsi), %rax vmovsd 0x10(%rcx,%rax), %xmm0 vinsertps $0x20, 0x8(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vmovsd 0x34(%rcx,%rax), %xmm1 vmovaps 0x7c81b(%rip), %xmm2 # 0x1f60e90 vmovss (%rcx,%rax), %xmm3 vmovss 0xc(%rcx,%rax), %xmm4 vpermt2ps %xmm3, %xmm2, %xmm1 vmovss 0x18(%rcx,%rax), %xmm3 vmovsd 0x1c(%rcx,%rax), %xmm5 vmovss 0x24(%rcx,%rax), %xmm6 vmovss 0x28(%rcx,%rax), %xmm7 vmovss 0x2c(%rcx,%rax), %xmm8 vpermt2ps %xmm3, %xmm2, %xmm5 vmovss 0x30(%rcx,%rax), %xmm2 vmulss %xmm7, %xmm7, %xmm3 vfmadd231ss %xmm6, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm6) + xmm3 vfmadd231ss %xmm8, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm8) + xmm3 vfmadd231ss %xmm2, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm2) + xmm3 vxorps %xmm9, %xmm9, %xmm9 vmovss %xmm3, %xmm9, %xmm10 # xmm10 = xmm3[0],xmm9[1,2,3] vrsqrt14ss %xmm10, %xmm9, %xmm9 vmulss 0x803e(%rip), %xmm9, %xmm10 # 0x1eec718 vmulss 0x803a(%rip), %xmm3, %xmm3 # 0x1eec71c vmulss %xmm3, %xmm9, %xmm3 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm3, %xmm9, %xmm3 vaddss %xmm3, %xmm10, %xmm3 vmulss %xmm3, %xmm6, %xmm6 vinsertps $0x30, %xmm6, %xmm5, %xmm19 # xmm19 = xmm5[0,1,2],xmm6[0] vmulss %xmm3, %xmm7, %xmm5 vinsertps $0x30, %xmm5, %xmm1, %xmm18 # xmm18 = xmm1[0,1,2],xmm5[0] vmulss %xmm3, %xmm8, %xmm1 vmulss %xmm3, %xmm2, %xmm2 vinsertps $0x30, %xmm2, %xmm0, %xmm20 # xmm20 = xmm0[0,1,2],xmm2[0] vinsertps $0x10, 0x4(%rcx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],xmm4[2,3] vinsertps $0x20, 0x3c(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3] vinsertps $0x30, %xmm1, %xmm0, %xmm21 # xmm21 = xmm0[0,1,2],xmm1[0] vshufps $0xff, %xmm15, %xmm15, %xmm23 # xmm23 = xmm15[3,3,3,3] vshufps $0xff, %xmm16, %xmm16, %xmm22 # xmm22 = xmm16[3,3,3,3] vmovaps %xmm11, 0x30(%rsp) vshufps $0xff, %xmm11, %xmm11, %xmm13 # xmm13 = xmm11[3,3,3,3] vshufps $0xff, %xmm17, %xmm17, %xmm12 # xmm12 = xmm17[3,3,3,3] vshufps $0xff, %xmm19, %xmm19, %xmm27 # xmm27 = xmm19[3,3,3,3] vshufps $0xff, %xmm18, %xmm18, %xmm26 # xmm26 = xmm18[3,3,3,3] vshufps $0xff, %xmm21, %xmm21, %xmm25 # xmm25 = xmm21[3,3,3,3] vshufps $0xff, %xmm20, %xmm20, %xmm24 # xmm24 = xmm20[3,3,3,3] vmulss %xmm26, %xmm22, %xmm0 vfmadd231ss %xmm23, %xmm27, %xmm0 # xmm0 = (xmm27 * xmm23) + xmm0 vfmadd231ss %xmm13, %xmm25, %xmm0 # xmm0 = (xmm25 * xmm13) + xmm0 vfmadd231ss %xmm12, %xmm24, %xmm0 # xmm0 = (xmm24 * xmm12) + xmm0 vbroadcastss 0x3c733(%rip), %xmm7 # 0x1f20ec0 vxorps %xmm7, %xmm0, %xmm1 vucomiss %xmm0, %xmm1 seta %al vxorps %xmm7, %xmm27, %xmm2 vxorps %xmm7, %xmm26, %xmm3 vxorps %xmm7, %xmm25, %xmm4 vxorps %xmm7, %xmm24, %xmm5 kmovd %eax, %k1 vmovss %xmm2, %xmm27, %xmm27 {%k1} vmovss %xmm3, %xmm26, %xmm26 {%k1} vmovss %xmm4, %xmm25, %xmm25 {%k1} vmovss %xmm5, %xmm24, 
%xmm24 {%k1} vandps 0x3c6ee(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec4 vmovss 0xc1a2(%rip), %xmm2 # 0x1ef0980 vfmadd213ss 0xc19d(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc198(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc193(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc18e(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vfmadd213ss 0xc189(%rip), %xmm3, %xmm2 # xmm2 = (xmm3 * xmm2) + mem vmaxss %xmm0, %xmm1, %xmm28 vmovss 0x7ef9(%rip), %xmm29 # 0x1eec714 vsubss %xmm3, %xmm29, %xmm0 vxorps %xmm4, %xmm4, %xmm4 vucomiss %xmm4, %xmm0 jb 0x1ee4834 vsqrtss %xmm0, %xmm0, %xmm0 jmp 0x1ee496e vmovaps %xmm15, 0x20(%rsp) vmovaps %xmm16, 0x10(%rsp) vmovaps %xmm17, 0x140(%rsp) vmovaps %xmm18, 0x130(%rsp) vmovaps %xmm19, 0x120(%rsp) vmovaps %xmm20, 0x110(%rsp) vmovaps %xmm21, 0x100(%rsp) vmovaps %xmm12, 0xf0(%rsp) vmovaps %xmm13, 0xe0(%rsp) vmovaps %xmm22, 0xd0(%rsp) vmovaps %xmm23, 0xc0(%rsp) vmovaps %xmm24, 0xb0(%rsp) vmovaps %xmm25, 0xa0(%rsp) vmovaps %xmm26, 0x90(%rsp) vmovaps %xmm27, 0x180(%rsp) vmovss %xmm28, 0xc(%rsp) vmovaps %xmm3, 0x170(%rsp) vmovss %xmm2, 0x8(%rsp) callq 0x6aa20 vxorps %xmm4, %xmm4, %xmm4 vmovss 0x8(%rsp), %xmm2 vmovaps 0x170(%rsp), %xmm3 vbroadcastss 0x3c5dc(%rip), %xmm7 # 0x1f20ec0 vmovss 0x7e26(%rip), %xmm29 # 0x1eec714 vmovss 0xc(%rsp), %xmm28 vmovaps 0x180(%rsp), %xmm27 vmovaps 0x90(%rsp), %xmm26 vmovaps 0xa0(%rsp), %xmm25 vmovaps 0xb0(%rsp), %xmm24 vmovaps 0xc0(%rsp), %xmm23 vmovaps 0xd0(%rsp), %xmm22 vmovaps 0xe0(%rsp), %xmm13 vmovaps 0xf0(%rsp), %xmm12 vmovaps 0x100(%rsp), %xmm21 vmovaps 0x110(%rsp), %xmm20 vmovaps 0x120(%rsp), %xmm19 vmovaps 0x130(%rsp), %xmm18 vmovaps 0x140(%rsp), %xmm17 vmovaps 0x10(%rsp), %xmm16 vmovaps 0x20(%rsp), %xmm15 vmulss %xmm2, %xmm0, %xmm1 vmovss 0xc01e(%rip), %xmm0 # 0x1ef0998 vsubss %xmm1, %xmm0, %xmm1 vmaxss %xmm1, %xmm4, %xmm1 vxorps %xmm7, %xmm1, %xmm2 vcmpltss %xmm4, %xmm28, %k1 vmovss %xmm2, %xmm1, %xmm1 {%k1} vsubss %xmm1, %xmm0, %xmm1 vcmpltss %xmm3, %xmm29, %k1 vmovss 0xbff4(%rip), %xmm1 {%k1} # 0x1ef099c vmovaps 0x40(%rsp), %xmm14 vmulss %xmm1, %xmm14, %xmm1 vmulss 0xbfe6(%rip), %xmm1, %xmm2 # 0x1ef09a0 vroundss $0x9, %xmm2, %xmm2, %xmm2 vcvttss2si %xmm2, %eax vfnmadd213ss %xmm1, %xmm2, %xmm0 # xmm0 = -(xmm2 * xmm0) + xmm1 kmovd %eax, %k1 andl $0x3, %eax cmpl $0x2, %eax setae %cl decl %eax cmpl $0x2, %eax setb %al vmulss %xmm0, %xmm0, %xmm1 vmovss 0xbfba(%rip), %xmm2 # 0x1ef09a4 vfmadd213ss 0xbfb5(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vmovss 0xbfb1(%rip), %xmm6 # 0x1ef09ac vfmadd213ss 0xbfac(%rip), %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + mem vfmadd213ss 0xbfa7(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vfmadd213ss 0xbfa2(%rip), %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + mem vfmadd213ss 0xbf9d(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vfmadd213ss 0xbf98(%rip), %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + mem vfmadd213ss 0xbf93(%rip), %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem vmovss 0x7ce3(%rip), %xmm5 # 0x1eec71c vfmadd213ss %xmm5, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + xmm5 vfmadd213ss %xmm29, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + xmm29 vfmadd213ss %xmm29, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm6) + xmm29 vmulss %xmm2, %xmm0, %xmm0 vmovaps %xmm0, %xmm2 vmovss %xmm6, %xmm2, %xmm2 {%k1} vmovss %xmm0, %xmm6, %xmm6 {%k1} vxorps %xmm7, %xmm2, %xmm0 kmovd %ecx, %k1 vmovss %xmm0, %xmm2, %xmm2 {%k1} vxorps %xmm7, %xmm6, %xmm0 kmovd %eax, %k1 vmovss %xmm0, %xmm6, %xmm6 {%k1} vmovaps %xmm23, %xmm1 vfmsub213ss %xmm27, %xmm28, %xmm1 # xmm1 = (xmm28 * xmm1) - xmm27 vmovaps %xmm22, 
%xmm3 vfmsub213ss %xmm26, %xmm28, %xmm3 # xmm3 = (xmm28 * xmm3) - xmm26 vmovaps %xmm13, %xmm4 vfmsub213ss %xmm25, %xmm28, %xmm4 # xmm4 = (xmm28 * xmm4) - xmm25 vmovaps %xmm12, %xmm7 vfmsub213ss %xmm24, %xmm28, %xmm7 # xmm7 = (xmm28 * xmm7) - xmm24 vmulss %xmm3, %xmm3, %xmm8 vfmadd231ss %xmm1, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm1) + xmm8 vfmadd231ss %xmm4, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm4) + xmm8 vfmadd231ss %xmm7, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm7) + xmm8 vxorps %xmm0, %xmm0, %xmm0 vmovss %xmm8, %xmm0, %xmm9 # xmm9 = xmm8[0],xmm0[1,2,3] vrsqrt14ss %xmm9, %xmm0, %xmm9 vmovss 0x7c48(%rip), %xmm10 # 0x1eec718 vmulss %xmm10, %xmm9, %xmm11 vmulss %xmm5, %xmm8, %xmm8 vmulss %xmm8, %xmm9, %xmm8 vmulss %xmm9, %xmm9, %xmm9 vmulss %xmm8, %xmm9, %xmm8 vaddss %xmm8, %xmm11, %xmm8 vmulss %xmm1, %xmm8, %xmm1 vmulss %xmm3, %xmm8, %xmm3 vmulss %xmm4, %xmm8, %xmm9 vmulss %xmm7, %xmm8, %xmm7 vmulss %xmm2, %xmm1, %xmm4 vfmsub231ss %xmm23, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm23) - xmm4 vmulss %xmm2, %xmm3, %xmm3 vfmsub231ss %xmm22, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm22) - xmm3 vmulss %xmm2, %xmm9, %xmm1 vfmsub231ss %xmm13, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm13) - xmm1 vmulss %xmm2, %xmm7, %xmm2 vfmsub231ss %xmm6, %xmm12, %xmm2 # xmm2 = (xmm12 * xmm6) - xmm2 vsubss %xmm14, %xmm29, %xmm6 vmulss %xmm27, %xmm14, %xmm7 vfmadd231ss %xmm23, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm23) + xmm7 vmulss %xmm26, %xmm14, %xmm8 vfmadd231ss %xmm22, %xmm6, %xmm8 # xmm8 = (xmm6 * xmm22) + xmm8 vmulss %xmm25, %xmm14, %xmm9 vfmadd231ss %xmm13, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm13) + xmm9 vmulss %xmm24, %xmm14, %xmm11 vfmadd231ss %xmm12, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm12) + xmm11 vmulss %xmm8, %xmm8, %xmm12 vfmadd231ss %xmm7, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm7) + xmm12 vfmadd231ss %xmm9, %xmm9, %xmm12 # xmm12 = (xmm9 * xmm9) + xmm12 vfmadd231ss %xmm11, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm11) + xmm12 vmovss %xmm12, %xmm0, %xmm13 # xmm13 = xmm12[0],xmm0[1,2,3] vrsqrt14ss %xmm13, %xmm0, %xmm13 vmulss %xmm10, %xmm13, %xmm10 vmulss %xmm5, %xmm12, %xmm5 vmulss %xmm5, %xmm13, %xmm5 vmulss %xmm13, %xmm13, %xmm12 vmulss %xmm5, %xmm12, %xmm5 vaddss %xmm5, %xmm10, %xmm5 vmulss %xmm5, %xmm7, %xmm7 vmulss %xmm5, %xmm8, %xmm8 vmulss %xmm5, %xmm9, %xmm9 vmulss %xmm5, %xmm11, %xmm5 vucomiss 0xbe1e(%rip), %xmm28 # 0x1ef09c8 seta %al kmovd %eax, %k1 vmovss %xmm7, %xmm4, %xmm4 {%k1} vmovss %xmm8, %xmm3, %xmm3 {%k1} vmovss %xmm9, %xmm1, %xmm1 {%k1} vmovss %xmm5, %xmm2, %xmm2 {%k1} vbroadcastss %xmm14, %xmm5 vmulps %xmm18, %xmm5, %xmm7 vbroadcastss %xmm6, %xmm6 vfmadd213ps %xmm7, %xmm6, %xmm16 # xmm16 = (xmm6 * xmm16) + xmm7 vmulps %xmm21, %xmm5, %xmm7 vmovaps 0x30(%rsp), %xmm14 vfmadd213ps %xmm7, %xmm6, %xmm14 # xmm14 = (xmm6 * xmm14) + xmm7 vmulps %xmm20, %xmm5, %xmm7 vfmadd213ps %xmm7, %xmm6, %xmm17 # xmm17 = (xmm6 * xmm17) + xmm7 vmulps %xmm19, %xmm5, %xmm5 vfmadd213ps %xmm5, %xmm6, %xmm15 # xmm15 = (xmm6 * xmm15) + xmm5 vshufps $0xe9, %xmm0, %xmm16, %xmm5 # xmm5 = xmm16[1,2],xmm0[2,3] vblendps $0x4, %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm14[2],xmm5[3] vmulss %xmm3, %xmm3, %xmm6 vmovaps %xmm4, %xmm7 vfmadd213ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm7) + xmm6 vfnmadd231ss %xmm1, %xmm1, %xmm7 # xmm7 = -(xmm1 * xmm1) + xmm7 vfnmadd231ss %xmm2, %xmm2, %xmm7 # xmm7 = -(xmm2 * xmm2) + xmm7 vmulss %xmm2, %xmm4, %xmm8 vmovaps %xmm1, %xmm9 vfmadd213ss %xmm8, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm9) + xmm8 vaddss %xmm9, %xmm9, %xmm9 vmulss %xmm1, %xmm4, %xmm10 vmovaps %xmm2, %xmm11 vfmsub213ss %xmm10, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm11) - xmm10 vaddss %xmm11, 
%xmm11, %xmm11 vfmsub231ss %xmm1, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm1) - xmm8 vaddss %xmm8, %xmm8, %xmm8 vfmsub231ss %xmm4, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm4) - xmm6 vmovaps %xmm1, %xmm12 vfmadd213ss %xmm6, %xmm1, %xmm12 # xmm12 = (xmm1 * xmm12) + xmm6 vfnmadd231ss %xmm2, %xmm2, %xmm12 # xmm12 = -(xmm2 * xmm2) + xmm12 vmulss %xmm3, %xmm4, %xmm4 vmovaps %xmm2, %xmm13 vfmadd213ss %xmm4, %xmm1, %xmm13 # xmm13 = (xmm1 * xmm13) + xmm4 vaddss %xmm13, %xmm13, %xmm13 vfmadd213ss %xmm10, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm10 vaddss %xmm3, %xmm3, %xmm3 vfmsub231ss %xmm2, %xmm1, %xmm4 # xmm4 = (xmm1 * xmm2) - xmm4 vaddss %xmm4, %xmm4, %xmm4 vfnmadd231ss %xmm1, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm1) + xmm6 vfmadd231ss %xmm2, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm2) + xmm6 vbroadcastss %xmm7, %xmm1 vbroadcastss %xmm9, %xmm2 vmovaps 0x7a54(%rip), %xmm7 # 0x1eec700 vbroadcastss %xmm11, %xmm9 vmulps %xmm7, %xmm9, %xmm9 vmovsd 0x7a33(%rip), %xmm10 # 0x1eec6f0 vmovss 0x7a4f(%rip), %xmm11 # 0x1eec714 vfmadd231ps %xmm2, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm2) + xmm9 vfmadd231ps %xmm1, %xmm11, %xmm9 # xmm9 = (xmm11 * xmm1) + xmm9 vbroadcastss %xmm8, %xmm1 vbroadcastss %xmm12, %xmm2 vbroadcastss %xmm13, %xmm8 vmulps %xmm7, %xmm8, %xmm8 vfmadd231ps %xmm2, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm2) + xmm8 vfmadd231ps %xmm1, %xmm11, %xmm8 # xmm8 = (xmm11 * xmm1) + xmm8 vbroadcastss %xmm3, %xmm1 vbroadcastss %xmm4, %xmm2 vbroadcastss %xmm6, %xmm3 vmulps %xmm7, %xmm3, %xmm3 vfmadd231ps %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3 vfmadd231ps %xmm1, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm1) + xmm3 vaddps %xmm0, %xmm5, %xmm4 vbroadcastss %xmm16, %xmm1 vmulps %xmm0, %xmm3, %xmm2 vfmadd213ps %xmm2, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm2 vfmadd231ps %xmm1, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm1) + xmm0 vbroadcastss %xmm14, %xmm5 vshufps $0x55, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[1,1,1,1] vfmadd213ps %xmm2, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm2 vfmadd231ps %xmm5, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm5) + xmm1 vbroadcastss %xmm17, %xmm5 vshufps $0x55, %xmm17, %xmm17, %xmm6 # xmm6 = xmm17[1,1,1,1] vshufps $0xaa, %xmm17, %xmm17, %xmm2 # xmm2 = xmm17[2,2,2,2] vmulps %xmm3, %xmm2, %xmm2 vfmadd231ps %xmm6, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm6) + xmm2 vfmadd231ps %xmm5, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm5) + xmm2 vbroadcastss %xmm15, %xmm5 vshufps $0x55, %xmm15, %xmm15, %xmm6 # xmm6 = xmm15[1,1,1,1] vshufps $0xaa, %xmm15, %xmm15, %xmm7 # xmm7 = xmm15[2,2,2,2] vmulps %xmm3, %xmm7, %xmm3 vfmadd231ps %xmm6, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm6) + xmm3 vfmadd231ps %xmm5, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm5) + xmm3 vaddps %xmm3, %xmm4, %xmm3 vmovaps 0x80(%rsp), %xmm18 vmovaps 0x70(%rsp), %xmm19 vmovaps 0x60(%rsp), %xmm20 vmovaps 0x50(%rsp), %xmm21 jmp 0x1ee2d5a
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
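The vrsqrt14ss sequences that recur throughout the assembly above follow the hardware estimate with a single Newton-Raphson refinement step: the two rip-relative scalar constants are recombined as A*y + B*x*y^3, which is the classic y*(1.5 - 0.5*x*y^2) update when A = 1.5 and B = -0.5 (an inference from the instruction pattern, not taken from the source). A minimal scalar sketch, using the portable SSE estimate in place of the AVX-512 vrsqrt14ss:

#include <cmath>
#include <cstdio>
#include <immintrin.h>

// One Newton-Raphson step after a hardware reciprocal-square-root
// estimate; a single step roughly doubles the number of correct bits.
float rsqrt_refined(float x) {
  float y = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));  // ~12-bit estimate
  return 1.5f * y + (-0.5f * x) * (y * y * y);           // y*(1.5 - 0.5*x*y*y)
}

int main() {
  printf("%g vs %g\n", rsqrt_refined(2.0f), 1.0 / std::sqrt(2.0));
}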
embree::getPlatformName[abi:cxx11]()
std::string getPlatformName() { #if defined(__ANDROID__) && !defined(__64BIT__) return "Android (32bit)"; #elif defined(__ANDROID__) && defined(__64BIT__) return "Android (64bit)"; #elif defined(__LINUX__) && !defined(__64BIT__) return "Linux (32bit)"; #elif defined(__LINUX__) && defined(__64BIT__) return "Linux (64bit)"; #elif defined(__FREEBSD__) && !defined(__64BIT__) return "FreeBSD (32bit)"; #elif defined(__FREEBSD__) && defined(__64BIT__) return "FreeBSD (64bit)"; #elif defined(__CYGWIN__) && !defined(__64BIT__) return "Cygwin (32bit)"; #elif defined(__CYGWIN__) && defined(__64BIT__) return "Cygwin (64bit)"; #elif defined(__WIN32__) && !defined(__64BIT__) return "Windows (32bit)"; #elif defined(__WIN32__) && defined(__64BIT__) return "Windows (64bit)"; #elif defined(__MACOSX__) && !defined(__64BIT__) return "Mac OS X (32bit)"; #elif defined(__MACOSX__) && defined(__64BIT__) return "Mac OS X (64bit)"; #elif defined(__UNIX__) && !defined(__64BIT__) return "Unix (32bit)"; #elif defined(__UNIX__) && defined(__64BIT__) return "Unix (64bit)"; #else return "Unknown"; #endif }
pushq %rbx movq %rdi, %rbx leaq 0x10(%rdi), %rax movq %rax, (%rdi) leaq 0x7fa76(%rip), %rsi # 0x1f64ac4 leaq 0x7fa7c(%rip), %rdx # 0x1f64ad1 callq 0x8d7230 movq %rbx, %rax popq %rbx retq
/embree[P]embree/common/sys/sysinfo.cpp
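getPlatformName() is a pure predefined-macro dispatch. A condensed sketch of the same pattern using the standard compiler macros that Embree's __LINUX__/__64BIT__ style wrappers are commonly mapped to (the mapping is an assumption; the wrappers are defined elsewhere in common/sys):

#include <string>

std::string platform() {
#if defined(__linux__) && defined(__LP64__)
  return "Linux (64bit)";
#elif defined(_WIN64)
  return "Windows (64bit)";
#elif defined(__APPLE__)
  return "Mac OS X (64bit)";
#else
  return "Unknown";
#endif
}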
embree::getCompilerName[abi:cxx11]()
std::string getCompilerName() { #if defined(__INTEL_COMPILER) int icc_mayor = __INTEL_COMPILER / 100 % 100; int icc_minor = __INTEL_COMPILER % 100; std::string version = "Intel Compiler "; version += toString(icc_mayor); version += "." + toString(icc_minor); #if defined(__INTEL_COMPILER_UPDATE) version += "." + toString(__INTEL_COMPILER_UPDATE); #endif return version; #elif defined(__clang__) return "CLANG " __clang_version__; #elif defined (__GNUC__) return "GCC " __VERSION__; #elif defined(_MSC_VER) std::string version = toString(_MSC_FULL_VER); version.insert(4,"."); version.insert(9,"."); version.insert(2,"."); return "Visual C++ Compiler " + version; #else return "Unknown Compiler"; #endif }
pushq %rbx movq %rdi, %rbx leaq 0x10(%rdi), %rax movq %rax, (%rdi) leaq 0x7fa61(%rip), %rsi # 0x1f64ad2 leaq 0x7fabf(%rip), %rdx # 0x1f64b37 callq 0x8d7230 movq %rbx, %rax popq %rbx retq
/embree[P]embree/common/sys/sysinfo.cpp
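For the MSVC branch, the three insert() calls dot the raw _MSC_FULL_VER digit string in place; the order (positions 4, 9, then 2) matters because each insert shifts later indices. A trace with a hypothetical nine-digit value (real values vary by toolset):

#include <cassert>
#include <string>

int main() {
  std::string version = "192829914"; // hypothetical _MSC_FULL_VER
  version.insert(4, ".");            // "1928.29914"
  version.insert(9, ".");            // "1928.2991.4"
  version.insert(2, ".");            // "19.28.2991.4"
  assert(version == "19.28.2991.4");
}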
embree::getCPUVendor[abi:cxx11]()
std::string getCPUVendor() { #if defined(__X86_ASM__) int cpuinfo[4]; __cpuid (cpuinfo, 0); int name[4]; name[0] = cpuinfo[1]; name[1] = cpuinfo[3]; name[2] = cpuinfo[2]; name[3] = 0; return (char*)name; #elif defined(__ARM_NEON) return "ARM"; #else return "Unknown"; #endif }
pushq %r15 pushq %r14 pushq %rbx subq $0x10, %rsp movq %rdi, %r14 xorl %eax, %eax cpuid movq %rsp, %r15 movl %ebx, (%r15) movl %edx, 0x4(%r15) movl %ecx, 0x8(%r15) movl $0x0, 0xc(%r15) leaq 0x10(%rdi), %rax movq %rax, (%rdi) movq %r15, %rdi callq 0x6a3d0 leaq (%rsp,%rax), %rdx movq %r14, %rdi movq %r15, %rsi callq 0x8d7230 movq %r14, %rax addq $0x10, %rsp popq %rbx popq %r14 popq %r15 retq
/embree[P]embree/common/sys/sysinfo.cpp
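A standalone sketch of the leaf-0 vendor query using GCC/Clang's <cpuid.h> wrapper instead of Embree's __cpuid; note the EBX, EDX, ECX ordering, which is what the name[] shuffle above encodes:

#include <cpuid.h>
#include <cstdio>
#include <cstring>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx)) return 1; // CPUID unsupported
  char vendor[13] = {};           // 12 chars + NUL terminator
  memcpy(vendor + 0, &ebx, 4);    // "Genu" / "Auth"
  memcpy(vendor + 4, &edx, 4);    // "ineI" / "enti"
  memcpy(vendor + 8, &ecx, 4);    // "ntel" / "cAMD"
  printf("%s\n", vendor);         // e.g. "GenuineIntel"
}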
embree::getCPUModel()
CPU getCPUModel() { #if defined(__X86_ASM__) if (getCPUVendor() != "GenuineIntel") return CPU::UNKNOWN; int out[4]; __cpuid(out, 0); if (out[0] < 1) return CPU::UNKNOWN; __cpuid(out, 1); /* please see CPUID documentation for these formulas */ uint32_t family_ID = (out[0] >> 8) & 0x0F; uint32_t extended_family_ID = (out[0] >> 20) & 0xFF; uint32_t model_ID = (out[0] >> 4) & 0x0F; uint32_t extended_model_ID = (out[0] >> 16) & 0x0F; uint32_t DisplayFamily = family_ID; if (family_ID == 0x0F) DisplayFamily += extended_family_ID; uint32_t DisplayModel = model_ID; if (family_ID == 0x06 || family_ID == 0x0F) DisplayModel += extended_model_ID << 4; uint32_t DisplayFamily_DisplayModel = (DisplayFamily << 8) + (DisplayModel << 0); // Data from Intel® 64 and IA-32 Architectures, Volume 4, Chapter 2, Table 2-1 (CPUID Signature Values of DisplayFamily_DisplayModel) if (DisplayFamily_DisplayModel == 0x067D) return CPU::CORE_ICE_LAKE; if (DisplayFamily_DisplayModel == 0x067E) return CPU::CORE_ICE_LAKE; if (DisplayFamily_DisplayModel == 0x068C) return CPU::CORE_TIGER_LAKE; if (DisplayFamily_DisplayModel == 0x06A5) return CPU::CORE_COMET_LAKE; if (DisplayFamily_DisplayModel == 0x06A6) return CPU::CORE_COMET_LAKE; if (DisplayFamily_DisplayModel == 0x0666) return CPU::CORE_CANNON_LAKE; if (DisplayFamily_DisplayModel == 0x068E) return CPU::CORE_KABY_LAKE; if (DisplayFamily_DisplayModel == 0x069E) return CPU::CORE_KABY_LAKE; if (DisplayFamily_DisplayModel == 0x066A) return CPU::XEON_ICE_LAKE; if (DisplayFamily_DisplayModel == 0x066C) return CPU::XEON_ICE_LAKE; if (DisplayFamily_DisplayModel == 0x0655) return CPU::XEON_SKY_LAKE; if (DisplayFamily_DisplayModel == 0x064E) return CPU::CORE_SKY_LAKE; if (DisplayFamily_DisplayModel == 0x065E) return CPU::CORE_SKY_LAKE; if (DisplayFamily_DisplayModel == 0x0656) return CPU::XEON_BROADWELL; if (DisplayFamily_DisplayModel == 0x064F) return CPU::XEON_BROADWELL; if (DisplayFamily_DisplayModel == 0x0647) return CPU::CORE_BROADWELL; if (DisplayFamily_DisplayModel == 0x063D) return CPU::CORE_BROADWELL; if (DisplayFamily_DisplayModel == 0x063F) return CPU::XEON_HASWELL; if (DisplayFamily_DisplayModel == 0x063C) return CPU::CORE_HASWELL; if (DisplayFamily_DisplayModel == 0x0645) return CPU::CORE_HASWELL; if (DisplayFamily_DisplayModel == 0x0646) return CPU::CORE_HASWELL; if (DisplayFamily_DisplayModel == 0x063E) return CPU::XEON_IVY_BRIDGE; if (DisplayFamily_DisplayModel == 0x063A) return CPU::CORE_IVY_BRIDGE; if (DisplayFamily_DisplayModel == 0x062D) return CPU::SANDY_BRIDGE; if (DisplayFamily_DisplayModel == 0x062F) return CPU::SANDY_BRIDGE; if (DisplayFamily_DisplayModel == 0x062A) return CPU::SANDY_BRIDGE; if (DisplayFamily_DisplayModel == 0x062E) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x0625) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x062C) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x061E) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x061F) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x061A) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x061D) return CPU::NEHALEM; if (DisplayFamily_DisplayModel == 0x0617) return CPU::CORE2; if (DisplayFamily_DisplayModel == 0x060F) return CPU::CORE2; if (DisplayFamily_DisplayModel == 0x060E) return CPU::CORE1; if (DisplayFamily_DisplayModel == 0x0685) return CPU::XEON_PHI_KNIGHTS_MILL; if (DisplayFamily_DisplayModel == 0x0657) return CPU::XEON_PHI_KNIGHTS_LANDING; #elif defined(__ARM_NEON) return CPU::ARM; #endif return CPU::UNKNOWN; }
pushq %r14 pushq %rbx subq $0x28, %rsp leaq 0x8(%rsp), %r14 movq %r14, %rdi callq 0x1ee5082 leaq 0x7fa4a(%rip), %rsi # 0x1f64b38 movq %r14, %rdi callq 0x6a8e0 movl %eax, %ebx movq (%r14), %rdi leaq 0x18(%rsp), %rax cmpq %rax, %rdi je 0x1ee510a callq 0x6a4f0 movl $0x15, %esi testl %ebx, %ebx je 0x1ee511d movl %esi, %eax addq $0x28, %rsp popq %rbx popq %r14 retq xorl %edi, %edi xorl %eax, %eax cpuid testl %eax, %eax jle 0x1ee5113 movl $0x1, %eax cpuid movl %eax, %edx shrl $0x8, %edx andl $0xf, %edx movl %eax, %r8d shrl $0x4, %r8d andl $0xf, %r8d movl %eax, %ecx shrl $0x14, %ecx movzbl %cl, %ecx addl $0xf, %ecx shrl $0xc, %eax andl $0xf0, %eax cmpl $0xf, %edx cmovnel %edx, %ecx cmovel %eax, %edi movl $0x1, %esi cmpl $0x6, %edx cmovel %eax, %edi orl %r8d, %edi shll $0x8, %ecx orl %edi, %ecx cmpl $0x639, %ecx # imm = 0x639 jle 0x1ee51a9 cmpl $0x665, %ecx # imm = 0x665 jg 0x1ee51d2 leal -0x63a(%rcx), %eax cmpl $0x1d, %eax ja 0x1ee52c6 leaq 0x7f85e(%rip), %rcx # 0x1f649f4 movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax movl $0xd, %esi jmp 0x1ee5113 leal -0x617(%rcx), %eax cmpl $0x18, %eax ja 0x1ee5266 leaq 0x7f7d1(%rip), %rcx # 0x1f64990 movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax movl $0x11, %esi jmp 0x1ee5113 cmpl $0x68b, %ecx # imm = 0x68B jg 0x1ee5209 cmpl $0x67c, %ecx # imm = 0x67C jg 0x1ee5237 cmpl $0x666, %ecx # imm = 0x666 je 0x1ee52bc cmpl $0x66a, %ecx # imm = 0x66A je 0x1ee5202 cmpl $0x66c, %ecx # imm = 0x66C jne 0x1ee52d8 xorl %esi, %esi jmp 0x1ee5113 cmpl $0x69d, %ecx # imm = 0x69D jle 0x1ee5284 cmpl $0x69e, %ecx # imm = 0x69E je 0x1ee5294 cmpl $0x6a5, %ecx # imm = 0x6A5 je 0x1ee522d cmpl $0x6a6, %ecx # imm = 0x6A6 jne 0x1ee52d8 movl $0x3, %esi jmp 0x1ee5113 leal -0x67d(%rcx), %eax cmpl $0x2, %eax jb 0x1ee5113 cmpl $0x685, %ecx # imm = 0x685 jne 0x1ee52d8 movl $0x8, %esi jmp 0x1ee5113 movl $0x10, %esi jmp 0x1ee5113 cmpl $0x60e, %ecx # imm = 0x60E je 0x1ee5314 cmpl $0x60f, %ecx # imm = 0x60F jne 0x1ee52d8 movl $0x12, %esi jmp 0x1ee5113 cmpl $0x68c, %ecx # imm = 0x68C je 0x1ee52b2 cmpl $0x68e, %ecx # imm = 0x68E jne 0x1ee52d8 movl $0x5, %esi jmp 0x1ee5113 movl $0xa, %esi jmp 0x1ee5113 movl $0xb, %esi jmp 0x1ee5113 movl $0x2, %esi jmp 0x1ee5113 movl $0x4, %esi jmp 0x1ee5113 cmpl $0x65e, %ecx # imm = 0x65E jne 0x1ee52d8 movl $0x7, %esi jmp 0x1ee5113 movl $0x15, %esi jmp 0x1ee5113 movl $0xe, %esi jmp 0x1ee5113 movl $0xc, %esi jmp 0x1ee5113 movl $0x6, %esi jmp 0x1ee5113 movl $0xf, %esi jmp 0x1ee5113 movl $0x9, %esi jmp 0x1ee5113 movl $0x13, %esi jmp 0x1ee5113
/embree[P]embree/common/sys/sysinfo.cpp
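The family/model arithmetic is easy to get wrong, so here is the decoding isolated as a pure function together with one worked value: EAX = 0x000506E3 (a Skylake-S stepping) yields 0x065E, which the table above maps to CORE_SKY_LAKE:

#include <cstdint>
#include <cstdio>

// DisplayFamily/DisplayModel decoding from CPUID leaf 1 EAX, exactly as
// in getCPUModel() above.
uint32_t displayFamilyModel(uint32_t eax1) {
  uint32_t family  = (eax1 >> 8)  & 0x0F;
  uint32_t ext_fam = (eax1 >> 20) & 0xFF;
  uint32_t model   = (eax1 >> 4)  & 0x0F;
  uint32_t ext_mod = (eax1 >> 16) & 0x0F;
  uint32_t dfam = family + (family == 0x0F ? ext_fam : 0);
  uint32_t dmod = model  + ((family == 0x06 || family == 0x0F) ? (ext_mod << 4) : 0);
  return (dfam << 8) + dmod;
}

int main() {
  printf("0x%04X\n", displayFamilyModel(0x000506E3)); // prints 0x065E
}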
embree::stringOfCPUModel[abi:cxx11](embree::CPU)
std::string stringOfCPUModel(CPU model) { switch (model) { case CPU::XEON_ICE_LAKE : return "Xeon Ice Lake"; case CPU::CORE_ICE_LAKE : return "Core Ice Lake"; case CPU::CORE_TIGER_LAKE : return "Core Tiger Lake"; case CPU::CORE_COMET_LAKE : return "Core Comet Lake"; case CPU::CORE_CANNON_LAKE : return "Core Cannon Lake"; case CPU::CORE_KABY_LAKE : return "Core Kaby Lake"; case CPU::XEON_SKY_LAKE : return "Xeon Sky Lake"; case CPU::CORE_SKY_LAKE : return "Core Sky Lake"; case CPU::XEON_PHI_KNIGHTS_MILL : return "Xeon Phi Knights Mill"; case CPU::XEON_PHI_KNIGHTS_LANDING: return "Xeon Phi Knights Landing"; case CPU::XEON_BROADWELL : return "Xeon Broadwell"; case CPU::CORE_BROADWELL : return "Core Broadwell"; case CPU::XEON_HASWELL : return "Xeon Haswell"; case CPU::CORE_HASWELL : return "Core Haswell"; case CPU::XEON_IVY_BRIDGE : return "Xeon Ivy Bridge"; case CPU::CORE_IVY_BRIDGE : return "Core Ivy Bridge"; case CPU::SANDY_BRIDGE : return "Sandy Bridge"; case CPU::NEHALEM : return "Nehalem"; case CPU::CORE2 : return "Core2"; case CPU::CORE1 : return "Core"; case CPU::ARM : return "ARM"; case CPU::UNKNOWN : return "Unknown CPU"; } return "Unknown CPU (error)"; }
pushq %rbx movq %rdi, %rbx leaq 0x10(%rdi), %rax movq %rax, (%rdi) cmpl $0x15, %esi ja 0x1ee54be movl %esi, %eax leaq 0x7f731(%rip), %rcx # 0x1f64a6c movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax leaq 0x7f7fa(%rip), %rsi # 0x1f64b45 leaq 0x7f800(%rip), %rdx # 0x1f64b52 jmp 0x1ee54dc leaq 0x7f8e6(%rip), %rsi # 0x1f64c44 leaq 0x7f8eb(%rip), %rdx # 0x1f64c50 jmp 0x1ee54dc leaq 0x7f8b3(%rip), %rsi # 0x1f64c24 leaq 0x7f8bb(%rip), %rdx # 0x1f64c33 jmp 0x1ee54dc leaq 0x7f877(%rip), %rsi # 0x1f64bfb leaq 0x7f87e(%rip), %rdx # 0x1f64c09 jmp 0x1ee54dc leaq 0x7f7ea(%rip), %rsi # 0x1f64b81 leaq 0x7f7f3(%rip), %rdx # 0x1f64b91 jmp 0x1ee54dc leaq 0x7f860(%rip), %rsi # 0x1f64c0a leaq 0x7f865(%rip), %rdx # 0x1f64c16 jmp 0x1ee54dc leaq 0x7f816(%rip), %rsi # 0x1f64bd3 leaq 0x7f827(%rip), %rdx # 0x1f64beb jmp 0x1ee54dc leaq 0x7f791(%rip), %rsi # 0x1f64b61 leaq 0x7f799(%rip), %rdx # 0x1f64b70 jmp 0x1ee54dc leaq 0x7f851(%rip), %rsi # 0x1f64c34 leaq 0x7f859(%rip), %rdx # 0x1f64c43 jmp 0x1ee54dc leaq 0x7f77b(%rip), %rsi # 0x1f64b71 leaq 0x7f783(%rip), %rdx # 0x1f64b80 jmp 0x1ee54dc leaq 0x7f7a6(%rip), %rsi # 0x1f64baf leaq 0x7f7ac(%rip), %rdx # 0x1f64bbc jmp 0x1ee54dc leaq 0x7f737(%rip), %rsi # 0x1f64b53 leaq 0x7f73d(%rip), %rdx # 0x1f64b60 jmp 0x1ee54dc leaq 0x7f822(%rip), %rsi # 0x1f64c51 leaq 0x7f822(%rip), %rdx # 0x1f64c58 jmp 0x1ee54dc leaq 0x7f822(%rip), %rsi # 0x1f64c64 leaq 0x7f81e(%rip), %rdx # 0x1f64c67 jmp 0x1ee54dc leaq 0x7f73d(%rip), %rsi # 0x1f64b92 leaq 0x7f744(%rip), %rdx # 0x1f64ba0 jmp 0x1ee54dc leaq 0x7f803(%rip), %rsi # 0x1f64c68 leaq 0x7f807(%rip), %rdx # 0x1f64c73 jmp 0x1ee54dc leaq 0x7f7a2(%rip), %rsi # 0x1f64c17 leaq 0x7f7a7(%rip), %rdx # 0x1f64c23 jmp 0x1ee54dc leaq 0x7f71c(%rip), %rsi # 0x1f64ba1 leaq 0x7f722(%rip), %rdx # 0x1f64bae jmp 0x1ee54dc leaq 0x7f7c4(%rip), %rsi # 0x1f64c59 leaq 0x7f7c2(%rip), %rdx # 0x1f64c5e jmp 0x1ee54dc leaq 0x7f747(%rip), %rsi # 0x1f64bec leaq 0x7f74e(%rip), %rdx # 0x1f64bfa jmp 0x1ee54dc leaq 0x7f708(%rip), %rsi # 0x1f64bbd leaq 0x7f716(%rip), %rdx # 0x1f64bd2 jmp 0x1ee54dc leaq 0x7f7af(%rip), %rsi # 0x1f64c74 leaq 0x7f7bb(%rip), %rdx # 0x1f64c87 jmp 0x1ee54dc leaq 0x7f78a(%rip), %rsi # 0x1f64c5f leaq 0x7f787(%rip), %rdx # 0x1f64c63 movq %rbx, %rdi callq 0x8d7230 movq %rbx, %rax popq %rbx retq
/embree[P]embree/common/sys/sysinfo.cpp
embree::get_xcr0()
__noinline int64_t get_xcr0() { #if defined (__WIN32__) && !defined (__MINGW32__) && defined(_XCR_XFEATURE_ENABLED_MASK) int64_t xcr0 = 0; // int64_t is workaround for compiler bug under VS2013, Win32 xcr0 = _xgetbv(0); return xcr0; #else int xcr0 = 0; __asm__ ("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx" ); return xcr0; #endif }
xorl %ecx, %ecx xgetbv cltq retq
/embree[P]embree/common/sys/sysinfo.cpp
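The same xgetbv sequence is how feature detection verifies that the OS actually saves the wider register state; CPUID alone only says the hardware has it. A sketch (real code must first confirm the CPUID OSXSAVE bit, otherwise xgetbv faults):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t xcr0;
  __asm__("xgetbv" : "=a"(xcr0) : "c"(0) : "%edx"); // ECX=0 selects XCR0
  // Bit 1 = SSE (XMM) state, bit 2 = AVX (YMM) state; both must be set
  // before AVX instructions are safe to execute.
  bool avx_ok = (xcr0 & 0x6) == 0x6;
  printf("XCR0=0x%x, AVX state %s\n", xcr0, avx_ok ? "enabled" : "disabled");
}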
embree::stringOfCPUFeatures[abi:cxx11](int)
std::string stringOfCPUFeatures(int features) { std::string str; if (features & CPU_FEATURE_XMM_ENABLED) str += "XMM "; if (features & CPU_FEATURE_YMM_ENABLED) str += "YMM "; if (features & CPU_FEATURE_ZMM_ENABLED) str += "ZMM "; if (features & CPU_FEATURE_SSE ) str += "SSE "; if (features & CPU_FEATURE_SSE2 ) str += "SSE2 "; if (features & CPU_FEATURE_SSE3 ) str += "SSE3 "; if (features & CPU_FEATURE_SSSE3 ) str += "SSSE3 "; if (features & CPU_FEATURE_SSE41 ) str += "SSE4.1 "; if (features & CPU_FEATURE_SSE42 ) str += "SSE4.2 "; if (features & CPU_FEATURE_POPCNT) str += "POPCNT "; if (features & CPU_FEATURE_AVX ) str += "AVX "; if (features & CPU_FEATURE_F16C ) str += "F16C "; if (features & CPU_FEATURE_RDRAND) str += "RDRAND "; if (features & CPU_FEATURE_AVX2 ) str += "AVX2 "; if (features & CPU_FEATURE_FMA3 ) str += "FMA3 "; if (features & CPU_FEATURE_LZCNT ) str += "LZCNT "; if (features & CPU_FEATURE_BMI1 ) str += "BMI1 "; if (features & CPU_FEATURE_BMI2 ) str += "BMI2 "; if (features & CPU_FEATURE_AVX512F) str += "AVX512F "; if (features & CPU_FEATURE_AVX512DQ) str += "AVX512DQ "; if (features & CPU_FEATURE_AVX512PF) str += "AVX512PF "; if (features & CPU_FEATURE_AVX512ER) str += "AVX512ER "; if (features & CPU_FEATURE_AVX512CD) str += "AVX512CD "; if (features & CPU_FEATURE_AVX512BW) str += "AVX512BW "; if (features & CPU_FEATURE_AVX512VL) str += "AVX512VL "; if (features & CPU_FEATURE_AVX512IFMA) str += "AVX512IFMA "; if (features & CPU_FEATURE_AVX512VBMI) str += "AVX512VBMI "; if (features & CPU_FEATURE_NEON) str += "NEON "; if (features & CPU_FEATURE_NEON_2X) str += "2xNEON "; return str; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movl %esi, %ebp movq %rdi, %rbx leaq 0x10(%rdi), %r15 movq %r15, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) btl $0x19, %esi jae 0x1ee573a leaq 0x7f556(%rip), %rsi # 0x1f64c88 movq %rbx, %rdi callq 0x6a620 btl $0x1a, %ebp jae 0x1ee574f leaq 0x7f546(%rip), %rsi # 0x1f64c8d movq %rbx, %rdi callq 0x6a620 btl $0x1b, %ebp jae 0x1ee5764 leaq 0x7f536(%rip), %rsi # 0x1f64c92 movq %rbx, %rdi callq 0x6a620 testb $0x1, %bpl je 0x1ee5779 leaq 0x7f526(%rip), %rsi # 0x1f64c97 movq %rbx, %rdi callq 0x6a620 testb $0x2, %bpl je 0x1ee578e leaq 0x5966(%rip), %rsi # 0x1eeb0ec movq %rbx, %rdi callq 0x6a620 testb $0x4, %bpl je 0x1ee57a3 leaq 0x7f502(%rip), %rsi # 0x1f64c9d movq %rbx, %rdi callq 0x6a620 testb $0x8, %bpl je 0x1ee57b8 leaq 0x7f4ec(%rip), %rsi # 0x1f64c9c movq %rbx, %rdi callq 0x6a620 testb $0x10, %bpl je 0x1ee57cd leaq 0x7f4de(%rip), %rsi # 0x1f64ca3 movq %rbx, %rdi callq 0x6a620 testb $0x20, %bpl je 0x1ee57e2 leaq 0x5918(%rip), %rsi # 0x1eeb0f2 movq %rbx, %rdi callq 0x6a620 testb $0x40, %bpl je 0x1ee57f7 leaq 0x7f4bc(%rip), %rsi # 0x1f64cab movq %rbx, %rdi callq 0x6a620 testb %bpl, %bpl jns 0x1ee580b leaq 0x58f7(%rip), %rsi # 0x1eeb0fa movq %rbx, %rdi callq 0x6a620 btl $0x8, %ebp jae 0x1ee5820 leaq 0x7f49b(%rip), %rsi # 0x1f64cb3 movq %rbx, %rdi callq 0x6a620 btl $0x9, %ebp jae 0x1ee5835 leaq 0x7f48c(%rip), %rsi # 0x1f64cb9 movq %rbx, %rdi callq 0x6a620 btl $0xa, %ebp jae 0x1ee584a leaq 0x58bd(%rip), %rsi # 0x1eeb0ff movq %rbx, %rdi callq 0x6a620 btl $0xb, %ebp jae 0x1ee585f leaq 0x7f46a(%rip), %rsi # 0x1f64cc1 movq %rbx, %rdi callq 0x6a620 btl $0xc, %ebp jae 0x1ee5874 leaq 0x7f45b(%rip), %rsi # 0x1f64cc7 movq %rbx, %rdi callq 0x6a620 btl $0xd, %ebp jae 0x1ee5889 leaq 0x7f44d(%rip), %rsi # 0x1f64cce movq %rbx, %rdi callq 0x6a620 btl $0xe, %ebp jae 0x1ee589e leaq 0x7f43e(%rip), %rsi # 0x1f64cd4 movq %rbx, %rdi callq 0x6a620 btl $0x10, %ebp jae 0x1ee58b3 leaq 0x7f42f(%rip), %rsi # 0x1f64cda movq %rbx, %rdi callq 0x6a620 btl $0x11, %ebp jae 0x1ee58c8 leaq 0x7f423(%rip), %rsi # 0x1f64ce3 movq %rbx, %rdi callq 0x6a620 btl $0x12, %ebp jae 0x1ee58dd leaq 0x7f418(%rip), %rsi # 0x1f64ced movq %rbx, %rdi callq 0x6a620 btl $0x13, %ebp jae 0x1ee58f2 leaq 0x7f40d(%rip), %rsi # 0x1f64cf7 movq %rbx, %rdi callq 0x6a620 btl $0x14, %ebp jae 0x1ee5907 leaq 0x7f402(%rip), %rsi # 0x1f64d01 movq %rbx, %rdi callq 0x6a620 btl $0x15, %ebp jae 0x1ee591c leaq 0x7f3f7(%rip), %rsi # 0x1f64d0b movq %rbx, %rdi callq 0x6a620 btl $0x16, %ebp jae 0x1ee5931 leaq 0x7f3ec(%rip), %rsi # 0x1f64d15 movq %rbx, %rdi callq 0x6a620 btl $0x17, %ebp jae 0x1ee5946 leaq 0x7f3e1(%rip), %rsi # 0x1f64d1f movq %rbx, %rdi callq 0x6a620 btl $0x18, %ebp jae 0x1ee595b leaq 0x7f3d8(%rip), %rsi # 0x1f64d2b movq %rbx, %rdi callq 0x6a620 btl $0x1c, %ebp jae 0x1ee5970 leaq 0x7f3d1(%rip), %rsi # 0x1f64d39 movq %rbx, %rdi callq 0x6a620 btl $0x1d, %ebp jae 0x1ee5985 leaq 0x7f3ba(%rip), %rsi # 0x1f64d37 movq %rbx, %rdi callq 0x6a620 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 movq (%rbx), %rdi cmpq %r15, %rdi je 0x1ee59a3 callq 0x6a4f0 movq %r14, %rdi callq 0x6a600
/embree[P]embree/common/sys/sysinfo.cpp
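The long if-chain compiles to one branch per flag (the btl/testb sequence above). A table-driven sketch of the same mapping; the bit positions are read off the disassembly (bit 25 for XMM_ENABLED, bit 0 for SSE, ...), so treat them as illustrative rather than the library's published constants:

#include <string>

std::string featureString(int features) {
  static const struct { int bit; const char* name; } table[] = {
    {25, "XMM "}, {26, "YMM "}, {27, "ZMM "},
    {0, "SSE "},  {1, "SSE2 "}, {2, "SSE3 "}, {3, "SSSE3 "},
    {4, "SSE4.1 "}, {5, "SSE4.2 "}, {6, "POPCNT "}, {7, "AVX "},
  };
  std::string s;
  for (const auto& e : table)
    if (features & (1 << e.bit)) s += e.name; // append name per set bit
  return s;
}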
embree::stringOfISA[abi:cxx11](int)
std::string stringOfISA (int isa) { if (isa == SSE) return "SSE"; if (isa == SSE2) return "SSE2"; if (isa == SSE3) return "SSE3"; if (isa == SSSE3) return "SSSE3"; if (isa == SSE41) return "SSE4.1"; if (isa == SSE42) return "SSE4.2"; if (isa == AVX) return "AVX"; if (isa == AVX2) return "AVX2"; if (isa == AVX512) return "AVX512"; if (isa == NEON) return "NEON"; if (isa == NEON_2X) return "2xNEON"; return "UNKNOWN"; }
pushq %rbx movq %rdi, %rbx leaq 0x10(%rdi), %rax movq %rax, (%rdi) cmpl $0x200007e, %esi # imm = 0x200007E jle 0x1ee59fd cmpl $0xe737dfe, %esi # imm = 0xE737DFE jg 0x1ee5a3c cmpl $0x200007f, %esi # imm = 0x200007F je 0x1ee5ae3 cmpl $0x60000ff, %esi # imm = 0x60000FF je 0x1ee5ab3 cmpl $0x6007dff, %esi # imm = 0x6007DFF jne 0x1ee5b03 leaq 0x7f36a(%rip), %rsi # 0x1f64d5b leaq 0x7f367(%rip), %rdx # 0x1f64d5f jmp 0x1ee5b11 cmpl $0x2000006, %esi # imm = 0x2000006 jle 0x1ee5a6f cmpl $0x2000007, %esi # imm = 0x2000007 je 0x1ee5ad3 cmpl $0x200000f, %esi # imm = 0x200000F je 0x1ee5aa3 cmpl $0x200001f, %esi # imm = 0x200001F jne 0x1ee5b03 leaq 0x7f319(%rip), %rsi # 0x1f64d49 leaq 0x7f318(%rip), %rdx # 0x1f64d4f jmp 0x1ee5b11 cmpl $0xe737dff, %esi # imm = 0xE737DFF je 0x1ee5af3 cmpl $0x10000003, %esi # imm = 0x10000003 je 0x1ee5ac3 cmpl $0x26007dff, %esi # imm = 0x26007DFF jne 0x1ee5b03 leaq 0x7f304(%rip), %rsi # 0x1f64d67 leaq 0x7f303(%rip), %rdx # 0x1f64d6d jmp 0x1ee5b11 cmpl $0x2000001, %esi # imm = 0x2000001 je 0x1ee5a93 cmpl $0x2000003, %esi # imm = 0x2000003 jne 0x1ee5b03 leaq 0x563b(%rip), %rsi # 0x1eeb0c5 leaq 0x5638(%rip), %rdx # 0x1eeb0c9 jmp 0x1ee5b11 leaq 0x7f2a5(%rip), %rsi # 0x1f64d3f leaq 0x7f2a1(%rip), %rdx # 0x1f64d42 jmp 0x1ee5b11 leaq 0x7f299(%rip), %rsi # 0x1f64d43 leaq 0x7f297(%rip), %rdx # 0x1f64d48 jmp 0x1ee5b11 leaq 0x7f29d(%rip), %rsi # 0x1f64d57 leaq 0x7f299(%rip), %rdx # 0x1f64d5a jmp 0x1ee5b11 leaq 0x7f29f(%rip), %rsi # 0x1f64d69 leaq 0x7f29c(%rip), %rdx # 0x1f64d6d jmp 0x1ee5b11 leaq 0x7f26a(%rip), %rsi # 0x1f64d44 leaq 0x7f267(%rip), %rdx # 0x1f64d48 jmp 0x1ee5b11 leaq 0x7f266(%rip), %rsi # 0x1f64d50 leaq 0x7f265(%rip), %rdx # 0x1f64d56 jmp 0x1ee5b11 leaq 0x7f266(%rip), %rsi # 0x1f64d60 leaq 0x7f265(%rip), %rdx # 0x1f64d66 jmp 0x1ee5b11 leaq 0x7f264(%rip), %rsi # 0x1f64d6e leaq 0x7f264(%rip), %rdx # 0x1f64d75 movq %rbx, %rdi callq 0x8d7230 movq %rbx, %rax popq %rbx retq
/embree[P]embree/common/sys/sysinfo.cpp
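The immediates compared against in the generated code (0x2000001, 0x6007dff, and so on) suggest each ISA identifier is a cumulative bitmask of CPU-feature flags rather than a bare enum value, which is why a chain of exact equality tests can name the target. A minimal sketch of that encoding, with made-up flag values rather than Embree's real CPU_FEATURE_* constants:

#include <cstdint>
#include <string>

// Hypothetical feature bits -- Embree's real CPU_FEATURE_* values differ.
enum : uint32_t { F_SSE = 1u << 0, F_SSE2 = 1u << 1, F_AVX = 1u << 2 };

// Each ISA is the union of every feature it requires, so distinct targets
// compare as distinct integers.
enum : uint32_t {
  ISA_SSE  = F_SSE,
  ISA_SSE2 = F_SSE | F_SSE2,
  ISA_AVX  = F_SSE | F_SSE2 | F_AVX,
};

std::string isaName(uint32_t isa) {
  switch (isa) {
    case ISA_SSE:  return "SSE";
    case ISA_SSE2: return "SSE2";
    case ISA_AVX:  return "AVX";
    default:       return "UNKNOWN";
  }
}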
embree::supportedTargetList[abi:cxx11](int)
std::string supportedTargetList (int features) { std::string v; if (hasISA(features,SSE)) v += "SSE "; if (hasISA(features,SSE2)) v += "SSE2 "; if (hasISA(features,SSE3)) v += "SSE3 "; if (hasISA(features,SSSE3)) v += "SSSE3 "; if (hasISA(features,SSE41)) v += "SSE4.1 "; if (hasISA(features,SSE42)) v += "SSE4.2 "; if (hasISA(features,AVX)) v += "AVX "; if (hasISA(features,AVXI)) v += "AVXI "; if (hasISA(features,AVX2)) v += "AVX2 "; if (hasISA(features,AVX512)) v += "AVX512 "; if (hasISA(features,NEON)) v += "NEON "; if (hasISA(features,NEON_2X)) v += "2xNEON "; return v; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movl %esi, %ebp movq %rdi, %rbx leaq 0x10(%rdi), %r15 movq %r15, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) notl %ebp testl $0x2000001, %ebp # imm = 0x2000001 jne 0x1ee5b5e leaq 0x7f141(%rip), %rsi # 0x1f64c97 movq %rbx, %rdi callq 0x6a620 testl $0x2000003, %ebp # imm = 0x2000003 jne 0x1ee5b75 leaq 0x557f(%rip), %rsi # 0x1eeb0ec movq %rbx, %rdi callq 0x6a620 testl $0x2000007, %ebp # imm = 0x2000007 jne 0x1ee5b8c leaq 0x7f119(%rip), %rsi # 0x1f64c9d movq %rbx, %rdi callq 0x6a620 testl $0x200000f, %ebp # imm = 0x200000F jne 0x1ee5ba3 leaq 0x7f101(%rip), %rsi # 0x1f64c9c movq %rbx, %rdi callq 0x6a620 testl $0x200001f, %ebp # imm = 0x200001F jne 0x1ee5bba leaq 0x7f0f1(%rip), %rsi # 0x1f64ca3 movq %rbx, %rdi callq 0x6a620 testl $0x200007f, %ebp # imm = 0x200007F jne 0x1ee5bd1 leaq 0x5529(%rip), %rsi # 0x1eeb0f2 movq %rbx, %rdi callq 0x6a620 testl $0x60000ff, %ebp # imm = 0x60000FF jne 0x1ee5be8 leaq 0x551a(%rip), %rsi # 0x1eeb0fa movq %rbx, %rdi callq 0x6a620 testl $0x60001ff, %ebp # imm = 0x60001FF jne 0x1ee5bff leaq 0x7f17f(%rip), %rsi # 0x1f64d76 movq %rbx, %rdi callq 0x6a620 testl $0x6007dff, %ebp # imm = 0x6007DFF jne 0x1ee5c16 leaq 0x54f1(%rip), %rsi # 0x1eeb0ff movq %rbx, %rdi callq 0x6a620 testl $0xe737dff, %ebp # imm = 0xE737DFF jne 0x1ee5c2d leaq 0x54e0(%rip), %rsi # 0x1eeb105 movq %rbx, %rdi callq 0x6a620 testl $0x10000003, %ebp # imm = 0x10000003 jne 0x1ee5c44 leaq 0x7f0fd(%rip), %rsi # 0x1f64d39 movq %rbx, %rdi callq 0x6a620 testl $0x26007dff, %ebp # imm = 0x26007DFF jne 0x1ee5c5b leaq 0x7f0e4(%rip), %rsi # 0x1f64d37 movq %rbx, %rdi callq 0x6a620 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 movq (%rbx), %rdi cmpq %r15, %rdi je 0x1ee5c79 callq 0x6a4f0 movq %r14, %rdi callq 0x6a600
/embree[P]embree/common/sys/sysinfo.cpp
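The compiled code inverts features once (notl %ebp) and then tests each ISA mask against the inverted value: testing a mask against ~features yields zero exactly when every bit of the mask is set in features. That matches a subset test of the form sketched below; hasISA here is reconstructed from the assembly, the real definition lives in Embree's sysinfo header.

#include <string>

// Subset test reconstructed from the assembly above: an ISA is supported
// when all of its feature bits are present in features.
static bool hasISA(int features, int isa) { return (features & isa) == isa; }

// isaSSE and isaSSE2 stand in for Embree's real ISA constants.
std::string targetList(int features, int isaSSE, int isaSSE2) {
  std::string v;
  if (hasISA(features, isaSSE))  v += "SSE ";
  if (hasISA(features, isaSSE2)) v += "SSE2 ";
  return v;
}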
embree::getExecutableFileName[abi:cxx11]()
std::string getExecutableFileName() { std::string pid = "/proc/" + toString(getpid()) + "/exe"; char buf[4096]; memset(buf,0,sizeof(buf)); if (readlink(pid.c_str(), buf, sizeof(buf)-1) == -1) return std::string(); return std::string(buf); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x1048, %rsp # imm = 0x1048 movq %rdi, %rbx callq 0x6abf0 movslq %eax, %r15 movq %r15, %r14 negq %r14 cmovsq %r15, %r14 movl $0x1, %r12d cmpq $0xa, %r14 jb 0x1ee5d07 movl $0x4, %r12d movabsq $0x346dc5d63886594b, %rsi # imm = 0x346DC5D63886594B movq %r14, %rcx cmpq $0x63, %rcx jbe 0x1ee5cfe cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x1ee5d04 cmpq $0x2710, %rcx # imm = 0x2710 jb 0x1ee5d07 movq %rcx, %rax mulq %rsi shrq $0xb, %rdx addl $0x4, %r12d cmpq $0x1869f, %rcx # imm = 0x1869F movq %rdx, %rcx ja 0x1ee5cc6 addl $-0x3, %r12d jmp 0x1ee5d07 addl $-0x2, %r12d jmp 0x1ee5d07 decl %r12d shrq $0x3f, %r15 leal (%r15,%r12), %esi leaq 0x30(%rsp), %rbp movq %rbp, -0x10(%rbp) leaq 0x20(%rsp), %r13 movq %r13, %rdi movl $0x2d, %edx callq 0x6a580 addq (%r13), %r15 movq %r15, %rdi movl %r12d, %esi movq %r14, %rdx callq 0x8d7190 leaq 0x7f039(%rip), %rcx # 0x1f64d7c movl $0x6, %r8d movq %r13, %rdi xorl %esi, %esi xorl %edx, %edx callq 0x6a1a0 leaq 0x50(%rsp), %r14 movq %r14, -0x10(%r14) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x1ee5d7c movq %rdx, 0x40(%rsp) movq (%rcx), %rdx movq %rdx, 0x50(%rsp) jmp 0x1ee5d83 movups (%rcx), %xmm0 movups %xmm0, (%r14) movq 0x8(%rax), %rdx leaq 0x40(%rsp), %rdi movq %rdx, 0x8(%rdi) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) leaq 0x7efdd(%rip), %rsi # 0x1f64d83 callq 0x6a620 leaq 0x10(%rsp), %r15 movq %r15, -0x10(%r15) movq (%rax), %rdx movq %rax, %rcx addq $0x10, %rcx cmpq %rcx, %rdx je 0x1ee5dd1 movq %rdx, (%rsp) movq (%rcx), %rdx movq %rdx, 0x10(%rsp) jmp 0x1ee5dd8 movups (%rcx), %xmm0 movups %xmm0, (%r15) movq 0x8(%rax), %rdx movq %rdx, 0x8(%rsp) movq %rcx, (%rax) movq $0x0, 0x8(%rax) movb $0x0, 0x10(%rax) movq 0x40(%rsp), %rdi cmpq %r14, %rdi je 0x1ee5dff callq 0x6a4f0 movq 0x20(%rsp), %rdi cmpq %rbp, %rdi je 0x1ee5e0e callq 0x6a4f0 leaq 0x40(%rsp), %r14 movl $0x1000, %edx # imm = 0x1000 movq %r14, %rdi xorl %esi, %esi callq 0x6a2c0 movq (%rsp), %rdi movl $0xfff, %edx # imm = 0xFFF movq %r14, %rsi callq 0x6a270 leaq 0x10(%rbx), %rcx movq %rcx, (%rbx) cmpq $-0x1, %rax je 0x1ee5e62 leaq 0x40(%rsp), %r14 movq %r14, %rdi callq 0x6a3d0 leaq (%rsp,%rax), %rdx addq $0x40, %rdx movq %rbx, %rdi movq %r14, %rsi callq 0x8d7230 jmp 0x1ee5e6e movq $0x0, 0x8(%rbx) movb $0x0, 0x10(%rbx) movq (%rsp), %rdi cmpq %r15, %rdi je 0x1ee5e7c callq 0x6a4f0 movq %rbx, %rax addq $0x1048, %rsp # imm = 0x1048 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx movq (%rsp), %rdi cmpq %r15, %rdi jne 0x1ee5ec0 jmp 0x1ee5ec5 movq %rax, %rbx movq 0x40(%rsp), %rdi cmpq %r14, %rdi je 0x1ee5eb6 callq 0x6a4f0 jmp 0x1ee5eb6 movq %rax, %rbx movq 0x20(%rsp), %rdi cmpq %rbp, %rdi je 0x1ee5ec5 callq 0x6a4f0 movq %rbx, %rdi callq 0x6a600
/embree[P]embree/common/sys/sysinfo.cpp
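The function assembles "/proc/<pid>/exe" from getpid() and resolves it with readlink. On Linux the "self" shorthand reaches the same target without the string building; a minimal sketch under that assumption (readlink does not NUL-terminate its output, which is why the original zeroes the whole buffer first):

#include <string>
#include <unistd.h>

std::string executablePath() {
  char buf[4096];
  // /proc/self/exe aliases /proc/<pid>/exe for the calling process.
  ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (n == -1) return std::string();
  buf[n] = '\0'; // readlink() does not NUL-terminate
  return std::string(buf);
}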
embree::getNumberOfLogicalThreads()
unsigned int getNumberOfLogicalThreads() { static int nThreads = -1; if (nThreads != -1) return nThreads; #if defined(__MACOSX__) || defined(__ANDROID__) nThreads = sysconf(_SC_NPROCESSORS_ONLN); // does not work in Linux LXC container assert(nThreads); #elif defined(__EMSCRIPTEN__) // WebAssembly supports pthreads, but not pthread_getaffinity_np. Get the number of logical // threads from the browser or Node.js using JavaScript. nThreads = MAIN_THREAD_EM_ASM_INT({ const isBrowser = typeof window !== 'undefined'; const isNode = typeof process !== 'undefined' && process.versions != null && process.versions.node != null; if (isBrowser) { // Return 1 if the browser does not expose hardwareConcurrency. return window.navigator.hardwareConcurrency || 1; } else if (isNode) { return require('os').cpus().length; } else { return 1; } }); #else cpu_set_t set; if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0) nThreads = CPU_COUNT(&set); #endif assert(nThreads); return nThreads; }
movl 0x23f223(%rip), %eax # 0x2125210 cmpl $-0x1, %eax je 0x1ee5ff3 retq subq $0x88, %rsp callq 0x6a6c0 leaq 0x8(%rsp), %rdx movl $0x80, %esi movq %rax, %rdi callq 0x6ac00 testl %eax, %eax jne 0x1ee602a leaq 0x8(%rsp), %rsi movl $0x80, %edi callq 0x6ac10 movl %eax, 0x23f1e6(%rip) # 0x2125210 movl 0x23f1e0(%rip), %eax # 0x2125210 addq $0x88, %rsp retq
/embree[P]embree/common/sys/sysinfo.cpp
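As the comment in the source notes, sysconf(_SC_NPROCESSORS_ONLN) misreports inside Linux LXC containers, so the Linux branch instead counts the CPUs in the calling thread's affinity mask, which respects taskset and cgroup restrictions. A minimal sketch of that branch, with the cached nThreads and the asserts omitted (on glibc this needs _GNU_SOURCE, which g++ defines by default):

#include <pthread.h>
#include <sched.h>

unsigned logicalThreads() {
  cpu_set_t set;
  // Count only the CPUs this thread is actually allowed to run on.
  if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
    return (unsigned)CPU_COUNT(&set);
  return 1; // conservative fallback if the query fails
}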
embree::alignedMalloc(unsigned long, unsigned long)
void* alignedMalloc(size_t size, size_t align) { if (size == 0) return nullptr; assert((align & (align-1)) == 0); void* ptr = _mm_malloc(size,align); if (size != 0 && ptr == nullptr) throw std::bad_alloc(); return ptr; }
pushq %rax testq %rdi, %rdi je 0x1ee60c5 movq %rdi, %rdx cmpq $0x1, %rsi jne 0x1ee60c9 movq %rdx, %rdi callq 0x6a8d0 jmp 0x1ee60f8 xorl %eax, %eax jmp 0x1ee60fd cmpq $0x9, %rsi movl $0x8, %eax cmovaeq %rsi, %rax leaq -0x1(%rsi), %rcx testq %rcx, %rsi cmovneq %rsi, %rax movq %rsp, %rdi movq %rax, %rsi callq 0x6abc0 testl %eax, %eax jne 0x1ee60f6 movq (%rsp), %rax jmp 0x1ee60f8 xorl %eax, %eax testq %rax, %rax je 0x1ee60ff popq %rcx retq movl $0x8, %edi callq 0x6a3b0 movq 0x23e200(%rip), %rcx # 0x2124310 addq $0x10, %rcx movq %rcx, (%rax) movq 0x23e3b2(%rip), %rsi # 0x21244d0 movq 0x23ddab(%rip), %rdx # 0x2123ed0 movq %rax, %rdi callq 0x6a5d0
/embree[P]embree/common/sys/alloc.cpp
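Although the source calls _mm_malloc, the generated code is recognizably a posix_memalign wrapper: an alignment of 1 falls through to plain malloc, and small alignments are clamped upward, since posix_memalign requires the alignment to be a power of two and a multiple of sizeof(void*). A minimal sketch of the same contract:

#include <cstdlib>
#include <new>

void* alignedAlloc(std::size_t size, std::size_t align) {
  if (size == 0) return nullptr;                    // nullptr-for-zero, as above
  if (align < sizeof(void*)) align = sizeof(void*); // posix_memalign minimum
  void* ptr = nullptr;                              // align must be a power of two
  if (posix_memalign(&ptr, align, size) != 0)
    throw std::bad_alloc();                         // surface failure as in the original
  return ptr;
}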
embree::os_init(bool, bool)
bool os_init(bool hugepages, bool verbose) { Lock<MutexSys> lock(os_init_mutex); if (!hugepages) { huge_pages_enabled = false; return true; } #if defined(__LINUX__) int hugepagesize = 0; std::ifstream file; file.open("/proc/meminfo",std::ios::in); if (!file.is_open()) { if (verbose) std::cout << "WARNING: Could not open /proc/meminfo. Huge page support cannot get enabled!" << std::endl; huge_pages_enabled = false; return false; } std::string line; while (getline(file,line)) { std::stringstream sline(line); while (!sline.eof() && sline.peek() == ' ') sline.ignore(); std::string tag; getline(sline,tag,' '); while (!sline.eof() && sline.peek() == ' ') sline.ignore(); std::string val; getline(sline,val,' '); while (!sline.eof() && sline.peek() == ' ') sline.ignore(); std::string unit; getline(sline,unit,' '); if (tag == "Hugepagesize:" && unit == "kB") { hugepagesize = std::stoi(val)*1024; break; } } if (hugepagesize != PAGE_SIZE_2M) { if (verbose) std::cout << "WARNING: Only 2MB huge pages supported. Huge page support cannot get enabled!" << std::endl; huge_pages_enabled = false; return false; } #endif huge_pages_enabled = true; return true; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x438, %rsp # imm = 0x438 movl %esi, %ebp movl %edi, %ebx leaq 0x269d3d(%rip), %rdi # 0x214fe90 movq %rdi, 0x70(%rsp) movb $0x1, 0x78(%rsp) callq 0x1ee7bb6 testl %ebx, %ebx je 0x1ee64c3 leaq 0x230(%rsp), %rdi callq 0x6a340 leaq 0x7ec1b(%rip), %rsi # 0x1f64d99 leaq 0x230(%rsp), %r15 movq %r15, %rdi movl $0x8, %edx callq 0x6a730 leaq 0x2a8(%rsp), %rdi callq 0x6aa80 testb %al, %al je 0x1ee64ce movl %ebp, 0x4(%rsp) leaq 0x18(%rsp), %rax movq %rax, -0x10(%rax) movq $0x0, -0x8(%rax) movb $0x0, (%rax) leaq 0x90(%rsp), %r12 movq $0x0, 0x28(%rsp) leaq 0x8(%rsp), %r14 leaq 0xa8(%rsp), %rbp leaq 0x80(%rsp), %rbx movq 0x230(%rsp), %rax movq -0x18(%rax), %rdi addq %r15, %rdi movl $0xa, %esi callq 0x6aaa0 movsbl %al, %edx movq %r15, %rdi movq %r14, %rsi callq 0x6a040 movq (%rax), %rcx movq -0x18(%rcx), %rcx testb $0x5, 0x20(%rax,%rcx) jne 0x1ee643f movq %rbp, %rdi movq %r14, %rsi movl $0x18, %edx callq 0x6ac70 movq 0xa8(%rsp), %rax movq -0x18(%rax), %rax testb $0x2, 0xc8(%rsp,%rax) jne 0x1ee625c movq %rbp, %rdi callq 0x6ab10 cmpl $0x20, %eax jne 0x1ee625c movq %rbp, %rdi callq 0x6a750 jmp 0x1ee622f movq %r12, 0x80(%rsp) movq $0x0, 0x88(%rsp) movb $0x0, 0x90(%rsp) movq %rbp, %rdi movq %rbx, %rsi movl $0x20, %edx callq 0x6a040 movq 0xa8(%rsp), %rax movq -0x18(%rax), %rax testb $0x2, 0xc8(%rsp,%rax) jne 0x1ee62b5 movq %rbp, %rdi callq 0x6ab10 cmpl $0x20, %eax jne 0x1ee62b5 movq %rbp, %rdi callq 0x6a750 jmp 0x1ee6288 leaq 0x60(%rsp), %rax movq %rax, 0x50(%rsp) movq $0x0, 0x58(%rsp) movb $0x0, 0x60(%rsp) movq %rbp, %rdi leaq 0x50(%rsp), %rsi movl $0x20, %edx callq 0x6a040 movq 0xa8(%rsp), %rax movq -0x18(%rax), %rax testb $0x2, 0xc8(%rsp,%rax) jne 0x1ee630c movq %rbp, %rdi callq 0x6ab10 cmpl $0x20, %eax jne 0x1ee630c movq %rbp, %rdi callq 0x6a750 jmp 0x1ee62df leaq 0x40(%rsp), %rax movq %rax, 0x30(%rsp) movq $0x0, 0x38(%rsp) movb $0x0, 0x40(%rsp) movq %rbp, %rdi leaq 0x30(%rsp), %rsi movl $0x20, %edx callq 0x6a040 movq %rbx, %r13 movq %rbx, %rdi leaq 0x7eab1(%rip), %rsi # 0x1f64df4 callq 0x6a8e0 movb $0x1, %bl testl %eax, %eax jne 0x1ee63de leaq 0x30(%rsp), %rdi leaq 0x7eaa4(%rip), %rsi # 0x1f64e02 callq 0x6a8e0 testl %eax, %eax jne 0x1ee63de movq 0x50(%rsp), %rbx callq 0x6aa60 movq %rax, %r12 movl (%rax), %r15d movl $0x0, (%rax) movq %rbx, %rdi leaq 0xa0(%rsp), %rsi movl $0xa, %edx callq 0x6a840 cmpq %rbx, 0xa0(%rsp) je 0x1ee6554 movslq %eax, %rdx cmpq %rax, %rdx jne 0x1ee6560 movq %rax, %rcx movl (%r12), %eax cmpl $0x22, %eax je 0x1ee6560 testl %eax, %eax jne 0x1ee63c4 movl %r15d, (%r12) shll $0xa, %ecx movq %rcx, 0x28(%rsp) xorl %ebx, %ebx leaq 0x90(%rsp), %r12 leaq 0x230(%rsp), %r15 movq 0x30(%rsp), %rdi leaq 0x40(%rsp), %rax cmpq %rax, %rdi je 0x1ee63f2 callq 0x6a4f0 movq 0x50(%rsp), %rdi leaq 0x60(%rsp), %rax cmpq %rax, %rdi je 0x1ee6406 callq 0x6a4f0 movq 0x80(%rsp), %rdi cmpq %r12, %rdi je 0x1ee6418 callq 0x6a4f0 movq %rbp, %rdi movq 0x23e72e(%rip), %rsi # 0x2124b50 callq 0x6a740 leaq 0x128(%rsp), %rdi callq 0x6a6f0 testb %bl, %bl movq %r13, %rbx jne 0x1ee61e6 movq 0x28(%rsp), %rax movq %rax, %r14 cmpl $0x200000, %eax # imm = 0x200000 sete %bl movl 0x4(%rsp), %eax xorb $0x1, %al orb %bl, %al jne 0x1ee649f movq 0x23def8(%rip), %r15 # 0x2124358 leaq 0x7e99e(%rip), %rsi # 0x1f64e05 movl $0x4d, %edx movq %r15, %rdi callq 0x6a9f0 movq (%r15), %rax addq -0x18(%rax), %r15 movq %r15, %rdi movl $0xa, %esi callq 0x6aaa0 movsbl %al, %esi movq 0x23dec6(%rip), %rdi # 0x2124358 callq 0x6a500 movq %rax, %rdi callq 0x6a490 cmpl $0x200000, %r14d # imm = 0x200000 sete 0x2699eb(%rip) # 0x214fe98 movq 0x8(%rsp), %rdi leaq 0x18(%rsp), %rax cmpq %rax, %rdi je 0x1ee6522 callq 0x6a4f0 jmp 0x1ee6522 movb $0x0, 0x2699ce(%rip) # 0x214fe98 movb $0x1, %bl jmp 0x1ee652f testb %bpl, %bpl je 0x1ee6519 movq 0x23de7e(%rip), %rbx # 0x2124358 leaq 0x7e8c6(%rip), %rsi # 0x1f64da7 movl $0x4c, %edx movq %rbx, %rdi callq 0x6a9f0 movq (%rbx), %rax addq -0x18(%rax), %rbx movq %rbx, %rdi movl $0xa, %esi callq 0x6aaa0 movsbl %al, %esi movq 0x23de4c(%rip), %rdi # 0x2124358 callq 0x6a500 movq %rax, %rdi callq 0x6a490 movb $0x0, 0x269978(%rip) # 0x214fe98 xorl %ebx, %ebx leaq 0x230(%rsp), %rdi callq 0x6a070 cmpb $0x1, 0x78(%rsp) jne 0x1ee6540 movq 0x70(%rsp), %rdi callq 0x1ee7c24 movl %ebx, %eax addq $0x438, %rsp # imm = 0x438 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x7e8f8(%rip), %rdi # 0x1f64e53 callq 0x6a190 leaq 0x7e8ec(%rip), %rdi # 0x1f64e53 callq 0x6a5c0 jmp 0x1ee65c2 movq %rax, %rdi callq 0x8d6de8 movq %rax, %r14 jmp 0x1ee664b movq %rax, %r14 jmp 0x1ee663e movq %rax, %r14 cmpl $0x0, (%r12) jne 0x1ee6599 movl %r15d, (%r12) jmp 0x1ee6599 movq %rax, %r14 movq 0x30(%rsp), %rdi leaq 0x40(%rsp), %rax cmpq %rax, %rdi je 0x1ee65ad callq 0x6a4f0 leaq 0x18(%rsp), %rbx leaq 0x90(%rsp), %r12 jmp 0x1ee65d4 jmp 0x1ee65cc jmp 0x1ee65ea jmp 0x1ee65c2 movq %rax, %r14 leaq 0x18(%rsp), %rbx jmp 0x1ee662f movq %rax, %r14 leaq 0x18(%rsp), %rbx movq 0x50(%rsp), %rdi leaq 0x60(%rsp), %rax cmpq %rax, %rdi je 0x1ee65f2 callq 0x6a4f0 jmp 0x1ee65f2 movq %rax, %r14 leaq 0x18(%rsp), %rbx movq 0x80(%rsp), %rdi cmpq %r12, %rdi je 0x1ee660e callq 0x6a4f0 jmp 0x1ee660e movq %rax, %r14 leaq 0x18(%rsp), %rbx movq 0x23e53b(%rip), %rsi # 0x2124b50 leaq 0xa8(%rsp), %rdi callq 0x6a740 leaq 0x128(%rsp), %rdi callq 0x6a6f0 movq 0x8(%rsp), %rdi cmpq %rbx, %rdi je 0x1ee663e callq 0x6a4f0 leaq 0x230(%rsp), %rdi callq 0x6a070 leaq 0x70(%rsp), %rdi callq 0x8d6eda movq %r14, %rdi callq 0x6a600
/embree[P]embree/common/sys/alloc.cpp
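The Linux branch boils down to one question: does /proc/meminfo report a default huge page size of exactly 2 MiB (PAGE_SIZE_2M)? A minimal sketch of that scan, using formatted extraction instead of the getline/peek/ignore tokenizing above:

#include <cstddef>
#include <fstream>
#include <sstream>
#include <string>

// Returns the default huge page size in bytes, or 0 if it cannot be read.
std::size_t hugePageSize() {
  std::ifstream file("/proc/meminfo");
  for (std::string line; std::getline(file, line); ) {
    std::istringstream sline(line);
    std::string tag, val, unit;
    if (sline >> tag >> val >> unit && tag == "Hugepagesize:" && unit == "kB")
      return std::stoul(val) * 1024;
  }
  return 0;
}

// Usage mirroring os_init's check: bool ok = (hugePageSize() == 2*1024*1024);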
embree::os_malloc(unsigned long, bool&)
void* os_malloc(size_t bytes, bool& hugepages) { if (bytes == 0) { hugepages = false; return nullptr; } /* try direct huge page allocation first */ if (isHugePageCandidate(bytes)) { #if defined(__MACOSX__) void* ptr = mmap(0, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0); if (ptr != MAP_FAILED) { hugepages = true; return ptr; } #elif defined(MAP_HUGETLB) void* ptr = mmap(0, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0); if (ptr != MAP_FAILED) { hugepages = true; return ptr; } #endif } /* fallback to 4k pages */ void* ptr = (char*) mmap(0, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (ptr == MAP_FAILED) throw std::bad_alloc(); hugepages = false; /* advise huge page hint for THP */ os_advise(ptr,bytes); return ptr; }
pushq %r15 pushq %r14 pushq %rbx movq %rsi, %rbx testq %rdi, %rdi je 0x1ee6707 movq %rdi, %r14 cmpb $0x1, 0x269820(%rip) # 0x214fe98 jne 0x1ee66cc leaq 0x1fffff(%r14), %rax movabsq $0x7fffffffffe00000, %rcx # imm = 0x7FFFFFFFFFE00000 andq %rax, %rcx subq %r14, %rcx movq %rcx, %rax shlq $0x6, %rax leaq (%rax,%rcx,2), %rax cmpq %r14, %rax jae 0x1ee66cc xorl %edi, %edi movq %r14, %rsi movl $0x3, %edx movl $0x40022, %ecx # imm = 0x40022 movl $0xffffffff, %r8d # imm = 0xFFFFFFFF xorl %r9d, %r9d callq 0x6a970 cmpq $-0x1, %rax je 0x1ee66cc movq %rax, %r15 movb $0x1, (%rbx) jmp 0x1ee670d xorl %edi, %edi movq %r14, %rsi movl $0x3, %edx movl $0x22, %ecx movl $0xffffffff, %r8d # imm = 0xFFFFFFFF xorl %r9d, %r9d callq 0x6a970 cmpq $-0x1, %rax je 0x1ee6716 movq %rax, %r15 movb $0x0, (%rbx) movq %rax, %rdi movq %r14, %rsi movl $0xe, %edx callq 0x6a570 jmp 0x1ee670d movb $0x0, (%rbx) xorl %r15d, %r15d movq %r15, %rax popq %rbx popq %r14 popq %r15 retq movl $0x8, %edi callq 0x6a3b0 movq 0x23dbe9(%rip), %rcx # 0x2124310 addq $0x10, %rcx movq %rcx, (%rax) movq 0x23dd9b(%rip), %rsi # 0x21244d0 movq 0x23d794(%rip), %rdx # 0x2123ed0 movq %rax, %rdi callq 0x6a5d0
/embree[P]embree/common/sys/alloc.cpp
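The allocation is two-tier: for huge-page candidates, try an explicit MAP_HUGETLB mapping; on failure fall back to normal 4 KiB pages and advise the kernel about them. The inlined advisory call after the second mmap passes 0xe, which lines up with Linux's MADV_HUGEPAGE (14), so os_advise presumably requests transparent-huge-page promotion. A minimal sketch of the Linux path, with the candidate heuristic omitted:

#include <sys/mman.h>
#include <new>

void* hugeAwareAlloc(size_t bytes, bool& hugepages) {
  if (bytes == 0) { hugepages = false; return nullptr; }
#if defined(MAP_HUGETLB)
  // Tier 1: explicit huge pages; fails if none are reserved on the system.
  void* p = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p != MAP_FAILED) { hugepages = true; return p; }
#endif
  // Tier 2: regular pages, with a THP hint so the kernel may promote them.
  void* q = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (q == MAP_FAILED) throw std::bad_alloc();
  hugepages = false;
#if defined(MADV_HUGEPAGE)
  madvise(q, bytes, MADV_HUGEPAGE);
#endif
  return q;
}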
embree::os_shrink(void*, unsigned long, unsigned long, bool)
size_t os_shrink(void* ptr, size_t bytesNew, size_t bytesOld, bool hugepages) { const size_t pageSize = hugepages ? PAGE_SIZE_2M : PAGE_SIZE_4K; bytesNew = (bytesNew+pageSize-1) & ~(pageSize-1); bytesOld = (bytesOld+pageSize-1) & ~(pageSize-1); if (bytesNew >= bytesOld) return bytesOld; if (munmap((char*)ptr+bytesNew,bytesOld-bytesNew) == -1) throw std::bad_alloc(); return bytesNew; }
pushq %rbx movq %rsi, %rbx testl %ecx, %ecx movq $-0x200000, %rax # imm = 0xFFE00000 movq $-0x1000, %rcx # imm = 0xF000 cmovneq %rax, %rcx movl $0x1fffff, %esi # imm = 0x1FFFFF movl $0xfff, %eax # imm = 0xFFF cmovneq %rsi, %rax addq %rax, %rbx andq %rcx, %rbx addq %rdx, %rax andq %rcx, %rax movq %rax, %rsi subq %rbx, %rsi jbe 0x1ee679a addq %rbx, %rdi callq 0x6a590 movl %eax, %ecx movq %rbx, %rax cmpl $-0x1, %ecx je 0x1ee679c popq %rbx retq movl $0x8, %edi callq 0x6a3b0 movq 0x23db63(%rip), %rcx # 0x2124310 addq $0x10, %rcx movq %rcx, (%rax) movq 0x23dd15(%rip), %rsi # 0x21244d0 movq 0x23d70e(%rip), %rdx # 0x2123ed0 movq %rax, %rdi callq 0x6a5d0
/embree[P]embree/common/sys/alloc.cpp
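Both byte counts are rounded up to the page granularity with the standard power-of-two idiom before the tail pages are unmapped, and the caller gets back the rounded bytesNew it must later hand to os_free. A worked sketch of the rounding:

#include <cstddef>

// pageSize must be a power of two: adding pageSize-1 carries into the next
// page for any non-multiple, and the mask clears the low bits.
constexpr std::size_t roundUpToPage(std::size_t bytes, std::size_t pageSize) {
  return (bytes + pageSize - 1) & ~(pageSize - 1);
}

static_assert(roundUpToPage(5000, 4096) == 8192, "partial page rounds up");
static_assert(roundUpToPage(4096, 4096) == 4096, "exact multiple unchanged");
static_assert(roundUpToPage(1, 0x200000) == 0x200000, "2 MiB huge pages");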
embree::os_free(void*, unsigned long, bool)
void os_free(void* ptr, size_t bytes, bool hugepages) { if (bytes == 0) return; /* for hugepages we need to also align the size */ const size_t pageSize = hugepages ? PAGE_SIZE_2M : PAGE_SIZE_4K; bytes = (bytes+pageSize-1) & ~(pageSize-1); if (munmap(ptr,bytes) == -1) throw std::bad_alloc(); }
pushq %rax testq %rsi, %rsi je 0x1ee6806 leaq 0x1000(%rsi), %rax addq $0x200000, %rsi # imm = 0x200000 testb %dl, %dl movq $-0x200000, %rcx # imm = 0xFFE00000 movq $-0x1000, %rdx # imm = 0xF000 cmovneq %rcx, %rdx cmoveq %rax, %rsi decq %rsi andq %rdx, %rsi callq 0x6a590 cmpl $-0x1, %eax je 0x1ee6808 popq %rax retq movl $0x8, %edi callq 0x6a3b0 movq 0x23daf7(%rip), %rcx # 0x2124310 addq $0x10, %rcx movq %rcx, (%rax) movq 0x23dca9(%rip), %rsi # 0x21244d0 movq 0x23d6a2(%rip), %rdx # 0x2123ed0 movq %rax, %rdi callq 0x6a5d0 nop
/embree[P]embree/common/sys/alloc.cpp
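Taken together, os_malloc, os_shrink, and os_free form one reservation's lifecycle: the size os_shrink returns is the page-rounded size still mapped, which is exactly what os_free must be given. A hedged usage sketch against the declarations shown above:

#include <cstddef>

namespace embree { // declarations matching the definitions above
  void*       os_malloc(std::size_t bytes, bool& hugepages);
  std::size_t os_shrink(void* ptr, std::size_t bytesNew, std::size_t bytesOld, bool hugepages);
  void        os_free(void* ptr, std::size_t bytes, bool hugepages);
}

void lifecycleExample() {
  bool huge = false;
  void* p = embree::os_malloc(10u << 20, huge);                       // reserve 10 MiB
  std::size_t kept = embree::os_shrink(p, 6u << 20, 10u << 20, huge); // drop the tail
  embree::os_free(p, kept, huge);                                     // unmap what is still mapped
}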
embree::mapThreadID(unsigned long)
size_t mapThreadID(size_t threadID) { Lock<MutexSys> lock(mutex); if (threadIDs.size() == 0) { /* parse thread/CPU topology */ for (size_t cpuID=0;;cpuID++) { std::fstream fs; std::string cpu = std::string("/sys/devices/system/cpu/cpu") + std::to_string((long long)cpuID) + std::string("/topology/thread_siblings_list"); fs.open (cpu.c_str(), std::fstream::in); if (fs.fail()) break; int i; while (fs >> i) { if (std::none_of(threadIDs.begin(),threadIDs.end(),[&] (int id) { return id == i; })) threadIDs.push_back(i); if (fs.peek() == ',') fs.ignore(); } fs.close(); } #if 0 for (size_t i=0;i<threadIDs.size();i++) std::cout << i << " -> " << threadIDs[i] << std::endl; #endif /* verify the mapping and do not use it if the mapping has errors */ for (size_t i=0;i<threadIDs.size();i++) { for (size_t j=0;j<threadIDs.size();j++) { if (i != j && threadIDs[i] == threadIDs[j]) { threadIDs.clear(); } } } } /* re-map threadIDs if mapping is available */ size_t ID = threadID; if (threadID < threadIDs.size()) ID = threadIDs[threadID]; /* find correct thread to affinitize to */ cpu_set_t set; CPU_ZERO(&set); if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0) { for (int i=0, j=0; i<CPU_SETSIZE; i++) { if (!CPU_ISSET(i,&set)) continue; if (j == ID) { ID = i; break; } j++; } } return ID; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x2d8, %rsp # imm = 0x2D8 movq %rdi, %r14 leaq 0x269655(%rip), %rdi # 0x214fea8 movq %rdi, 0xa8(%rsp) movb $0x1, 0xb0(%rsp) callq 0x1ee7bb6 movq 0x269649(%rip), %rax # 0x214feb8 cmpq 0x26963a(%rip), %rax # 0x214feb0 jne 0x1ee6d39 movq %r14, 0xb8(%rsp) leaq 0x38(%rsp), %rbx leaq 0x78(%rsp), %r15 movq $0x0, 0x20(%rsp) leaq 0xc0(%rsp), %r14 leaq 0x28(%rsp), %r13 movq %r14, %rdi callq 0x6a560 movq %r15, %r12 movq %rbx, 0x28(%rsp) movq %r13, %rdi leaq 0x7e59a(%rip), %rsi # 0x1f64e58 leaq 0x7e5ae(%rip), %rdx # 0x1f64e73 callq 0x8d7230 movq 0x20(%rsp), %rax movq %rax, %rbp negq %rbp cmovsq %rax, %rbp movl $0x1, %r15d cmpq $0xa, %rbp jb 0x1ee6939 movl $0x4, %r15d movq %rbp, %rcx movabsq $0x346dc5d63886594b, %rsi # imm = 0x346DC5D63886594B cmpq $0x63, %rcx jbe 0x1ee6930 cmpq $0x3e7, %rcx # imm = 0x3E7 jbe 0x1ee6936 cmpq $0x2710, %rcx # imm = 0x2710 jb 0x1ee6939 movq %rcx, %rax mulq %rsi shrq $0xb, %rdx addl $0x4, %r15d cmpq $0x1869f, %rcx # imm = 0x1869F movq %rdx, %rcx ja 0x1ee68f8 addl $-0x3, %r15d jmp 0x1ee6939 addl $-0x2, %r15d jmp 0x1ee6939 decl %r15d movq 0x20(%rsp), %rbx shrq $0x3f, %rbx leal (%rbx,%r15), %esi leaq 0x58(%rsp), %rax movq %rax, 0x48(%rsp) leaq 0x48(%rsp), %rdi movl $0x2d, %edx callq 0x6a580 addq 0x48(%rsp), %rbx movq %rbx, %rdi movl %r15d, %esi movq %rbp, %rdx callq 0x8d7190 movq 0x28(%rsp), %rcx movl $0xf, %esi leaq 0x38(%rsp), %rbx cmpq %rbx, %rcx je 0x1ee698b movq 0x38(%rsp), %rsi movq 0x30(%rsp), %r8 movq 0x50(%rsp), %rdx leaq (%rdx,%r8), %rax cmpq %rsi, %rax movq %r12, %r15 leaq 0x10(%rsp), %r12 leaq 0x98(%rsp), %rbp jbe 0x1ee69c9 movl $0xf, %esi leaq 0x58(%rsp), %rdi cmpq %rdi, 0x48(%rsp) je 0x1ee69c4 movq 0x58(%rsp), %rsi cmpq %rsi, %rax jbe 0x1ee69d8 movq 0x48(%rsp), %rsi movq %r13, %rdi callq 0x6a720 jmp 0x1ee69e6 leaq 0x48(%rsp), %rdi xorl %esi, %esi xorl %edx, %edx callq 0x6a1a0 movq %r12, (%rsp) movq (%rax), %rdx leaq 0x10(%rax), %rcx cmpq %rcx, %rdx je 0x1ee6a04 movq %rdx, (%rsp) movq (%rcx), %rdx movq %rdx, 0x10(%rsp) jmp 0x1ee6a0c movups (%rcx), %xmm0 movups %xmm0, (%r12) movq %rax, %rdx addq $0x8, %rdx movq 0x8(%rax), %rsi movq %rsi, 0x8(%rsp) movq %rcx, (%rax) movq $0x0, (%rdx) movb $0x0, (%rcx) movq %rbp, 0x88(%rsp) leaq 0x88(%rsp), %rdi leaq 0x7e434(%rip), %rsi # 0x1f64e74 leaq 0x7e44b(%rip), %rdx # 0x1f64e92 callq 0x8d7230 movq (%rsp), %rcx movl $0xf, %esi cmpq %r12, %rcx je 0x1ee6a5f movq 0x10(%rsp), %rsi movq 0x8(%rsp), %r8 movq 0x90(%rsp), %rdx leaq (%rdx,%r8), %rax cmpq %rsi, %rax jbe 0x1ee6a91 movl $0xf, %esi cmpq %rbp, 0x88(%rsp) je 0x1ee6a8c movq 0x98(%rsp), %rsi cmpq %rsi, %rax jbe 0x1ee6aa3 movq 0x88(%rsp), %rsi movq %rsp, %rdi callq 0x6a720 jmp 0x1ee6ab4 leaq 0x88(%rsp), %rdi xorl %esi, %esi xorl %edx, %edx callq 0x6a1a0 movq %r15, 0x68(%rsp) movq (%rax), %rdx leaq 0x10(%rax), %rcx cmpq %rcx, %rdx je 0x1ee6ad4 movq %rdx, 0x68(%rsp) movq (%rcx), %rdx movq %rdx, 0x78(%rsp) jmp 0x1ee6adb movups (%rcx), %xmm0 movups %xmm0, (%r15) movq %rax, %rdx addq $0x8, %rdx movq 0x8(%rax), %rsi movq %rsi, 0x70(%rsp) movq %rcx, (%rax) movq $0x0, (%rdx) movb $0x0, (%rcx) movq 0x88(%rsp), %rdi cmpq %rbp, %rdi je 0x1ee6b0a callq 0x6a4f0 movq (%rsp), %rdi cmpq %r12, %rdi movq %rsp, %r12 leaq 0x269395(%rip), %rbp # 0x214feb0 je 0x1ee6b22 callq 0x6a4f0 movq 0x48(%rsp), %rdi leaq 0x58(%rsp), %rax cmpq %rax, %rdi je 0x1ee6b36 callq 0x6a4f0 movq 0x28(%rsp), %rdi cmpq %rbx, %rdi je 0x1ee6b45 callq 0x6a4f0 movq 0x68(%rsp), %rsi movq %r14, %rdi movl $0x8, %edx callq 0x6a2f0 movq 0xc0(%rsp), %rax movq -0x18(%rax), %rax testb $0x5, 0xe0(%rsp,%rax) jne 0x1ee6cb7 movq %r14, %rdi movq %r13, %rsi callq 0x6ac60 movq (%rax), %rcx movq -0x18(%rcx), %rcx testb $0x5, 0x20(%rax,%rcx) jne 0x1ee6c8e movq 0x26931b(%rip), %rax # 0x214feb0 movq 0x26931c(%rip), %rsi # 0x214feb8 movq %rsi, %rdx subq %rax, %rdx movq %rdx, %rdi sarq $0x5, %rdi testq %rdi, %rdi jle 0x1ee6beb movl 0x28(%rsp), %r8d andq $-0x20, %rdx addq %rax, %rdx incq %rdi addq $0x10, %rax movq %rax, %rcx cmpl -0x10(%rcx), %r8d je 0x1ee6c31 cmpl -0x8(%rcx), %r8d je 0x1ee6c37 cmpl (%rcx), %r8d je 0x1ee6c41 cmpl 0x8(%rcx), %r8d je 0x1ee6c3d decq %rdi addq $0x20, %rcx cmpq $0x1, %rdi jg 0x1ee6bc4 movq %rdx, %rax movq %rsi, %rdx subq %rax, %rdx sarq $0x3, %rdx cmpq $0x1, %rdx je 0x1ee6c22 cmpq $0x2, %rdx je 0x1ee6c16 movq %rsi, %rcx cmpq $0x3, %rdx jne 0x1ee6c41 movl 0x28(%rsp), %ecx cmpl (%rax), %ecx je 0x1ee6c2c addq $0x8, %rax movl 0x28(%rsp), %ecx cmpl (%rax), %ecx je 0x1ee6c2c addq $0x8, %rax movl 0x28(%rsp), %ecx cmpl (%rax), %ecx cmovneq %rsi, %rax movq %rax, %rcx jmp 0x1ee6c41 addq $-0x10, %rcx jmp 0x1ee6c41 addq $-0x8, %rcx jmp 0x1ee6c41 addq $0x8, %rcx cmpq %rsi, %rcx jne 0x1ee6c70 movslq 0x28(%rsp), %rax movq %rax, (%rsp) cmpq 0x26926a(%rip), %rsi # 0x214fec0 je 0x1ee6c65 movq %rax, (%rsi) addq $0x8, 0x269255(%rip) # 0x214feb8 jmp 0x1ee6c70 movq %rbp, %rdi movq %r12, %rdx callq 0x1ee7262 movq %r14, %rdi callq 0x6ab10 cmpl $0x2c, %eax jne 0x1ee6b71 movq %r14, %rdi callq 0x6a750 jmp 0x1ee6b71 movq %r14, %rdi callq 0x6a260 movq 0x68(%rsp), %rdi cmpq %r15, %rdi je 0x1ee6ca5 callq 0x6a4f0 movq %r14, %rdi callq 0x6a870 incq 0x20(%rsp) jmp 0x1ee68a4 movq 0x68(%rsp), %rdi cmpq %r15, %rdi je 0x1ee6cc6 callq 0x6a4f0 leaq 0xc0(%rsp), %rdi callq 0x6a870 movq 0x2691d6(%rip), %rax # 0x214feb0 movq 0x2691d7(%rip), %rcx # 0x214feb8 cmpq %rax, %rcx movq 0xb8(%rsp), %r14 je 0x1ee6d39 xorl %edx, %edx cmpq %rax, %rcx je 0x1ee6d27 xorl %esi, %esi cmpq %rsi, %rdx je 0x1ee6d15 cmpq %rax, %rcx je 0x1ee6d15 movq (%rax,%rsi,8), %rdi cmpq %rdi, (%rax,%rdx,8) jne 0x1ee6d15 movq %rax, 0x2691a6(%rip) # 0x214feb8 movq %rax, %rcx incq %rsi movq %rcx, %rdi subq %rax, %rdi sarq $0x3, %rdi cmpq %rdi, %rsi jb 0x1ee6cf7 incq %rdx movq %rcx, %rsi subq %rax, %rsi sarq $0x3, %rsi cmpq %rsi, %rdx jb 0x1ee6cf0 movq 0x269178(%rip), %rcx # 0x214feb8 movq 0x269169(%rip), %rax # 0x214feb0 subq %rax, %rcx sarq $0x3, %rcx cmpq %r14, %rcx jbe 0x1ee6d57 movq (%rax,%r14,8), %r14 xorps %xmm0, %xmm0 leaq 0xc0(%rsp), %rbx movaps %xmm0, 0x70(%rbx) movaps %xmm0, 0x60(%rbx) movaps %xmm0, 0x50(%rbx) movaps %xmm0, 0x40(%rbx) movaps %xmm0, 0x30(%rbx) movaps %xmm0, 0x20(%rbx) movaps %xmm0, 0x10(%rbx) movaps %xmm0, (%rbx) callq 0x6a6c0 movl $0x80, %esi movq %rax, %rdi movq %rbx, %rdx callq 0x6ac00 testl %eax, %eax jne 0x1ee6dcd xorl %eax, %eax xorl %ecx, %ecx movq %rax, %rdx shrq $0x6, %rdx movq 0xc0(%rsp,%rdx,8), %rdx btq %rax, %rdx jae 0x1ee6dbd movslq %ecx, %rdx cmpq %rdx, %r14 je 0x1ee6dca incl %ecx incq %rax cmpq $0x400, %rax # imm = 0x400 jne 0x1ee6d9e jmp 0x1ee6dcd movq %rax, %r14 cmpb $0x1, 0xb0(%rsp) jne 0x1ee6de4 movq 0xa8(%rsp), %rdi callq 0x1ee7c24 movq %r14, %rax addq $0x2d8, %rsp # imm = 0x2D8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rdi callq 0x8d6de8 jmp 0x1ee6e70 movq %rax, %rbx jmp 0x1ee6e8f movq %rax, %rbx jmp 0x1ee6e82 movq %rax, %rbx jmp 0x1ee6e5b movq %rax, %rbx jmp 0x1ee6e47 movq %rax, %rbx jmp 0x1ee6e34 movq %rax, %rbx movq 0x88(%rsp), %rdi cmpq %rbp, %rdi je 0x1ee6e34 callq 0x6a4f0 movq (%rsp), %rdi leaq 0x10(%rsp), %rax cmpq %rax, %rdi je 0x1ee6e47 callq 0x6a4f0 movq 0x48(%rsp), %rdi leaq 0x58(%rsp), %rax cmpq %rax, %rdi je 0x1ee6e5b callq 0x6a4f0 movq 0x28(%rsp), %rdi leaq 0x38(%rsp), %rax cmpq %rax, %rdi jne 0x1ee6e7d jmp 0x1ee6e82 jmp 0x1ee6e70 jmp 0x1ee6e70 movq %rax, %rbx movq 0x68(%rsp), %rdi cmpq %r15, %rdi je 0x1ee6e82 callq 0x6a4f0 leaq 0xc0(%rsp), %rdi callq 0x6a870 leaq 0xa8(%rsp), %rdi callq 0x8d6eda movq %rbx, %rdi callq 0x6a600
/embree[P]embree/common/sys/thread.cpp
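The topology parse opens /sys/devices/system/cpu/cpuN/topology/thread_siblings_list for N = 0, 1, ... until an open fails; each file lists, comma-separated, the logical CPU ids that share that CPU's physical core. A minimal sketch of reading a single sibling list (the per-core deduplication and the verification pass are left out):

#include <fstream>
#include <string>
#include <vector>

std::vector<int> threadSiblings(int cpuID) {
  std::vector<int> ids;
  std::ifstream fs("/sys/devices/system/cpu/cpu" + std::to_string(cpuID) +
                   "/topology/thread_siblings_list");
  int id;
  while (fs >> id) {           // formatted read stops at the comma
    ids.push_back(id);
    if (fs.peek() == ',') fs.ignore();
  }
  return ids;
}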
embree::setAffinity(long)
void setAffinity(ssize_t affinity) { cpu_set_t cset; CPU_ZERO(&cset); //size_t threadID = mapThreadID(affinity); // this is not working properly in LXC containers when some processors are disabled size_t threadID = affinity; CPU_SET(threadID, &cset); pthread_setaffinity_np(pthread_self(), sizeof(cset), &cset); }
subq $0x88, %rsp xorps %xmm0, %xmm0 movaps %xmm0, 0x70(%rsp) movaps %xmm0, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps %xmm0, 0x30(%rsp) movaps %xmm0, 0x20(%rsp) movaps %xmm0, 0x10(%rsp) movaps %xmm0, (%rsp) cmpq $0x3ff, %rdi # imm = 0x3FF ja 0x1ee6ef0 movl $0x1, %eax movl %edi, %ecx shlq %cl, %rax shrq $0x6, %rdi orq %rax, (%rsp,%rdi,8) callq 0x6a6c0 movq %rsp, %rdx movl $0x80, %esi movq %rax, %rdi callq 0x6a610 addq $0x88, %rsp retq
/embree[P]embree/common/sys/thread.cpp
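One detail visible in the assembly: the bit is only set when the id is at most 0x3ff, i.e. below CPU_SETSIZE (1024), which is the bounds check glibc's CPU_SET macro performs; an out-of-range id silently produces an empty set. A minimal sketch of the same pinning that reports failure instead of ignoring it:

#include <pthread.h>
#include <sched.h>

bool pinSelfTo(int cpu) {
  cpu_set_t cset;
  CPU_ZERO(&cset);
  CPU_SET(cpu, &cset); // no-op if cpu >= CPU_SETSIZE, as in the code above
  return pthread_setaffinity_np(pthread_self(), sizeof(cset), &cset) == 0;
}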
embree::createThread(void (*)(void*), void*, unsigned long, long)
thread_t createThread(thread_func f, void* arg, size_t stack_size, ssize_t threadID) { /* set stack size */ pthread_attr_t attr; pthread_attr_init(&attr); if (stack_size > 0) pthread_attr_setstacksize (&attr, stack_size); /* create thread */ pthread_t* tid = new pthread_t; if (pthread_create(tid,&attr,(void*(*)(void*))threadStartup,new ThreadStartupData(f,arg,threadID)) != 0) { pthread_attr_destroy(&attr); delete tid; FATAL("pthread_create failed"); } pthread_attr_destroy(&attr); /* set affinity */ #if defined(__LINUX__) && !defined(__ANDROID__) if (threadID >= 0) { cpu_set_t cset; CPU_ZERO(&cset); threadID = mapThreadID(threadID); CPU_SET(threadID, &cset); pthread_setaffinity_np(*tid, sizeof(cset), &cset); } #elif defined(__FreeBSD__) if (threadID >= 0) { cpuset_t cset; CPU_ZERO(&cset); CPU_SET(threadID, &cset); pthread_setaffinity_np(*tid, sizeof(cset), &cset); } #elif defined(__ANDROID__) if (threadID >= 0) { cpu_set_t cset; CPU_ZERO(&cset); CPU_SET(threadID, &cset); sched_setaffinity(pthread_gettid_np(*tid), sizeof(cset), &cset); } #endif return thread_t(tid); }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0xc0, %rsp movq %rcx, %r14 movq %rdx, %rbx movq %rsi, %r15 movq %rdi, %r12 leaq 0x88(%rsp), %rdi callq 0x6a1c0 testq %rbx, %rbx je 0x1ee6f4a leaq 0x88(%rsp), %rdi movq %rbx, %rsi callq 0x6a2a0 movl $0x8, %edi callq 0x6a170 movq %rax, %rbx movl $0x18, %edi callq 0x6a170 movq %r12, (%rax) movq %r15, 0x8(%rax) movslq %r14d, %rcx movq %rcx, 0x10(%rax) leaq 0xe5(%rip), %rdx # 0x1ee705b leaq 0x88(%rsp), %r15 movq %rbx, %rdi movq %r15, %rsi movq %rax, %rcx callq 0x6ab90 movl %eax, %ebp movq %r15, %rdi callq 0x6a6d0 testl %ebp, %ebp jne 0x1ee700e testq %r14, %r14 js 0x1ee6ffb xorps %xmm0, %xmm0 movaps %xmm0, 0x70(%rsp) movaps %xmm0, 0x60(%rsp) movaps %xmm0, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps %xmm0, 0x30(%rsp) movaps %xmm0, 0x20(%rsp) movaps %xmm0, 0x10(%rsp) movaps %xmm0, (%rsp) movq %r14, %rdi callq 0x1ee6838 cmpq $0x3ff, %rax # imm = 0x3FF ja 0x1ee6feb movl $0x1, %edx movl %eax, %ecx shlq %cl, %rdx shrq $0x6, %rax orq %rdx, (%rsp,%rax,8) movq (%rbx), %rdi movq %rsp, %rdx movl $0x80, %esi callq 0x6a610 movq %rbx, %rax addq $0xc0, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq movq %rbx, %rdi callq 0x6a4f0 movl $0x10, %edi callq 0x6a3b0 movq %rax, %rbx leaq 0x7de69(%rip), %rsi # 0x1f64e93 movq %rax, %rdi callq 0x6a230 movq 0x23dbcf(%rip), %rsi # 0x2124c08 movq 0x23d988(%rip), %rdx # 0x21249c8 movq %rbx, %rdi callq 0x6a5d0 movq %rax, %r14 movq %rbx, %rdi callq 0x6a8a0 movq %r14, %rdi callq 0x6a600
/embree[P]embree/common/sys/thread.cpp
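pthread_create delivers exactly one void* to the new thread, so f, arg, and threadID are boxed into a heap-allocated ThreadStartupData whose ownership passes to the thread (threadStartup below deletes it). A minimal sketch of that boxing pattern; StartCtx and trampoline are illustrative names, not Embree's:

#include <pthread.h>

struct StartCtx { void (*f)(void*); void* arg; };

static void* trampoline(void* p) {
  StartCtx* ctx = static_cast<StartCtx*>(p);
  ctx->f(ctx->arg); // run the user function
  delete ctx;       // the new thread owns its context
  return nullptr;
}

pthread_t launch(void (*f)(void*), void* arg) {
  pthread_t tid;
  pthread_create(&tid, nullptr, trampoline, new StartCtx{f, arg});
  return tid;
}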
embree::threadStartup(embree::ThreadStartupData*)
static void* threadStartup(ThreadStartupData* parg) { _mm_setcsr(_mm_getcsr() | /*FTZ:*/ (1<<15) | /*DAZ:*/ (1<<6)); /*! Mac OS X does not support setting affinity at thread creation time */ #if defined(__MACOSX__) if (parg->affinity >= 0) setAffinity(parg->affinity); #endif parg->f(parg->arg); delete parg; return nullptr; }
pushq %rbx subq $0x10, %rsp stmxcsr 0xc(%rsp) movl $0x8040, %eax # imm = 0x8040 orl 0xc(%rsp), %eax movl %eax, 0x8(%rsp) ldmxcsr 0x8(%rsp) movq %rdi, %rbx movq 0x8(%rdi), %rdi callq *(%rbx) movq %rbx, %rdi callq 0x6a4f0 xorl %eax, %eax addq $0x10, %rsp popq %rbx retq
/embree[P]embree/common/sys/thread.cpp
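The 0x8040 immediate in the assembly is exactly (1<<15)|(1<<6): FTZ (flush-to-zero, bit 15) and DAZ (denormals-are-zero, bit 6) in MXCSR. MXCSR is per-thread state, which is why every worker thread runs this at startup rather than setting it once in main. A minimal sketch:

#include <xmmintrin.h> // _mm_getcsr / _mm_setcsr

void enableFtzDaz() {
  // Bit 15 = FTZ, bit 6 = DAZ; together 0x8040, matching the code above.
  _mm_setcsr(_mm_getcsr() | (1u << 15) | (1u << 6));
}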