name
string | code
string | asm
string | file
string |
|---|---|---|---|
testing::internal::FloatingPoint<float>::AlmostEquals(testing::internal::FloatingPoint<float> const&) const
|
Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
|
movl (%rdi), %eax
movl %eax, %ecx
notl %ecx
testl $0x7f800000, %ecx # imm = 0x7F800000
sete %cl
testl $0x7fffff, %eax # imm = 0x7FFFFF
setne %dl
testb %dl, %cl
jne 0x566c5
movl (%rsi), %ecx
movl %ecx, %edx
notl %edx
testl $0x7f800000, %edx # imm = 0x7F800000
sete %dl
testl $0x7fffff, %ecx # imm = 0x7FFFFF
setne %sil
testb %sil, %dl
je 0x566c8
xorl %eax, %eax
retq
movl %eax, %edx
negl %edx
movl $0x80000000, %esi # imm = 0x80000000
movl %eax, %edi
orl %esi, %edi
testl %eax, %eax
cmovsl %edx, %edi
movl %ecx, %eax
negl %eax
orl %ecx, %esi
testl %ecx, %ecx
cmovsl %eax, %esi
movl %edi, %eax
subl %esi, %eax
negl %eax
subl %esi, %edi
cmovbl %eax, %edi
cmpl $0x5, %edi
setb %al
retq
nop
|
/walterzhaoJR[P]leveldb/third_party/googletest/googletest/include/gtest/internal/gtest-internal.h
|
testing::internal::ThreadLocal<std::vector<testing::internal::TraceInfo, std::allocator<testing::internal::TraceInfo>>>::CreateKey()
|
static pthread_key_t CreateKey() {
pthread_key_t key;
// When a thread exits, DeleteThreadLocalValue() will be called on
// the object managed for that thread.
GTEST_CHECK_POSIX_SUCCESS_(
pthread_key_create(&key, &DeleteThreadLocalValue));
return key;
}
|
pushq %rbx
subq $0x10, %rsp
leaq -0x2558e(%rip), %rsi # 0x326c0
leaq 0xc(%rsp), %rdi
callq 0xaa30
testl %eax, %eax
je 0x57cc1
movl %eax, %ebx
leaq 0x839a(%rip), %rdx # 0x5ffff
leaq 0x8(%rsp), %rdi
movl $0x3, %esi
movl $0x6f8, %ecx # imm = 0x6F8
callq 0x4d76a
movq 0x29368(%rip), %rdi # 0x80fe8
leaq 0x86e6(%rip), %rsi # 0x6036d
movl $0x31, %edx
callq 0xa6b0
movq 0x29350(%rip), %rdi # 0x80fe8
leaq 0x8419(%rip), %rsi # 0x600b8
movl $0x12, %edx
callq 0xa6b0
movq 0x29338(%rip), %rdi # 0x80fe8
movl %ebx, %esi
callq 0xab30
leaq 0x8(%rsp), %rdi
callq 0x4d89c
movl 0xc(%rsp), %eax
addq $0x10, %rsp
popq %rbx
retq
movq %rax, %rbx
leaq 0x8(%rsp), %rdi
callq 0x4d89c
movq %rbx, %rdi
callq 0xab90
|
/walterzhaoJR[P]leveldb/third_party/googletest/googletest/include/gtest/internal/gtest-port.h
|
testing::internal::ComparisonBase<testing::internal::EqMatcher<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, testing::internal::AnyEq>::Impl<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>::~Impl()
|
bool MatchAndExplain(Lhs lhs,
MatchResultListener* /* listener */) const override {
return Op()(lhs, Unwrap(rhs_));
}
|
pushq %rbx
movq %rdi, %rbx
leaq 0x28c53(%rip), %rax # 0x80d08
movq %rax, (%rdi)
movq 0x8(%rdi), %rdi
leaq 0x18(%rbx), %rax
cmpq %rax, %rdi
je 0x580ca
callq 0xa570
movq %rbx, %rdi
popq %rbx
jmp 0xa570
nop
|
/walterzhaoJR[P]leveldb/third_party/googletest/googletest/include/gtest/gtest-matchers.h
|
virtual thunk to ncnn::BatchNorm_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
int BatchNorm_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
int dims = bottom_top_blob.dims;
int w = bottom_top_blob.w;
int h = bottom_top_blob.h;
int d = bottom_top_blob.d;
int c = bottom_top_blob.c;
int elempack = bottom_top_blob.elempack;
if (dims == 1)
{
float* ptr = bottom_top_blob;
const float* aptr = a_data;
const float* bptr = b_data;
const int size = w * elempack;
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; i + 15 < size; i += 16)
{
__m512 _p512 = _mm512_loadu_ps(ptr);
__m512 _a512 = _mm512_loadu_ps(aptr);
__m512 _b512 = _mm512_loadu_ps(bptr);
_p512 = _mm512_fmadd_ps(_p512, _b512, _a512);
_mm512_storeu_ps(ptr, _p512);
ptr += 16;
aptr += 16;
bptr += 16;
}
#endif // __AVX512F__
for (; i + 7 < size; i += 8)
{
__m256 _p256 = _mm256_loadu_ps(ptr);
__m256 _a256 = _mm256_loadu_ps(aptr);
__m256 _b256 = _mm256_loadu_ps(bptr);
_p256 = _mm256_comp_fmadd_ps(_p256, _b256, _a256);
_mm256_storeu_ps(ptr, _p256);
ptr += 8;
aptr += 8;
bptr += 8;
}
#endif // __AVX__
for (; i + 3 < size; i += 4)
{
__m128 _p128 = _mm_loadu_ps(ptr);
__m128 _a128 = _mm_loadu_ps(aptr);
__m128 _b128 = _mm_loadu_ps(bptr);
_p128 = _mm_comp_fmadd_ps(_p128, _b128, _a128);
_mm_storeu_ps(ptr, _p128);
ptr += 4;
aptr += 4;
bptr += 4;
}
#endif // __SSE__
for (; i < size; i++)
{
*ptr = *bptr * *ptr + *aptr;
ptr++;
aptr++;
bptr++;
}
}
if (dims == 2)
{
const int size = w * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < h; i++)
{
float* ptr = bottom_top_blob.row(i);
float a = a_data[i];
float b = b_data[i];
#if __SSE2__
__m128 _a128 = (elempack == 4) ? _mm_loadu_ps((const float*)a_data + i * 4) : _mm_set1_ps(a);
__m128 _b128 = (elempack == 4) ? _mm_loadu_ps((const float*)b_data + i * 4) : _mm_set1_ps(b);
#if __AVX__
__m256 _a256 = (elempack == 8) ? _mm256_loadu_ps((const float*)a_data + i * 8) : _mm256_insertf128_ps(_mm256_castps128_ps256(_a128), _a128, 1);
__m256 _b256 = (elempack == 8) ? _mm256_loadu_ps((const float*)b_data + i * 8) : _mm256_insertf128_ps(_mm256_castps128_ps256(_b128), _b128, 1);
#if __AVX512F__
__m512 _a512 = (elempack == 16) ? _mm512_loadu_ps((const float*)a_data + i * 16) : _mm512_insertf32x8(_mm512_castps256_ps512(_a256), _a256, 1);
__m512 _b512 = (elempack == 16) ? _mm512_loadu_ps((const float*)b_data + i * 16) : _mm512_insertf32x8(_mm512_castps256_ps512(_b256), _b256, 1);
#endif // __AVX512F__
#endif // __AVX__
#endif // __SSE2__
int j = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; j + 15 < size; j += 16)
{
__m512 _p512 = _mm512_loadu_ps(ptr);
_p512 = _mm512_fmadd_ps(_p512, _b512, _a512);
_mm512_storeu_ps(ptr, _p512);
ptr += 16;
}
#endif // __AVX512F__
for (; j + 7 < size; j += 8)
{
__m256 _p256 = _mm256_loadu_ps(ptr);
_p256 = _mm256_comp_fmadd_ps(_p256, _b256, _a256);
_mm256_storeu_ps(ptr, _p256);
ptr += 8;
}
#endif // __AVX__
for (; j + 3 < size; j += 4)
{
__m128 _p128 = _mm_loadu_ps(ptr);
_p128 = _mm_comp_fmadd_ps(_p128, _b128, _a128);
_mm_storeu_ps(ptr, _p128);
ptr += 4;
}
#endif // __SSE__
for (; j < size; j++)
{
*ptr = b * *ptr + a;
ptr++;
}
}
}
if (dims == 3 || dims == 4)
{
const int size = w * h * d * elempack;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < c; q++)
{
float* ptr = bottom_top_blob.channel(q);
float a = a_data[q];
float b = b_data[q];
#if __SSE2__
__m128 _a128 = (elempack == 4) ? _mm_loadu_ps((const float*)a_data + q * 4) : _mm_set1_ps(a);
__m128 _b128 = (elempack == 4) ? _mm_loadu_ps((const float*)b_data + q * 4) : _mm_set1_ps(b);
#if __AVX__
__m256 _a256 = (elempack == 8) ? _mm256_loadu_ps((const float*)a_data + q * 8) : _mm256_insertf128_ps(_mm256_castps128_ps256(_a128), _a128, 1);
__m256 _b256 = (elempack == 8) ? _mm256_loadu_ps((const float*)b_data + q * 8) : _mm256_insertf128_ps(_mm256_castps128_ps256(_b128), _b128, 1);
#if __AVX512F__
__m512 _a512 = (elempack == 16) ? _mm512_loadu_ps((const float*)a_data + q * 16) : _mm512_insertf32x8(_mm512_castps256_ps512(_a256), _a256, 1);
__m512 _b512 = (elempack == 16) ? _mm512_loadu_ps((const float*)b_data + q * 16) : _mm512_insertf32x8(_mm512_castps256_ps512(_b256), _b256, 1);
#endif // __AVX512F__
#endif // __AVX__
#endif // __SSE2__
int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
for (; i + 15 < size; i += 16)
{
__m512 _p512 = _mm512_loadu_ps(ptr);
_p512 = _mm512_fmadd_ps(_p512, _b512, _a512);
_mm512_storeu_ps(ptr, _p512);
ptr += 16;
}
#endif // __AVX512F__
for (; i + 7 < size; i += 8)
{
__m256 _p256 = _mm256_loadu_ps(ptr);
_p256 = _mm256_comp_fmadd_ps(_p256, _b256, _a256);
_mm256_storeu_ps(ptr, _p256);
ptr += 8;
}
#endif // __AVX__
for (; i + 3 < size; i += 4)
{
__m128 _p128 = _mm_loadu_ps(ptr);
_p128 = _mm_comp_fmadd_ps(_p128, _b128, _a128);
_mm_storeu_ps(ptr, _p128);
ptr += 4;
}
#endif // __SSE__
for (; i < size; i++)
{
*ptr = b * *ptr + a;
ptr++;
}
}
}
return 0;
}
|
pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x90ec0
xorl %eax, %eax
popq %rcx
retq
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/batchnorm_x86_avx.cpp
|
ncnn::conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse(ncnn::Mat const&, ncnn::Mat&, int, int, ncnn::Option const&)
|
static void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx2(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#endif
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-8a-inch/8a-36-outch/4b
kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);
int p = 0;
for (; p + 3 < outch; p += 4)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
Mat g0 = kernel_tm_pack8to1.channel(p / 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
for (int i = 0; i < 4; i++)
{
const short* k00 = k0.row<const short>(q + i * 2);
const short* k10 = k1.row<const short>(q + i * 2);
const short* k20 = k2.row<const short>(q + i * 2);
const short* k30 = k3.row<const short>(q + i * 2);
const short* k01 = k0.row<const short>(q + i * 2 + 1);
const short* k11 = k1.row<const short>(q + i * 2 + 1);
const short* k21 = k2.row<const short>(q + i * 2 + 1);
const short* k31 = k3.row<const short>(q + i * 2 + 1);
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k10[k];
g00[3] = k11[k];
g00[4] = k20[k];
g00[5] = k21[k];
g00[6] = k30[k];
g00[7] = k31[k];
g00 += 8;
}
#else
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00[1] = k1.row<const short>(q + i)[k];
g00[2] = k2.row<const short>(q + i)[k];
g00[3] = k3.row<const short>(q + i)[k];
g00 += 4;
}
#endif
}
}
}
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00 += 1;
}
}
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movl %ecx, %r14d
movl %edx, %ebx
movq %rsi, 0x30(%rsp)
movq %rdi, %r12
leaq 0x80(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movl $0x2, %r8d
movl $0x24, %esi
xorl %r9d, %r9d
callq 0x5b24c
movl %r14d, %eax
movq %rax, 0x50(%rsp)
movq %r14, 0x78(%rsp)
testl %r14d, %r14d
movq %rbx, 0x70(%rsp)
jle 0xed0bb
leal (%rbx,%rbx,8), %eax
movl %eax, 0x40(%rsp)
movq (%r12), %rax
movq %rax, 0x38(%rsp)
movslq 0xac(%rsp), %rcx
movq 0x80(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x90(%rsp), %rax
movq 0xc0(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0x48(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
movl %ebx, %eax
movq %rax, 0x58(%rsp)
movq $0x0, 0x18(%rsp)
leaq 0x32bec2(%rip), %r13 # 0x418dc0
testl %ebx, %ebx
jle 0xed094
movl 0x40(%rsp), %eax
imull 0x18(%rsp), %eax
cltq
addq 0x38(%rsp), %rax
movq %rax, 0x68(%rsp)
movq 0x28(%rsp), %rbx
xorl %eax, %eax
movq %rbx, 0x8(%rsp)
movq %rax, 0x20(%rsp)
leaq (%rax,%rax,8), %rax
movq 0x68(%rsp), %r11
movsbl (%r11,%rax), %ecx
movsbl 0x1(%r11,%rax), %edx
movsbl 0x2(%r11,%rax), %esi
movsbl 0x3(%r11,%rax), %edi
movsbl 0x4(%r11,%rax), %r8d
movsbl 0x5(%r11,%rax), %r9d
movsbl 0x6(%r11,%rax), %ebx
movsbl 0x7(%r11,%rax), %ebp
movsbl 0x8(%r11,%rax), %r14d
movl $0x4, %r15d
movswl %cx, %eax
movl %eax, 0x10(%rsp)
movswl %dx, %ecx
movswl %si, %eax
movswl %di, %edi
movswl %r8w, %esi
movswl %r9w, %r9d
movswl %bx, %ebx
movswl %bp, %edx
movswl %r14w, %r8d
movzwl -0x4(%r15,%r13), %r12d
movl %r12d, %r14d
imull 0x10(%rsp), %r14d
movzwl -0x2(%r15,%r13), %r11d
movl %r11d, %ebp
imull %ecx, %ebp
addl %r14d, %ebp
movzwl (%r15,%r13), %r10d
movl %r10d, %r14d
imull %eax, %r14d
addl %ebp, %r14d
movw %r14w, 0xcc(%rsp,%r15)
movl %r12d, %ebp
imull %edi, %ebp
movl %r11d, %r14d
imull %esi, %r14d
addl %ebp, %r14d
movl %r10d, %ebp
imull %r9d, %ebp
addl %r14d, %ebp
movw %bp, 0xce(%rsp,%r15)
imull %ebx, %r12d
imull %edx, %r11d
addl %r12d, %r11d
imull %r8d, %r10d
addl %r11d, %r10d
movw %r10w, 0xd0(%rsp,%r15)
addq $0x6, %r15
cmpq $0x28, %r15
jne 0xecf92
movq 0x8(%rsp), %rbx
movq %rbx, %rax
xorl %ecx, %ecx
leaq 0x32bda1(%rip), %r11 # 0x418dc2
leaq (%rcx,%rcx,2), %rsi
movzwl 0xd0(%rsp,%rsi,2), %edx
vmovd 0xd2(%rsp,%rsi,2), %xmm0
movq %r11, %rsi
xorl %edi, %edi
movzwl -0x2(%rsi), %r8d
imulw %dx, %r8w
vmovd (%rsi), %xmm1
vpmullw %xmm0, %xmm1, %xmm1
vmovd %xmm1, %r9d
addl %r8d, %r9d
vpextrw $0x1, %xmm1, %r8d
addl %r9d, %r8d
movw %r8w, (%rax,%rdi,2)
incq %rdi
addq $0x6, %rsi
cmpq $0x6, %rdi
jne 0xed03b
incq %rcx
addq $0xc, %rax
cmpq $0x6, %rcx
jne 0xed021
movq 0x20(%rsp), %rax
incq %rax
addq 0x60(%rsp), %rbx
cmpq 0x58(%rsp), %rax
jne 0xecf22
movq 0x18(%rsp), %rcx
incq %rcx
movq 0x48(%rsp), %rax
addq %rax, 0x28(%rsp)
movq %rcx, 0x18(%rsp)
cmpq 0x50(%rsp), %rcx
movq 0x70(%rsp), %rbx
jne 0xecefe
movq 0x78(%rsp), %r14
leal 0x3(%r14), %eax
testl %r14d, %r14d
cmovnsl %r14d, %eax
sarl $0x2, %eax
leal (%rax,%rax,2), %eax
movl %r14d, %ecx
subl %eax, %ecx
movq $0x0, (%rsp)
movl $0x8, %r8d
movq 0x30(%rsp), %rdi
movl %ebx, %esi
movl $0x24, %edx
movl $0x4, %r9d
callq 0x5a266
xorl %r8d, %r8d
movslq %ebx, %rax
cmpl $0x4, %r14d
jl 0xed2b1
movslq 0xac(%rsp), %rcx
movq 0x80(%rsp), %rdi
movq 0x90(%rsp), %r9
movq 0x30(%rsp), %rsi
movslq 0x2c(%rsi), %r8
movq (%rsi), %rdx
movq %rdx, 0x40(%rsp)
movq 0x10(%rsi), %rdx
movq 0x40(%rsi), %rsi
imulq %rdx, %rsi
movq %rsi, 0x38(%rsp)
imulq %r9, %rcx
imulq 0xc0(%rsp), %r9
imulq %rdx, %r8
movq %r8, 0x68(%rsp)
leaq (%r9,%r9,2), %rdx
addq %rdi, %rdx
leaq (,%r9,4), %rsi
movq %rsi, 0xc8(%rsp)
leaq (,%rcx,8), %r12
leaq (%rdi,%r9,2), %r15
addq %rdi, %r9
xorl %esi, %esi
movq %rsi, 0x48(%rsp)
shrq $0x2, %rsi
imulq 0x38(%rsp), %rsi
addq 0x40(%rsp), %rsi
movq %rsi, 0x20(%rsp)
movq %rdi, 0x60(%rsp)
movq %rdi, 0x10(%rsp)
movq %r9, 0x58(%rsp)
movq %r9, 0x8(%rsp)
movq %r15, 0x28(%rsp)
movq %rdx, 0x18(%rsp)
movq %rdx, %r9
xorl %r8d, %r8d
cmpl $0x8, %ebx
jl 0xed243
movq 0x68(%rsp), %rdi
imulq %r8, %rdi
addq 0x20(%rsp), %rdi
movq 0x10(%rsp), %rdx
movq 0x8(%rsp), %rsi
movq %r15, %rbp
movq %r9, %r10
xorl %ebx, %ebx
xorl %r14d, %r14d
xorl %r11d, %r11d
movzwl (%rdx,%r11), %r13d
movw %r13w, (%rdi,%r14)
movzwl (%rsi,%r11), %r13d
movw %r13w, 0x2(%rdi,%r14)
movzwl (%rbp,%r11), %r13d
movw %r13w, 0x4(%rdi,%r14)
movzwl (%r10,%r11), %r13d
movw %r13w, 0x6(%rdi,%r14)
addq %rcx, %r11
addq $0x8, %r14
cmpq $0x40, %r14
jne 0xed1eb
leaq 0x8(%rbx), %r11
addq $0xf, %rbx
addq %r12, %r10
addq %r12, %rbp
addq %r12, %rsi
addq %r12, %rdx
addq %r14, %rdi
cmpq %rax, %rbx
movq %r11, %rbx
jl 0xed1e5
incq %r8
addq $0x2, %r9
addq $0x2, %r15
addq $0x2, 0x8(%rsp)
addq $0x2, 0x10(%rsp)
cmpq $0x24, %r8
movq 0x70(%rsp), %rbx
jne 0xed1c0
movq 0x48(%rsp), %r10
leaq 0x4(%r10), %r8
addq $0x7, %r10
movq 0x18(%rsp), %rdx
movq 0xc8(%rsp), %rsi
addq %rsi, %rdx
movq 0x28(%rsp), %r15
addq %rsi, %r15
movq 0x58(%rsp), %r9
addq %rsi, %r9
movq 0x60(%rsp), %rdi
addq %rsi, %rdi
cmpq 0x50(%rsp), %r10
movq %r8, %rsi
jb 0xed183
movq 0x78(%rsp), %r14
cmpl %r14d, %r8d
jge 0xed3a4
movslq 0xac(%rsp), %rcx
movq 0x90(%rsp), %rsi
movq 0x30(%rsp), %r9
movslq 0x2c(%r9), %rdx
movq (%r9), %rdi
movq %rdi, 0x10(%rsp)
movq 0x10(%r9), %rdi
movq 0x40(%r9), %r9
imulq %rdi, %r9
movq %r9, 0x8(%rsp)
imulq %rdi, %rdx
movl %r8d, %r8d
movq 0xc0(%rsp), %r9
imulq %rsi, %r9
movq %r9, 0x20(%rsp)
imulq %r8, %r9
addq 0x80(%rsp), %r9
imulq %rsi, %rcx
leaq (,%rcx,8), %r11
movl %r8d, %esi
shrl $0x2, %esi
movl %r8d, %r14d
andl $0x3, %r14d
addl %esi, %r14d
imulq 0x8(%rsp), %r14
addq 0x10(%rsp), %r14
movq %r9, %r10
xorl %r12d, %r12d
cmpl $0x8, 0x70(%rsp)
jl 0xed384
movq %rdx, %rbx
imulq %r12, %rbx
addq %r14, %rbx
movq %r10, %r15
xorl %ebp, %ebp
xorl %esi, %esi
movq %r15, %r13
movzwl (%r13), %edi
movw %di, (%rbx,%rsi)
addq %rcx, %r13
addq $0x2, %rsi
cmpq $0x10, %rsi
jne 0xed358
leaq 0x8(%rbp), %r13
addq $0xf, %rbp
addq %r11, %r15
addq %rsi, %rbx
cmpq %rax, %rbp
movq %r13, %rbp
jl 0xed353
incq %r12
addq $0x2, %r10
cmpq $0x24, %r12
jne 0xed33d
incq %r8
addq 0x20(%rsp), %r9
cmpq 0x50(%rsp), %r8
jne 0xed31c
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0xed3e0
lock
decl (%rax)
jne 0xed3e0
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0xed3d3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xed3e0
testq %rsi, %rsi
je 0xed3e0
movq %rsi, %rdi
callq 0x563b0
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xed43b
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0xed433
lock
decl (%rax)
jne 0xed433
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0xed42d
testq %rsi, %rsi
je 0xed433
movq %rsi, %rdi
callq 0x563b0
jmp 0xed433
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_3x3_pack8to1_int8.h
|
ncnn::conv1x1s2_sgemm_pack8to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void conv1x1s2_sgemm_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const int64_t* r0 = bottom_blob.channel(p);
int64_t* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_int8_sse(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, 0x10(%rsp)
movq %rsi, %r15
movq %rdi, %r12
movl 0x2c(%rdi), %ebx
movl 0x38(%rdi), %r13d
movq 0x10(%rdi), %r8
movl 0x18(%rdi), %r9d
movl 0x2c(%rsi), %ebp
movl 0x30(%rsi), %r14d
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq %rcx, 0x18(%rsp)
movq 0x10(%rcx), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r14d, %edx
movl %r13d, %ecx
callq 0x5a266
testl %r13d, %r13d
jle 0xedc01
subl %ebp, %ebx
addl %ebx, %ebx
movq (%r12), %rax
movq 0x20(%rsp), %rcx
movslq %ebx, %rdx
xorl %esi, %esi
testl %r14d, %r14d
jle 0xedbf9
movq 0x60(%rsp), %rdi
imulq %rsi, %rdi
imulq 0x30(%rsp), %rdi
addq %rcx, %rdi
movq 0x40(%r12), %r8
imulq %rsi, %r8
imulq 0x10(%r12), %r8
addq %rax, %r8
xorl %r9d, %r9d
movl %ebp, %r10d
testl %ebp, %ebp
jle 0xedbed
movq (%r8), %r11
movq %r11, (%rdi)
addq $0x10, %r8
addq $0x8, %rdi
decl %r10d
jne 0xedbda
leaq (%r8,%rdx,8), %r8
incl %r9d
cmpl %r14d, %r9d
jne 0xedbd3
incq %rsi
cmpq %r13, %rsi
jne 0xedba7
leaq 0x20(%rsp), %rdi
movq %r15, %rsi
movq 0x10(%rsp), %rdx
movq 0x18(%rsp), %rcx
callq 0xeda28
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0xedc4b
lock
decl (%rax)
jne 0xedc4b
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0xedc3e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xedc4b
testq %rsi, %rsi
je 0xedc4b
movq %rsi, %rdi
callq 0x563b0
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xedc9a
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0xedc92
lock
decl (%rax)
jne 0xedc92
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0xedc8c
testq %rsi, %rsi
je 0xedc92
movq %rsi, %rdi
callq 0x563b0
jmp 0xedc92
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_1x1_pack8to4_int8.h
|
ncnn::conv1x1s2_sgemm_pack1to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void conv1x1s2_sgemm_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const signed char* r0 = bottom_blob.channel(p);
signed char* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
outptr[2] = r0[4];
outptr[3] = r0[6];
r0 += 8;
outptr += 4;
}
for (; j + 1 < outw; j += 2)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
r0 += 4;
outptr += 2;
}
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack1to4_int8_sse(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, 0x10(%rsp)
movq %rsi, %r15
movq %rdi, %r12
movl 0x2c(%rdi), %ebx
movl 0x38(%rdi), %r13d
movq 0x10(%rdi), %r8
movl 0x18(%rdi), %r9d
movl 0x2c(%rsi), %ebp
movl 0x30(%rsi), %r14d
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq %rcx, 0x18(%rsp)
movq 0x10(%rcx), %rax
movq %rax, (%rsp)
movl %ebp, %esi
movl %r14d, %edx
movl %r13d, %ecx
callq 0x5a266
testl %r13d, %r13d
jle 0xef481
subl %ebp, %ebx
addl %ebx, %ebx
movslq %ebx, %rax
movl %ebp, %ecx
andl $-0x4, %ecx
xorl %edx, %edx
testl %r14d, %r14d
jle 0xef475
movq 0x60(%rsp), %rsi
imulq %rdx, %rsi
imulq 0x30(%rsp), %rsi
addq 0x20(%rsp), %rsi
movq 0x40(%r12), %rdi
imulq %rdx, %rdi
imulq 0x10(%r12), %rdi
addq (%r12), %rdi
xorl %r8d, %r8d
cmpl $0x4, %ebp
jl 0xef417
movl $0x3, %r9d
movb (%rdi), %r10b
movb %r10b, (%rsi)
movb 0x2(%rdi), %r10b
movb %r10b, 0x1(%rsi)
movb 0x4(%rdi), %r10b
movb %r10b, 0x2(%rsi)
movb 0x6(%rdi), %r10b
movb %r10b, 0x3(%rsi)
addq $0x8, %rdi
addq $0x4, %rsi
addl $0x4, %r9d
cmpl %ebp, %r9d
jl 0xef3e3
movl %ecx, %r9d
jmp 0xef41a
xorl %r9d, %r9d
movl %r9d, %r10d
orl $0x1, %r10d
cmpl %ebp, %r10d
jge 0xef44c
movb (%rdi), %r10b
movb %r10b, (%rsi)
movb 0x2(%rdi), %r10b
movb %r10b, 0x1(%rsi)
addq $0x4, %rdi
addq $0x2, %rsi
leal 0x2(%r9), %r10d
addl $0x3, %r9d
cmpl %ebp, %r9d
movl %r10d, %r9d
jl 0xef426
movl %ebp, %r10d
subl %r9d, %r10d
jle 0xef466
movb (%rdi), %r9b
movb %r9b, (%rsi)
addq $0x2, %rdi
incq %rsi
decl %r10d
jne 0xef454
addq %rax, %rdi
incl %r8d
cmpl %r14d, %r8d
jne 0xef3d8
incq %rdx
cmpq %r13, %rdx
jne 0xef3a5
leaq 0x20(%rsp), %rdi
movq %r15, %rsi
movq 0x10(%rsp), %rdx
movq 0x18(%rsp), %rcx
callq 0xef226
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0xef4cb
lock
decl (%rax)
jne 0xef4cb
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0xef4be
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xef4cb
testq %rsi, %rsi
je 0xef4cb
movq %rsi, %rdi
callq 0x563b0
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xef51a
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0xef512
lock
decl (%rax)
jne 0xef512
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0xef50c
testq %rsi, %rsi
je 0xef512
movq %rsi, %rdi
callq 0x563b0
jmp 0xef512
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_1x1_pack1to4_int8.h
|
ncnn::conv3x3s1_pack1to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void conv3x3s1_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = 9;
// im2col
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
const int gap = w - outw;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < 3; u++)
{
for (int v = 0; v < 3; v++)
{
const signed char* sptr = img.row<const signed char>(u) + v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[1];
ptr[2] = sptr[2];
ptr[3] = sptr[3];
sptr += 4;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[1];
sptr += 2;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += 1;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %rdx, 0x18(%rsp)
movslq 0x2c(%rdi), %rbp
movq %rdi, 0x28(%rsp)
movslq 0x38(%rdi), %r14
movslq 0x2c(%rsi), %rbx
movq %rsi, 0x10(%rsp)
movl 0x30(%rsi), %r15d
movl %r15d, %esi
imull %ebx, %esi
movq %rcx, 0x8(%rsp)
movq 0x10(%rcx), %rax
leaq 0x30(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
movl $0x1, %r8d
movl $0x9, %edx
movl %r14d, %ecx
movl $0x1, %r9d
callq 0x5a266
movq %r14, 0x20(%rsp)
testq %r14, %r14
jle 0xef6d2
subq %rbx, %rbp
movl %ebx, %eax
andl $-0x4, %eax
xorl %ecx, %ecx
movq 0x28(%rsp), %rdi
movslq 0x2c(%rdi), %rdx
movq 0x40(%rdi), %rsi
imulq %rcx, %rsi
movq 0x10(%rdi), %r8
imulq %r8, %rsi
addq (%rdi), %rsi
movq 0x70(%rsp), %rdi
imulq %rcx, %rdi
imulq 0x40(%rsp), %rdi
addq 0x30(%rsp), %rdi
imulq %r8, %rdx
xorl %r8d, %r8d
movq %rdx, %r9
imulq %r8, %r9
addq %rsi, %r9
xorl %r10d, %r10d
testl %r15d, %r15d
jle 0xef6aa
leaq (%r9,%r10), %r11
xorl %r14d, %r14d
cmpl $0x4, %ebx
jl 0xef64a
movl $0x3, %r13d
movb (%r11), %r12b
movb %r12b, (%rdi)
movb 0x1(%r11), %r12b
movb %r12b, 0x1(%rdi)
movb 0x2(%r11), %r12b
movb %r12b, 0x2(%rdi)
movb 0x3(%r11), %r12b
movb %r12b, 0x3(%rdi)
addq $0x4, %r11
addq $0x4, %rdi
addl $0x4, %r13d
cmpl %ebx, %r13d
jl 0xef616
movl %eax, %r13d
jmp 0xef64d
xorl %r13d, %r13d
movl %r13d, %r12d
orl $0x1, %r12d
cmpl %ebx, %r12d
jge 0xef67f
movb (%r11), %r12b
movb %r12b, (%rdi)
movb 0x1(%r11), %r12b
movb %r12b, 0x1(%rdi)
addq $0x2, %r11
addq $0x2, %rdi
leal 0x2(%r13), %r12d
addl $0x3, %r13d
cmpl %ebx, %r13d
movl %r12d, %r13d
jl 0xef659
cmpl %r13d, %ebx
jle 0xef69b
movl %ebx, %r12d
subl %r13d, %r12d
movb (%r11), %r13b
movb %r13b, (%rdi)
incq %r11
incq %rdi
decl %r12d
jne 0xef68a
addq %rbp, %r11
incl %r14d
cmpl %r15d, %r14d
jne 0xef60b
incq %r10
cmpq $0x3, %r10
jne 0xef5fb
incq %r8
cmpq $0x3, %r8
jne 0xef5ee
incq %rcx
cmpq 0x20(%rsp), %rcx
jne 0xef5b7
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x30(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
callq 0x10410b
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xef722
lock
decl (%rax)
jne 0xef722
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0xef715
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xef722
testq %rsi, %rsi
je 0xef722
movq %rsi, %rdi
callq 0x563b0
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xef771
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xef769
lock
decl (%rax)
jne 0xef769
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0xef763
testq %rsi, %rsi
je 0xef769
movq %rsi, %rdi
callq 0x563b0
jmp 0xef769
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_3x3_pack1to4_int8.h
|
ncnn::conv7x7s2_pack1to4_int8_sse(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Option const&)
|
static void conv7x7s2_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Lower the 7x7 stride-2 int8 convolution to a GEMM: unfold the input
    // (im2col) into bottom_im2col, then hand off to the packed sgemm kernel.
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int size = outw * outh; // spatial positions per output channel
    const int maxk = 49;          // 7 * 7 kernel taps

    // im2col buffer: one row of `size` int8 samples per (channel, tap) pair
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // after a sampled row we have advanced outw*2 bytes; adding gap lands
        // on the start of the next sampled row (vertical stride 2 -> w*2)
        const int gap = w * 2 - outw * 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* outptr = bottom_im2col.channel(p);

            for (int ku = 0; ku < 7; ku++)
            {
                for (int kv = 0; kv < 7; kv++)
                {
                    // source row starts at kernel-tap offset (ku, kv)
                    const signed char* srcp = img.row<const signed char>(ku) + kv;

                    for (int y = 0; y < outh; y++)
                    {
                        int x = 0;
                        // copy 4 stride-2 samples per iteration
                        while (x + 3 < outw)
                        {
                            outptr[0] = srcp[0];
                            outptr[1] = srcp[2];
                            outptr[2] = srcp[4];
                            outptr[3] = srcp[6];
                            srcp += 8;
                            outptr += 4;
                            x += 4;
                        }
                        // then 2 samples at a time
                        while (x + 1 < outw)
                        {
                            outptr[0] = srcp[0];
                            outptr[1] = srcp[2];
                            srcp += 4;
                            outptr += 2;
                            x += 2;
                        }
                        // final odd sample, if any
                        while (x < outw)
                        {
                            outptr[0] = srcp[0];
                            srcp += 2;
                            outptr += 1;
                            x++;
                        }

                        srcp += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x78, %rsp
movq %rdx, 0x18(%rsp)
movslq 0x2c(%rdi), %rbp
movq %rdi, 0x28(%rsp)
movslq 0x38(%rdi), %r14
movslq 0x2c(%rsi), %rbx
movq %rsi, 0x10(%rsp)
movl 0x30(%rsi), %r15d
movl %r15d, %esi
imull %ebx, %esi
movq %rcx, 0x8(%rsp)
movq 0x10(%rcx), %rax
leaq 0x30(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq %rax, (%rsp)
movl $0x1, %r8d
movl $0x31, %edx
movl %r14d, %ecx
movl $0x1, %r9d
callq 0x5a266
movq %r14, 0x20(%rsp)
testq %r14, %r14
jle 0xefb88
subq %rbx, %rbp
addq %rbp, %rbp
movl %ebx, %eax
andl $-0x4, %eax
xorl %ecx, %ecx
movq 0x28(%rsp), %rdi
movslq 0x2c(%rdi), %rdx
movq 0x40(%rdi), %rsi
imulq %rcx, %rsi
movq 0x10(%rdi), %r8
imulq %r8, %rsi
addq (%rdi), %rsi
movq 0x70(%rsp), %rdi
imulq %rcx, %rdi
imulq 0x40(%rsp), %rdi
addq 0x30(%rsp), %rdi
imulq %r8, %rdx
xorl %r8d, %r8d
movq %rdx, %r9
imulq %r8, %r9
addq %rsi, %r9
xorl %r10d, %r10d
testl %r15d, %r15d
jle 0xefb60
leaq (%r9,%r10), %r11
xorl %r14d, %r14d
cmpl $0x4, %ebx
jl 0xefaff
movl $0x3, %r13d
movb (%r11), %r12b
movb %r12b, (%rdi)
movb 0x2(%r11), %r12b
movb %r12b, 0x1(%rdi)
movb 0x4(%r11), %r12b
movb %r12b, 0x2(%rdi)
movb 0x6(%r11), %r12b
movb %r12b, 0x3(%rdi)
addq $0x8, %r11
addq $0x4, %rdi
addl $0x4, %r13d
cmpl %ebx, %r13d
jl 0xefacb
movl %eax, %r13d
jmp 0xefb02
xorl %r13d, %r13d
movl %r13d, %r12d
orl $0x1, %r12d
cmpl %ebx, %r12d
jge 0xefb34
movb (%r11), %r12b
movb %r12b, (%rdi)
movb 0x2(%r11), %r12b
movb %r12b, 0x1(%rdi)
addq $0x4, %r11
addq $0x2, %rdi
leal 0x2(%r13), %r12d
addl $0x3, %r13d
cmpl %ebx, %r13d
movl %r12d, %r13d
jl 0xefb0e
cmpl %r13d, %ebx
jle 0xefb51
movl %ebx, %r12d
subl %r13d, %r12d
movb (%r11), %r13b
movb %r13b, (%rdi)
addq $0x2, %r11
incq %rdi
decl %r12d
jne 0xefb3f
addq %rbp, %r11
incl %r14d
cmpl %r15d, %r14d
jne 0xefac0
incq %r10
cmpq $0x7, %r10
jne 0xefab0
incq %r8
cmpq $0x7, %r8
jne 0xefaa3
incq %rcx
cmpq 0x20(%rsp), %rcx
jne 0xefa6c
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x30(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
callq 0x10410b
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xefbd8
lock
decl (%rax)
jne 0xefbd8
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0xefbcb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0xefbd8
testq %rsi, %rsi
je 0xefbd8
movq %rsi, %rdi
callq 0x563b0
addq $0x78, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0xefc27
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0xefc1f
lock
decl (%rax)
jne 0xefc1f
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0xefc19
testq %rsi, %rsi
je 0xefc1f
movq %rsi, %rdi
callq 0x563b0
jmp 0xefc1f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_7x7_pack1to4_int8.h
|
ncnn::im2col_sgemm_pack4to8_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
// GEMM for the pack4 -> pack8 convolution path: consumes an already
// im2col-ed input (w = size spatial positions, h = maxk kernel taps,
// c = inch channels, 4 floats per element) and writes pack8 output
// channels into top_blob, adding an optional per-channel bias.
static void im2col_sgemm_pack4to8_avx(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator);

    // size = spatial positions, maxk = kernel taps, inch = input channels
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // null when _bias is empty; zeros[] below is substituted in that case
    const float* bias = _bias;

    // permute
    // Repack so the data feeding 8 consecutive output positions (or a single
    // remainder position) is contiguous per (channel, tap); the 8-wide tiles
    // are stored as transposed 4x8 blocks so the GEMM inner loop can read
    // 8 input scalars sequentially per kernel tap.
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + size % 8, 16u, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = 0;

        // 8-position tiles
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            float* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    __m128 _r0 = _mm_load_ps(img0);
                    __m128 _r1 = _mm_load_ps(img0 + 4);
                    __m128 _r2 = _mm_load_ps(img0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(img0 + 4 * 3);
                    __m128 _r4 = _mm_load_ps(img0 + 4 * 4);
                    __m128 _r5 = _mm_load_ps(img0 + 4 * 5);
                    __m128 _r6 = _mm_load_ps(img0 + 4 * 6);
                    __m128 _r7 = _mm_load_ps(img0 + 4 * 7);
                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                    // interleave the two transposed halves: each stored pair
                    // (_rN, _rN+4) holds one pack-4 lane for all 8 positions
                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r4);
                    _mm_store_ps(tmpptr + 4 * 2, _r1);
                    _mm_store_ps(tmpptr + 4 * 3, _r5);
                    _mm_store_ps(tmpptr + 4 * 4, _r2);
                    _mm_store_ps(tmpptr + 4 * 5, _r6);
                    _mm_store_ps(tmpptr + 4 * 6, _r3);
                    _mm_store_ps(tmpptr + 4 * 7, _r7);

                    img0 += size * 4; // advance to the next kernel tap row
                    tmpptr += 32;
                }
            }
        }

        remain_size_start += nn_size << 3;

        // leftover positions (size % 8), copied one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + i % 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    __m128 _val = _mm_load_ps(img0);
                    _mm_store_ps(tmpptr, _val);

                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 8 : zeros;

        int i = 0;
        // main tile: 8 output positions per iteration, 8 pack-8 accumulators
        for (; i + 7 < size; i += 8)
        {
            float* tmpptr = tmp.channel(i / 8);
            const float* kptr = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            __m256 _sum0 = _mm256_loadu_ps(biasptr);
            __m256 _sum1 = _sum0;
            __m256 _sum2 = _sum0;
            __m256 _sum3 = _sum0;
            __m256 _sum4 = _sum0;
            __m256 _sum5 = _sum0;
            __m256 _sum6 = _sum0;
            __m256 _sum7 = _sum0;

            for (int j = 0; j < nn; j++)
            {
                // one pack-8 weight vector FMA'd with 8 broadcast input scalars
                __m256 _w0 = _mm256_load_ps(kptr);

                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
                __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
                __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
                _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
                __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
                __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
                _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
                _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
                __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
                __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
                _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
                _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);

                kptr += 8;
                tmpptr += 8;
            }

            _mm256_store_ps(outptr0, _sum0);
            _mm256_store_ps(outptr0 + 8, _sum1);
            _mm256_store_ps(outptr0 + 8 * 2, _sum2);
            _mm256_store_ps(outptr0 + 8 * 3, _sum3);
            _mm256_store_ps(outptr0 + 8 * 4, _sum4);
            _mm256_store_ps(outptr0 + 8 * 5, _sum5);
            _mm256_store_ps(outptr0 + 8 * 6, _sum6);
            _mm256_store_ps(outptr0 + 8 * 7, _sum7);

            outptr0 += 8 * 8;
        }
        // remainder: one output position at a time
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + i % 8);
            const float* kptr = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            __m256 _sum0 = _mm256_loadu_ps(biasptr);

            for (int j = 0; j < nn; j++)
            {
                __m256 _w0 = _mm256_load_ps(kptr);
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);

                kptr += 8;
                tmpptr += 1;
            }

            _mm256_store_ps(outptr0, _sum0);
            outptr0 += 8;
        }
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x88, %rsp
movq %rcx, 0x58(%rsp)
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r13
movslq 0x2c(%rdi), %rbp
movl 0x30(%rdi), %ebx
movl 0x38(%rdi), %r12d
leal (,%rbx,8), %esi
movl %ebp, %eax
shrl $0x3, %eax
movl %ebp, %ecx
andl $0x7, %ecx
addl %eax, %ecx
cmpq $0x8, %rbp
movl 0x38(%r15), %eax
movq %rax, 0x8(%rsp)
leaq 0x10(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
cmovll %ebx, %esi
cmovll %ebp, %ecx
movq %r8, (%rsp)
movl $0x10, %r8d
movl %r12d, %edx
movl $0x4, %r9d
callq 0x5a266
movl %ebp, %ecx
sarl $0x3, %ecx
testl %ecx, %ecx
jle 0xfc390
leal (,%rbp,4), %eax
cltq
movl %ecx, %ecx
shlq $0x2, %rax
movl $0x40, %edx
xorl %esi, %esi
testl %r12d, %r12d
jle 0xfc380
movq 0x50(%rsp), %rdi
imulq %rsi, %rdi
imulq 0x20(%rsp), %rdi
addq 0x10(%rsp), %rdi
xorl %r8d, %r8d
testl %ebx, %ebx
jle 0xfc374
movq 0x40(%r13), %r9
movq (%r13), %r10
imulq 0x10(%r13), %r9
addq %rdx, %r10
imulq %r8, %r9
addq %r10, %r9
movl %ebx, %r10d
vmovaps -0x40(%r9), %xmm0
vmovaps -0x30(%r9), %xmm1
vmovaps -0x20(%r9), %xmm2
vmovaps -0x10(%r9), %xmm3
vmovaps (%r9), %xmm4
vmovaps 0x10(%r9), %xmm5
vmovaps 0x20(%r9), %xmm6
vmovaps 0x30(%r9), %xmm7
vunpcklps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm3, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vunpckhps %xmm3, %xmm2, %xmm1 # xmm1 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vmovlhps %xmm9, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm9[0]
vunpckhpd %xmm9, %xmm8, %xmm3 # xmm3 = xmm8[1],xmm9[1]
vmovlhps %xmm1, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm1[0]
vunpckhpd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpcklps %xmm7, %xmm6, %xmm9 # xmm9 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vunpckhps %xmm7, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovlhps %xmm9, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm9[0]
vunpckhpd %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[1],xmm9[1]
vmovlhps %xmm5, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vmovaps %xmm2, (%rdi)
vmovaps %xmm6, 0x10(%rdi)
vmovaps %xmm3, 0x20(%rdi)
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm8, 0x40(%rdi)
vmovaps %xmm7, 0x50(%rdi)
vmovaps %xmm0, 0x60(%rdi)
vmovaps %xmm4, 0x70(%rdi)
subq $-0x80, %rdi
addq %rax, %r9
decl %r10d
jne 0xfc2ca
incq %r8
cmpq %r12, %r8
jne 0xfc2a8
incq %rsi
subq $-0x80, %rdx
cmpq %rcx, %rsi
jne 0xfc288
movq %rbp, %rax
andq $-0x8, %rax
cmpl %ebp, %eax
je 0xfc42f
leal (,%rbp,4), %ecx
movslq %ecx, %rcx
movq %rax, %rdx
shlq $0x4, %rdx
shlq $0x2, %rcx
testl %r12d, %r12d
jle 0xfc423
leal 0x7(%rax), %esi
testl %eax, %eax
cmovnsl %eax, %esi
movl %esi, %edi
andl $-0x8, %edi
movl %eax, %r8d
subl %edi, %r8d
sarl $0x3, %esi
addl %r8d, %esi
movslq %esi, %rsi
imulq 0x50(%rsp), %rsi
imulq 0x20(%rsp), %rsi
addq 0x10(%rsp), %rsi
xorl %edi, %edi
testl %ebx, %ebx
jle 0xfc41b
movq 0x40(%r13), %r8
movq (%r13), %r9
imulq 0x10(%r13), %r8
addq %rdx, %r9
imulq %rdi, %r8
addq %r9, %r8
movl %ebx, %r9d
vmovaps (%r8), %xmm0
vmovaps %xmm0, (%rsi)
addq $0x10, %rsi
addq %rcx, %r8
decl %r9d
jne 0xfc406
incq %rdi
cmpq %r12, %rdi
jne 0xfc3e8
incq %rax
addq $0x10, %rdx
cmpq %rbp, %rax
jl 0xfc3b4
cmpl $0x0, 0x8(%rsp)
movq 0x58(%rsp), %r13
jle 0xfc614
imull %r12d, %ebx
shll $0x2, %ebx
xorl %eax, %eax
leaq 0x60(%rsp), %rcx
vxorps %xmm0, %xmm0, %xmm0
movq 0x40(%r15), %rdx
imulq %rax, %rdx
imulq 0x10(%r15), %rdx
addq (%r15), %rdx
movq %rax, %rsi
shlq $0x5, %rsi
addq %r13, %rsi
testq %r13, %r13
cmoveq %rcx, %rsi
vmovups %ymm0, 0x60(%rsp)
cmpl $0x8, %ebp
jl 0xfc597
xorl %edi, %edi
xorl %r8d, %r8d
vmovups (%rsi), %ymm1
testl %ebx, %ebx
jle 0xfc528
movq 0x50(%rsp), %r9
movq 0x40(%r14), %r10
imulq %rax, %r10
imulq 0x10(%r14), %r10
addq (%r14), %r10
imulq 0x20(%rsp), %r9
imulq %rdi, %r9
addq 0x10(%rsp), %r9
xorl %r11d, %r11d
movl %ebx, %r12d
vmovaps %ymm1, %ymm2
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm4
vmovaps %ymm1, %ymm5
vmovaps %ymm1, %ymm6
vmovaps %ymm1, %ymm7
vmovaps %ymm1, %ymm8
vmovaps (%r10,%r11), %ymm9
vfmadd231ps (%r9,%r11){1to8}, %ymm9, %ymm8 # ymm8 = (ymm9 * mem) + ymm8
vfmadd231ps 0x4(%r9,%r11){1to8}, %ymm9, %ymm7 # ymm7 = (ymm9 * mem) + ymm7
vfmadd231ps 0x8(%r9,%r11){1to8}, %ymm9, %ymm6 # ymm6 = (ymm9 * mem) + ymm6
vfmadd231ps 0xc(%r9,%r11){1to8}, %ymm9, %ymm5 # ymm5 = (ymm9 * mem) + ymm5
vfmadd231ps 0x10(%r9,%r11){1to8}, %ymm9, %ymm4 # ymm4 = (ymm9 * mem) + ymm4
vfmadd231ps 0x14(%r9,%r11){1to8}, %ymm9, %ymm3 # ymm3 = (ymm9 * mem) + ymm3
vfmadd231ps 0x18(%r9,%r11){1to8}, %ymm9, %ymm2 # ymm2 = (ymm9 * mem) + ymm2
vfmadd231ps 0x1c(%r9,%r11){1to8}, %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
addq $0x20, %r11
decl %r12d
jne 0xfc4d8
jmp 0xfc544
vmovaps %ymm1, %ymm8
vmovaps %ymm1, %ymm7
vmovaps %ymm1, %ymm6
vmovaps %ymm1, %ymm5
vmovaps %ymm1, %ymm4
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm2
vmovaps %ymm8, (%rdx)
vmovaps %ymm7, 0x20(%rdx)
vmovaps %ymm6, 0x40(%rdx)
vmovaps %ymm5, 0x60(%rdx)
vmovaps %ymm4, 0x80(%rdx)
vmovaps %ymm3, 0xa0(%rdx)
vmovaps %ymm2, 0xc0(%rdx)
vmovaps %ymm1, 0xe0(%rdx)
addq $0x100, %rdx # imm = 0x100
leaq 0x8(%r8), %r9
addq $0xf, %r8
incq %rdi
cmpq %rbp, %r8
movq %r9, %r8
jl 0xfc486
jmp 0xfc59a
xorl %r9d, %r9d
cmpl %ebp, %r9d
jge 0xfc606
vmovups (%rsi), %ymm1
testl %ebx, %ebx
jle 0xfc5f6
movl %r9d, %r8d
shrl $0x3, %r8d
movl %r9d, %r10d
andl $0x7, %r10d
movq 0x40(%r14), %rdi
imulq %rax, %rdi
imulq 0x10(%r14), %rdi
addq (%r14), %rdi
addl %r8d, %r10d
movq 0x50(%rsp), %r8
imulq 0x20(%rsp), %r8
imulq %r10, %r8
addq 0x10(%rsp), %r8
xorl %r10d, %r10d
vbroadcastss (%r8,%r10,4), %ymm2
vfmadd231ps (%rdi), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x20, %rdi
incq %r10
cmpl %r10d, %ebx
jne 0xfc5df
vmovaps %ymm1, (%rdx)
addq $0x20, %rdx
incl %r9d
cmpl %ebp, %r9d
jne 0xfc59f
incq %rax
cmpq 0x8(%rsp), %rax
jne 0xfc451
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0xfc64d
lock
decl (%rax)
jne 0xfc64d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0xfc63d
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfc64d
testq %rsi, %rsi
je 0xfc64d
movq %rsi, %rdi
vzeroupper
callq 0x563b0
addq $0x88, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0xfc6a2
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0xfc69a
lock
decl (%rax)
jne 0xfc69a
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0xfc694
testq %rsi, %rsi
je 0xfc69a
movq %rsi, %rdi
callq 0x563b0
jmp 0xfc69a
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_sgemm_pack4to8.h
|
ncnn::im2col_sgemm_pack8to1_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
// GEMM for the pack8 -> pack1 convolution path: consumes an already
// im2col-ed input (w = size spatial positions, h = maxk kernel taps,
// c = inch channels, 8 floats per element) and writes plain (pack1)
// output channels into top_blob, adding an optional per-channel bias.
static void im2col_sgemm_pack8to1_avx(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 8, 8, opt.workspace_allocator);

    // size = spatial positions, maxk = kernel taps, inch = input channels
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // null when _bias is empty; zeros[] / 0.f below are substituted then
    const float* bias = _bias;

    // Repack so the data feeding 8 consecutive output positions (or a
    // single remainder position) is contiguous per (channel, tap); the
    // 8-wide tiles are stored as fully transposed 8x8 blocks.
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + size % 8, 4u * 8, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 8, 8, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size >> 3;

        // 8-position tiles
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 8;

            float* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 8x8: unpack within 128-bit lanes, shuffle into
                    // 4-float rows, then permute2f128 to stitch lane halves
                    __m256 _r0 = _mm256_load_ps(img0);
                    __m256 _r1 = _mm256_load_ps(img0 + 8);
                    __m256 _r2 = _mm256_load_ps(img0 + 8 * 2);
                    __m256 _r3 = _mm256_load_ps(img0 + 8 * 3);
                    __m256 _r4 = _mm256_load_ps(img0 + 8 * 4);
                    __m256 _r5 = _mm256_load_ps(img0 + 8 * 5);
                    __m256 _r6 = _mm256_load_ps(img0 + 8 * 6);
                    __m256 _r7 = _mm256_load_ps(img0 + 8 * 7);
                    __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
                    __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
                    __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
                    __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
                    __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
                    __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
                    __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
                    __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
                    __m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
                    _r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
                    _r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
                    _r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
                    _r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
                    _r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
                    _r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
                    _r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
                    _r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
                    _mm256_store_ps(tmpptr, _r0);
                    _mm256_store_ps(tmpptr + 8, _r1);
                    _mm256_store_ps(tmpptr + 8 * 2, _r2);
                    _mm256_store_ps(tmpptr + 8 * 3, _r3);
                    _mm256_store_ps(tmpptr + 8 * 4, _r4);
                    _mm256_store_ps(tmpptr + 8 * 5, _r5);
                    _mm256_store_ps(tmpptr + 8 * 6, _r6);
                    _mm256_store_ps(tmpptr + 8 * 7, _r7);

                    img0 += size * 8; // advance to the next kernel tap row
                    tmpptr += 64;
                }
            }
        }

        remain_size_start += nn_size << 3;

        // leftover positions (size % 8), copied one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + i % 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8;

                for (int k = 0; k < maxk; k++)
                {
                    __m256 _val = _mm256_load_ps(img0);
                    _mm256_store_ps(tmpptr, _val);

                    img0 += size * 8;
                    tmpptr += 8;
                }
            }
        }
    }

    // output channels are processed 8 at a time (weights are grouped 8
    // output channels per kernel channel), then one at a time
    int nn_outch = outch / 8;
    int remain_outch_start = nn_outch * 8;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        float* outptr4 = top_blob.channel(p + 4);
        float* outptr5 = top_blob.channel(p + 5);
        float* outptr6 = top_blob.channel(p + 6);
        float* outptr7 = top_blob.channel(p + 7);

        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // main tile: 8 output positions x 8 output channels
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 8);
            const float* kptr0 = kernel.channel(p / 8);

            int nn = inch * maxk * 8; // inch always > 0

            // _sumN accumulates 8 positions for output channel p + N
            __m256 _sum0 = _mm256_broadcast_ss(biasptr);
            __m256 _sum1 = _mm256_broadcast_ss(biasptr + 1);
            __m256 _sum2 = _mm256_broadcast_ss(biasptr + 2);
            __m256 _sum3 = _mm256_broadcast_ss(biasptr + 3);
            __m256 _sum4 = _mm256_broadcast_ss(biasptr + 4);
            __m256 _sum5 = _mm256_broadcast_ss(biasptr + 5);
            __m256 _sum6 = _mm256_broadcast_ss(biasptr + 6);
            __m256 _sum7 = _mm256_broadcast_ss(biasptr + 7);

            for (int j = 0; j < nn; j++)
            {
                // 8 input positions FMA'd with 8 broadcast weight scalars
                __m256 _val0 = _mm256_load_ps(tmpptr);

                __m256 _w0 = _mm256_broadcast_ss(kptr0);
                __m256 _w1 = _mm256_broadcast_ss(kptr0 + 1);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_val0, _w1, _sum1);
                __m256 _w2 = _mm256_broadcast_ss(kptr0 + 2);
                __m256 _w3 = _mm256_broadcast_ss(kptr0 + 3);
                _sum2 = _mm256_comp_fmadd_ps(_val0, _w2, _sum2);
                _sum3 = _mm256_comp_fmadd_ps(_val0, _w3, _sum3);
                __m256 _w4 = _mm256_broadcast_ss(kptr0 + 4);
                __m256 _w5 = _mm256_broadcast_ss(kptr0 + 5);
                _sum4 = _mm256_comp_fmadd_ps(_val0, _w4, _sum4);
                _sum5 = _mm256_comp_fmadd_ps(_val0, _w5, _sum5);
                __m256 _w6 = _mm256_broadcast_ss(kptr0 + 6);
                __m256 _w7 = _mm256_broadcast_ss(kptr0 + 7);
                _sum6 = _mm256_comp_fmadd_ps(_val0, _w6, _sum6);
                _sum7 = _mm256_comp_fmadd_ps(_val0, _w7, _sum7);

                tmpptr += 8;
                kptr0 += 8;
            }

            _mm256_storeu_ps(outptr0, _sum0);
            _mm256_storeu_ps(outptr1, _sum1);
            _mm256_storeu_ps(outptr2, _sum2);
            _mm256_storeu_ps(outptr3, _sum3);
            _mm256_storeu_ps(outptr4, _sum4);
            _mm256_storeu_ps(outptr5, _sum5);
            _mm256_storeu_ps(outptr6, _sum6);
            _mm256_storeu_ps(outptr7, _sum7);

            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
            outptr4 += 8;
            outptr5 += 8;
            outptr6 += 8;
            outptr7 += 8;
        }
        // remainder positions: one position, 8 output channels packed in _sum
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 8 + i % 8);
            const float* kptr0 = kernel.channel(p / 8);

            int nn = inch * maxk * 8; // inch always > 0

            __m256 _sum = _mm256_loadu_ps(biasptr);

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                __m256 _w0 = _mm256_load_ps(kptr0);
                _sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);

                tmpptr += 1;
                kptr0 += 8;
            }

            // scatter the 8 channel results to their pack1 destinations
            float sum[8];
            _mm256_storeu_ps(sum, _sum);

            outptr0[0] = sum[0];
            outptr1[0] = sum[1];
            outptr2[0] = sum[2];
            outptr3[0] = sum[3];
            outptr4[0] = sum[4];
            outptr5[0] = sum[5];
            outptr6[0] = sum[6];
            outptr7[0] = sum[7];

            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
            outptr4 += 1;
            outptr5 += 1;
            outptr6 += 1;
            outptr7 += 1;
        }
    }

    // tail output channels (outch % 8), one channel at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 8);
            const float* kptr0 = kernel.channel(p / 8 + p % 8);

            int nn = inch * maxk * 8; // inch always > 0

            __m256 _sum0 = _mm256_set1_ps(bias0);

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_load_ps(tmpptr);
                __m256 _w0 = _mm256_broadcast_ss(kptr0);
                _sum0 = _mm256_comp_fmadd_ps(_w0, _val0, _sum0);

                tmpptr += 8;
                kptr0 += 1;
            }

            _mm256_storeu_ps(outptr0, _sum0);
            outptr0 += 8;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 8 + i % 8);
            const float* kptr0 = kernel.channel(p / 8 + p % 8);

            // here nn counts pack-8 groups, not scalars: both operands of the
            // inner FMA are full 8-float vectors reduced horizontally after
            int nn = inch * maxk; // inch always > 0

            float sum0 = bias0;

            __m256 _sum0 = _mm256_setzero_ps();

            for (int j = 0; j < nn; j++)
            {
                __m256 _val0 = _mm256_load_ps(tmpptr);
                __m256 _w0 = _mm256_load_ps(kptr0);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);

                tmpptr += 8;
                kptr0 += 8;
            }

            sum0 += _mm256_reduce_add_ps(_sum0);

            outptr0[0] = sum0;
            outptr0 += 1;
        }
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq %rcx, %r14
movq %rdx, 0x10(%rsp)
movq %rsi, %r15
movq %rdi, %r13
movslq 0x2c(%rdi), %r12
movl 0x30(%rdi), %ebp
movl 0x38(%rdi), %ebx
leal (,%rbp,8), %esi
movl %r12d, %eax
shrl $0x3, %eax
movl %r12d, %ecx
andl $0x7, %ecx
addl %eax, %ecx
cmpq $0x8, %r12
movslq 0x38(%r15), %rax
movq %rax, 0x40(%rsp)
leaq 0x50(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorpd %xmm0, %xmm0, %xmm0
vmovapd %xmm0, (%rdi)
vmovupd %xmm0, 0xc(%rdi)
vmovapd %xmm0, 0x20(%rdi)
vmovupd %xmm0, 0x2c(%rdi)
cmovll %ebp, %esi
cmovll %r12d, %ecx
movq %r8, (%rsp)
movl $0x20, %r8d
movl %ebx, %edx
movl $0x8, %r9d
callq 0x5a266
movl %r12d, %ecx
sarl $0x3, %ecx
testl %ecx, %ecx
jle 0xfc8a4
leal (,%r12,8), %eax
cltq
movl %ecx, %ecx
shlq $0x2, %rax
movl $0x80, %edx
xorl %esi, %esi
testl %ebx, %ebx
jle 0xfc891
movq 0x90(%rsp), %rdi
imulq %rsi, %rdi
imulq 0x60(%rsp), %rdi
addq 0x50(%rsp), %rdi
xorl %r8d, %r8d
testl %ebp, %ebp
jle 0xfc885
movq 0x40(%r13), %r9
movq (%r13), %r10
imulq 0x10(%r13), %r9
addq %rdx, %r10
imulq %r8, %r9
addq %r10, %r9
movl %ebp, %r10d
vmovapd -0x80(%r9), %ymm0
vmovapd -0x60(%r9), %ymm1
vmovapd -0x40(%r9), %ymm2
vmovapd -0x20(%r9), %ymm3
vmovapd (%r9), %ymm4
vmovapd 0x20(%r9), %ymm5
vmovapd 0x40(%r9), %ymm6
vmovapd 0x60(%r9), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovapd %ymm6, (%rdi)
vmovapd %ymm9, 0x20(%rdi)
vmovapd %ymm10, 0x40(%rdi)
vmovapd %ymm11, 0x60(%rdi)
vmovapd %ymm2, 0x80(%rdi)
vmovapd %ymm1, 0xa0(%rdi)
vmovapd %ymm3, 0xc0(%rdi)
vmovapd %ymm0, 0xe0(%rdi)
addq $0x100, %rdi # imm = 0x100
addq %rax, %r9
decl %r10d
jne 0xfc7a0
incq %r8
cmpq %rbx, %r8
jne 0xfc77e
incq %rsi
addq $0x100, %rdx # imm = 0x100
cmpq %rcx, %rsi
jne 0xfc75c
movq %r12, %rax
andq $-0x8, %rax
cmpl %r12d, %eax
je 0xfc947
leal (,%r12,8), %ecx
movslq %ecx, %rcx
movq %rax, %rdx
shlq $0x5, %rdx
shlq $0x2, %rcx
testl %ebx, %ebx
jle 0xfc93b
leal 0x7(%rax), %esi
testl %eax, %eax
cmovnsl %eax, %esi
movl %esi, %edi
andl $-0x8, %edi
movl %eax, %r8d
subl %edi, %r8d
sarl $0x3, %esi
addl %r8d, %esi
movslq %esi, %rsi
imulq 0x90(%rsp), %rsi
imulq 0x60(%rsp), %rsi
addq 0x50(%rsp), %rsi
xorl %edi, %edi
testl %ebp, %ebp
jle 0xfc933
movq 0x40(%r13), %r8
movq (%r13), %r9
imulq 0x10(%r13), %r8
addq %rdx, %r9
imulq %rdi, %r8
addq %r9, %r8
movl %ebp, %r9d
vmovapd (%r8), %ymm0
vmovapd %ymm0, (%rsi)
addq $0x20, %rsi
addq %rcx, %r8
decl %r9d
jne 0xfc91e
incq %rdi
cmpq %rbx, %rdi
jne 0xfc900
incq %rax
addq $0x20, %rdx
cmpq %r12, %rax
jl 0xfc8ca
movq %rbx, 0x38(%rsp)
movq 0x40(%rsp), %rcx
leal 0x7(%rcx), %eax
testl %ecx, %ecx
cmovnsl %ecx, %eax
movl %eax, %edx
andl $-0x8, %edx
movl %edx, 0x34(%rsp)
cmpl $0x8, %ecx
jl 0xfccd4
sarl $0x3, %eax
movq 0x38(%rsp), %rcx
imull %ebp, %ecx
shll $0x3, %ecx
movl %eax, %eax
movq %rax, 0xa0(%rsp)
xorl %ebx, %ebx
vxorpd %xmm0, %xmm0, %xmm0
movq %r14, 0xb8(%rsp)
movq %r12, 0x18(%rsp)
movq %rbp, 0xa8(%rsp)
movq %r15, 0xb0(%rsp)
leaq (,%rbx,8), %rbp
movq 0x40(%r15), %r8
imulq %r8, %rbp
movq (%r15), %rsi
leaq 0x1(,%rbx,8), %r9
imulq %r8, %r9
leaq 0x2(,%rbx,8), %r10
imulq %r8, %r10
leaq 0x3(,%rbx,8), %r11
imulq %r8, %r11
leaq 0x4(,%rbx,8), %r13
imulq %r8, %r13
leaq 0x5(,%rbx,8), %rax
imulq %r8, %rax
leaq 0x6(,%rbx,8), %rdi
imulq %r8, %rdi
leaq 0x7(,%rbx,8), %rdx
imulq %r8, %rdx
movq 0x10(%r15), %r8
imulq %r8, %rbp
addq %rsi, %rbp
movq %rbp, 0x20(%rsp)
imulq %r8, %r9
addq %rsi, %r9
imulq %r8, %r10
addq %rsi, %r10
imulq %r8, %r11
addq %rsi, %r11
imulq %r8, %r13
addq %rsi, %r13
imulq %r8, %rax
addq %rsi, %rax
imulq %r8, %rdi
addq %rsi, %rdi
imulq %r8, %rdx
addq %rsi, %rdx
movq %rbx, 0x28(%rsp)
movq %rbx, %r15
shlq $0x5, %r15
addq %r14, %r15
testq %r14, %r14
movq %r15, %r8
leaq 0xc0(%rsp), %rsi
cmoveq %rsi, %r8
movq %r8, 0x48(%rsp)
vmovupd %ymm0, 0xc0(%rsp)
cmpl $0x8, %r12d
jl 0xfcbbe
testq %r14, %r14
cmoveq %rsi, %r15
xorl %r12d, %r12d
xorl %ebp, %ebp
movq 0x48(%rsp), %rsi
vbroadcastss (%rsi), %ymm1
vbroadcastss 0x4(%r15), %ymm2
vbroadcastss 0x8(%r15), %ymm3
vbroadcastss 0xc(%r15), %ymm4
vbroadcastss 0x10(%r15), %ymm5
vbroadcastss 0x14(%r15), %ymm6
vbroadcastss 0x18(%r15), %ymm7
vbroadcastss 0x1c(%r15), %ymm8
testl %ecx, %ecx
jle 0xfcb4f
movq 0x10(%rsp), %rsi
movq 0x40(%rsi), %r14
imulq 0x28(%rsp), %r14
imulq 0x10(%rsi), %r14
addq (%rsi), %r14
movq 0x90(%rsp), %rsi
imulq 0x60(%rsp), %rsi
imulq %r12, %rsi
addq 0x50(%rsp), %rsi
xorl %ebx, %ebx
movl %ecx, %r8d
vmovaps (%rsi,%rbx), %ymm9
vfmadd231ps (%r14,%rbx){1to8}, %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
vfmadd231ps 0x4(%r14,%rbx){1to8}, %ymm9, %ymm2 # ymm2 = (ymm9 * mem) + ymm2
vfmadd231ps 0x8(%r14,%rbx){1to8}, %ymm9, %ymm3 # ymm3 = (ymm9 * mem) + ymm3
vfmadd231ps 0xc(%r14,%rbx){1to8}, %ymm9, %ymm4 # ymm4 = (ymm9 * mem) + ymm4
vfmadd231ps 0x10(%r14,%rbx){1to8}, %ymm9, %ymm5 # ymm5 = (ymm9 * mem) + ymm5
vfmadd231ps 0x14(%r14,%rbx){1to8}, %ymm9, %ymm6 # ymm6 = (ymm9 * mem) + ymm6
vfmadd231ps 0x18(%r14,%rbx){1to8}, %ymm9, %ymm7 # ymm7 = (ymm9 * mem) + ymm7
vfmadd231ps 0x1c(%r14,%rbx){1to8}, %ymm9, %ymm8 # ymm8 = (ymm9 * mem) + ymm8
addq $0x20, %rbx
decl %r8d
jne 0xfcb02
movq 0x20(%rsp), %rsi
vmovups %ymm1, (%rsi)
vmovups %ymm2, (%r9)
vmovups %ymm3, (%r10)
vmovups %ymm4, (%r11)
vmovups %ymm5, (%r13)
vmovups %ymm6, (%rax)
vmovups %ymm7, (%rdi)
vmovups %ymm8, (%rdx)
addq $0x20, %rsi
movq %rsi, 0x20(%rsp)
addq $0x20, %r9
addq $0x20, %r10
addq $0x20, %r11
addq $0x20, %r13
addq $0x20, %rax
addq $0x20, %rdi
addq $0x20, %rdx
leaq 0x8(%rbp), %r14
addq $0xf, %rbp
incq %r12
cmpq 0x18(%rsp), %rbp
movq %r14, %rbp
jl 0xfca93
movq 0x18(%rsp), %r12
jmp 0xfcbc1
xorl %r14d, %r14d
cmpl %r12d, %r14d
jge 0xfcca1
movq 0x50(%rsp), %r15
movq 0x90(%rsp), %r12
movq 0x10(%rsp), %rsi
movq 0x40(%rsi), %rbp
imulq 0x28(%rsp), %rbp
imulq 0x10(%rsi), %rbp
addq (%rsi), %rbp
imulq 0x60(%rsp), %r12
movq 0x48(%rsp), %rsi
vmovups (%rsi), %ymm1
testl %ecx, %ecx
jle 0xfcc35
movl %r14d, %r8d
shrl $0x3, %r8d
movl %r14d, %esi
andl $0x7, %esi
addl %r8d, %esi
imulq %r12, %rsi
addq %r15, %rsi
xorl %r8d, %r8d
movq %rbp, %rbx
vbroadcastss (%rsi,%r8,4), %ymm2
vfmadd231ps (%rbx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x20, %rbx
incq %r8
cmpl %r8d, %ecx
jne 0xfcc1e
movq 0x20(%rsp), %rsi
vmovss %xmm1, (%rsi)
vextractps $0x1, %xmm1, (%r9)
vextractps $0x2, %xmm1, (%r10)
vextractps $0x3, %xmm1, (%r11)
vextracti128 $0x1, %ymm1, %xmm1
vmovd %xmm1, (%r13)
vextractps $0x1, %xmm1, (%rax)
vextractps $0x2, %xmm1, (%rdi)
vextractps $0x3, %xmm1, (%rdx)
addq $0x4, %rsi
movq %rsi, 0x20(%rsp)
addq $0x4, %r9
addq $0x4, %r10
addq $0x4, %r11
addq $0x4, %r13
addq $0x4, %rax
addq $0x4, %rdi
addq $0x4, %rdx
incl %r14d
cmpl 0x18(%rsp), %r14d
jne 0xfcbf4
movq 0x28(%rsp), %rbx
incq %rbx
cmpq 0xa0(%rsp), %rbx
movq 0xb8(%rsp), %r14
movq 0xb0(%rsp), %r15
movq 0x18(%rsp), %r12
movq 0xa8(%rsp), %rbp
jne 0xfc9a6
movq 0x40(%rsp), %r13
movl 0x34(%rsp), %ecx
cmpl %r13d, %ecx
jge 0xfce7c
imull 0x38(%rsp), %ebp
leal (,%rbp,8), %eax
movslq %ecx, %rcx
testq %r14, %r14
je 0xfcd02
vmovss (%r14,%rcx,4), %xmm0
jmp 0xfcd06
vxorpd %xmm0, %xmm0, %xmm0
movq 0x40(%r15), %rdx
imulq %rcx, %rdx
imulq 0x10(%r15), %rdx
addq (%r15), %rdx
xorl %esi, %esi
cmpl $0x8, %r12d
jl 0xfcdb4
leal 0x7(%rcx), %esi
testl %ecx, %ecx
cmovnsl %ecx, %esi
movl %esi, %edi
andl $-0x8, %edi
movl %ecx, %r8d
subl %edi, %r8d
sarl $0x3, %esi
addl %r8d, %esi
movslq %esi, %rdi
vbroadcastss %xmm0, %ymm1
xorl %r8d, %r8d
vmovapd %ymm1, %ymm2
testl %eax, %eax
jle 0xfcd9c
movq 0x10(%rsp), %r9
movq 0x40(%r9), %rsi
imulq %rdi, %rsi
imulq 0x10(%r9), %rsi
addq (%r9), %rsi
movq %r8, %r9
shrq $0x3, %r9
imulq 0x90(%rsp), %r9
imulq 0x60(%rsp), %r9
addq 0x50(%rsp), %r9
xorl %r10d, %r10d
vmovapd %ymm1, %ymm2
vbroadcastss (%rsi,%r10,4), %ymm3
vfmadd231ps (%r9), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
addq $0x20, %r9
incq %r10
cmpl %r10d, %eax
jne 0xfcd85
vmovupd %ymm2, (%rdx)
addq $0x20, %rdx
leaq 0x8(%r8), %rsi
addq $0xf, %r8
cmpq %r12, %r8
movq %rsi, %r8
jl 0xfcd46
cmpl %r12d, %esi
jge 0xfce70
movq 0x50(%rsp), %rdi
leal 0x7(%rcx), %r9d
testl %ecx, %ecx
cmovnsl %ecx, %r9d
movq 0x90(%rsp), %r8
movl %r9d, %r10d
andl $-0x8, %r10d
movl %ecx, %r11d
subl %r10d, %r11d
sarl $0x3, %r9d
addl %r11d, %r9d
movslq %r9d, %r9
movq 0x10(%rsp), %r10
imulq 0x40(%r10), %r9
imulq 0x10(%r10), %r9
addq (%r10), %r9
imulq 0x60(%rsp), %r8
testl %ebp, %ebp
jle 0xfce3e
movl %esi, %r11d
shrl $0x3, %r11d
movl %esi, %r10d
andl $0x7, %r10d
addl %r11d, %r10d
imulq %r8, %r10
addq %rdi, %r10
vxorpd %xmm1, %xmm1, %xmm1
xorl %r11d, %r11d
movl %ebp, %ebx
vmovaps (%r10,%r11), %ymm2
vfmadd231ps (%r9,%r11), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x20, %r11
decl %ebx
jne 0xfce28
jmp 0xfce42
vxorpd %xmm1, %xmm1, %xmm1
vextractf128 $0x1, %ymm1, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm1, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vaddss %xmm1, %xmm0, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vmovss %xmm1, (%rdx)
addq $0x4, %rdx
incl %esi
cmpl %r12d, %esi
jne 0xfce03
incq %rcx
cmpq %r13, %rcx
jne 0xfccf5
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0xfceb5
lock
decl (%rax)
jne 0xfceb5
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0xfcea5
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfceb5
testq %rsi, %rsi
je 0xfceb5
movq %rsi, %rdi
vzeroupper
callq 0x563b0
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0xfcf0a
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0xfcf02
lock
decl (%rax)
jne 0xfcf02
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0xfcefc
testq %rsi, %rsi
je 0xfcf02
movq %rsi, %rdi
callq 0x563b0
jmp 0xfcf02
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_sgemm_pack8to1.h
|
ncnn::im2col_sgemm_pack8to4_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
// im2col + tiled SGEMM convolution, AVX path, 8-wide input packing to
// 4-wide output packing ("pack8to4").
//
// bottom_im2col : (size, maxk, inch) with 8 floats per element
//                 (32u elemsize, elempack 8 — see the tmp.create calls).
// top_blob      : output; written 4 floats at a time per output channel.
// kernel        : pre-packed weights; indexed as kernel.channel(p / 2)
//                 for channel pairs and kernel.channel(p / 2 + p % 2) for
//                 the remainder channel (packing presumably done by the
//                 matching transform elsewhere in ncnn — confirm there).
// _bias         : optional per-output-channel bias; a stack array of
//                 zeros is substituted when it is empty.
static void im2col_sgemm_pack8to4_avx(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 32u, 8, opt.workspace_allocator);
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;
    const float* bias = _bias;
    // permute: regroup the im2col columns into tiles of 8, then 4, then
    // single columns, so the GEMM below can stream them contiguously.
    // The tile holding column i is tmp.channel(i/8 + (i%8)/4 + i%4).
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 32u, 8, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 32u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 32u, 8, opt.workspace_allocator);
    {
        int nn_size = size / 8;
        int remain_size_start = 0;
        // Tiles of 8 columns: each is transposed 8x8 so that the 8 packed
        // lanes of one column become 8 consecutive floats in tmp.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;
            float* tmpptr = tmp.channel(i / 8);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8;
                for (int k = 0; k < maxk; k++)
                {
                    // transpose 8x8 via unpack lo/hi, 4-wide shuffles, then
                    // 128-bit lane permutes (standard AVX 8x8 float transpose)
                    __m256 _r0 = _mm256_load_ps(img0);
                    __m256 _r1 = _mm256_load_ps(img0 + 8);
                    __m256 _r2 = _mm256_load_ps(img0 + 8 * 2);
                    __m256 _r3 = _mm256_load_ps(img0 + 8 * 3);
                    __m256 _r4 = _mm256_load_ps(img0 + 8 * 4);
                    __m256 _r5 = _mm256_load_ps(img0 + 8 * 5);
                    __m256 _r6 = _mm256_load_ps(img0 + 8 * 6);
                    __m256 _r7 = _mm256_load_ps(img0 + 8 * 7);
                    __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
                    __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
                    __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
                    __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
                    __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5);
                    __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5);
                    __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7);
                    __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7);
                    __m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
                    _r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0));
                    _r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0));
                    _r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0));
                    _r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0));
                    _r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1));
                    _r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1));
                    _r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1));
                    _r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1));
                    _mm256_store_ps(tmpptr, _r0);
                    _mm256_store_ps(tmpptr + 8, _r1);
                    _mm256_store_ps(tmpptr + 8 * 2, _r2);
                    _mm256_store_ps(tmpptr + 8 * 3, _r3);
                    _mm256_store_ps(tmpptr + 8 * 4, _r4);
                    _mm256_store_ps(tmpptr + 8 * 5, _r5);
                    _mm256_store_ps(tmpptr + 8 * 6, _r6);
                    _mm256_store_ps(tmpptr + 8 * 7, _r7);
                    img0 += size * 8;   // next k-row of the same channel
                    tmpptr += 64;       // 8 columns * 8 lanes written
                }
            }
        }
        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;
        // Tiles of 4 columns: 8x4 transpose, same unpack/shuffle scheme.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8;
                for (int k = 0; k < maxk; k++)
                {
                    // transpose 8x4
                    __m256 _r0 = _mm256_load_ps(img0);
                    __m256 _r1 = _mm256_load_ps(img0 + 8);
                    __m256 _r2 = _mm256_load_ps(img0 + 8 * 2);
                    __m256 _r3 = _mm256_load_ps(img0 + 8 * 3);
                    __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1);
                    __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1);
                    __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3);
                    __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3);
                    __m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    _r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0));
                    _r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0));
                    _r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1));
                    _r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1));
                    _mm256_store_ps(tmpptr, _r0);
                    _mm256_store_ps(tmpptr + 8, _r1);
                    _mm256_store_ps(tmpptr + 8 * 2, _r2);
                    _mm256_store_ps(tmpptr + 8 * 3, _r3);
                    img0 += size * 8;
                    tmpptr += 32;       // 4 columns * 8 lanes written
                }
            }
        }
        remain_size_start += nn_size << 2;
        // Leftover single columns: copied straight through, no transpose.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8;
                for (int k = 0; k < maxk; k++)
                {
                    __m256 _val = _mm256_load_ps(img0);
                    _mm256_store_ps(tmpptr, _val);
                    img0 += size * 8;
                    tmpptr += 8;
                }
            }
        }
    }
    // GEMM: output channels processed two at a time; the 8-wide kernel
    // vector holds 4 weights for channel p in the low 128-bit lane and
    // 4 weights for channel p+1 in the high lane (hence the
    // _mm256_extractf128_ps split when storing).
    int nn_outch = 0;
    int remain_outch_start = 0;
    nn_outch = outch >> 1;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;
        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 4 : zeros;
        int i = 0;
        // 8 output columns per iteration: 8 independent accumulators.
        for (; i + 7 < size; i += 8)
        {
            float* tmpptr = tmp.channel(i / 8);
            const float* kptr = kernel.channel(p / 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m256 _sum0 = _mm256_loadu_ps(biasptr);
            __m256 _sum1 = _sum0;
            __m256 _sum2 = _sum0;
            __m256 _sum3 = _sum0;
            __m256 _sum4 = _sum0;
            __m256 _sum5 = _sum0;
            __m256 _sum6 = _sum0;
            __m256 _sum7 = _sum0;
            for (int j = 0; j < nn; j++)
            {
                __m256 _w0 = _mm256_load_ps(kptr);
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
                __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
                __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
                _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
                __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
                __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
                _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4);
                _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5);
                __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
                __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
                _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6);
                _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7);
                tmpptr += 8;
                kptr += 8;
            }
            // low lane -> channel p, high lane -> channel p+1
            _mm_store_ps(outptr0, _mm256_extractf128_ps(_sum0, 0));
            _mm_store_ps(outptr0 + 4, _mm256_extractf128_ps(_sum1, 0));
            _mm_store_ps(outptr0 + 8, _mm256_extractf128_ps(_sum2, 0));
            _mm_store_ps(outptr0 + 12, _mm256_extractf128_ps(_sum3, 0));
            _mm_store_ps(outptr0 + 16, _mm256_extractf128_ps(_sum4, 0));
            _mm_store_ps(outptr0 + 20, _mm256_extractf128_ps(_sum5, 0));
            _mm_store_ps(outptr0 + 24, _mm256_extractf128_ps(_sum6, 0));
            _mm_store_ps(outptr0 + 28, _mm256_extractf128_ps(_sum7, 0));
            _mm_store_ps(outptr1, _mm256_extractf128_ps(_sum0, 1));
            _mm_store_ps(outptr1 + 4, _mm256_extractf128_ps(_sum1, 1));
            _mm_store_ps(outptr1 + 8, _mm256_extractf128_ps(_sum2, 1));
            _mm_store_ps(outptr1 + 12, _mm256_extractf128_ps(_sum3, 1));
            _mm_store_ps(outptr1 + 16, _mm256_extractf128_ps(_sum4, 1));
            _mm_store_ps(outptr1 + 20, _mm256_extractf128_ps(_sum5, 1));
            _mm_store_ps(outptr1 + 24, _mm256_extractf128_ps(_sum6, 1));
            _mm_store_ps(outptr1 + 28, _mm256_extractf128_ps(_sum7, 1));
            outptr0 += 32;
            outptr1 += 32;
        }
        // 4 output columns per iteration.
        for (; i + 3 < size; i += 4)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const float* kptr = kernel.channel(p / 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m256 _sum0 = _mm256_loadu_ps(biasptr);
            __m256 _sum1 = _sum0;
            __m256 _sum2 = _sum0;
            __m256 _sum3 = _sum0;
            for (int j = 0; j < nn; j++)
            {
                __m256 _w0 = _mm256_load_ps(kptr);
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
                _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);
                __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
                __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
                _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3);
                tmpptr += 4;
                kptr += 8;
            }
            _mm_store_ps(outptr0, _mm256_extractf128_ps(_sum0, 0));
            _mm_store_ps(outptr0 + 4, _mm256_extractf128_ps(_sum1, 0));
            _mm_store_ps(outptr0 + 8, _mm256_extractf128_ps(_sum2, 0));
            _mm_store_ps(outptr0 + 12, _mm256_extractf128_ps(_sum3, 0));
            _mm_store_ps(outptr1, _mm256_extractf128_ps(_sum0, 1));
            _mm_store_ps(outptr1 + 4, _mm256_extractf128_ps(_sum1, 1));
            _mm_store_ps(outptr1 + 8, _mm256_extractf128_ps(_sum2, 1));
            _mm_store_ps(outptr1 + 12, _mm256_extractf128_ps(_sum3, 1));
            outptr0 += 16;
            outptr1 += 16;
        }
        // Remaining single columns.
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const float* kptr = kernel.channel(p / 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m256 _sum = _mm256_loadu_ps(biasptr);
            for (int j = 0; j < nn; j++)
            {
                __m256 _w0 = _mm256_load_ps(kptr);
                __m256 _val0 = _mm256_broadcast_ss(tmpptr);
                _sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);
                tmpptr += 1;
                kptr += 8;
            }
            _mm_store_ps(outptr0, _mm256_extractf128_ps(_sum, 0));
            _mm_store_ps(outptr1, _mm256_extractf128_ps(_sum, 1));
            outptr0 += 4;
            outptr1 += 4;
        }
    }
    remain_outch_start += nn_outch << 1;
    // Odd trailing output channel: same loops, 128-bit accumulators and
    // a 4-float kernel step instead of 8.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 4 : zeros;
        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            float* tmpptr = tmp.channel(i / 8);
            const float* kptr = kernel.channel(p / 2 + p % 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _sum0;
            __m128 _sum2 = _sum0;
            __m128 _sum3 = _sum0;
            __m128 _sum4 = _sum0;
            __m128 _sum5 = _sum0;
            __m128 _sum6 = _sum0;
            __m128 _sum7 = _sum0;
            for (int j = 0; j < nn; j++)
            {
                __m128 _w0 = _mm_load_ps(kptr);
                __m128 _val0 = _mm_load1_ps(tmpptr);
                __m128 _val1 = _mm_load1_ps(tmpptr + 1);
                _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                __m128 _val2 = _mm_load1_ps(tmpptr + 2);
                __m128 _val3 = _mm_load1_ps(tmpptr + 3);
                _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                __m128 _val4 = _mm_load1_ps(tmpptr + 4);
                __m128 _val5 = _mm_load1_ps(tmpptr + 5);
                _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
                _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
                __m128 _val6 = _mm_load1_ps(tmpptr + 6);
                __m128 _val7 = _mm_load1_ps(tmpptr + 7);
                _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
                _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
                tmpptr += 8;
                kptr += 4;
            }
            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            _mm_store_ps(outptr0 + 8, _sum2);
            _mm_store_ps(outptr0 + 12, _sum3);
            _mm_store_ps(outptr0 + 16, _sum4);
            _mm_store_ps(outptr0 + 20, _sum5);
            _mm_store_ps(outptr0 + 24, _sum6);
            _mm_store_ps(outptr0 + 28, _sum7);
            outptr0 += 32;
        }
        for (; i + 3 < size; i += 4)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const float* kptr = kernel.channel(p / 2 + p % 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _sum0;
            __m128 _sum2 = _sum0;
            __m128 _sum3 = _sum0;
            for (int j = 0; j < nn; j++)
            {
                __m128 _w0 = _mm_load_ps(kptr);
                __m128 _val0 = _mm_load1_ps(tmpptr);
                __m128 _val1 = _mm_load1_ps(tmpptr + 1);
                _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                __m128 _val2 = _mm_load1_ps(tmpptr + 2);
                __m128 _val3 = _mm_load1_ps(tmpptr + 3);
                _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                tmpptr += 4;
                kptr += 4;
            }
            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            _mm_store_ps(outptr0 + 8, _sum2);
            _mm_store_ps(outptr0 + 12, _sum3);
            outptr0 += 16;
        }
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const float* kptr = kernel.channel(p / 2 + p % 2);
            int nn = inch * maxk * 8; // inch always > 0
            __m128 _sum = _mm_loadu_ps(biasptr);
            for (int j = 0; j < nn; j++)
            {
                __m128 _w0 = _mm_load_ps(kptr);
                __m128 _val0 = _mm_load1_ps(tmpptr);
                _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
                tmpptr += 1;
                kptr += 4;
            }
            _mm_store_ps(outptr0, _sum);
            outptr0 += 4;
        }
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rcx, 0x70(%rsp)
movq %rdx, %r14
movq %rdi, %r13
movslq 0x2c(%rdi), %rbp
movl %ebp, %r15d
movl 0x30(%rdi), %ebx
movl 0x38(%rdi), %r12d
movq %rsi, 0x68(%rsp)
movslq 0x38(%rsi), %rax
movq %rax, 0x58(%rsp)
movq $0x0, 0x50(%rsp)
vxorpd %xmm0, %xmm0, %xmm0
vmovapd %xmm0, 0x10(%rsp)
vmovupd %xmm0, 0x1c(%rsp)
vmovapd %xmm0, 0x30(%rsp)
vmovupd %xmm0, 0x3c(%rsp)
cmpq $0x8, %rbp
movq %r15, 0x60(%rsp)
jl 0xfd113
leal (,%rbx,8), %esi
movl %ebp, %eax
shrl $0x3, %eax
movl %r15d, %ecx
andl $0x3, %ecx
btl $0x2, %ebp
adcl %eax, %ecx
movq %r8, (%rsp)
leaq 0x10(%rsp), %rdi
movl $0x20, %r8d
movl %r12d, %edx
movl $0x8, %r9d
callq 0x5a266
movl %r15d, %eax
shrl $0x3, %eax
leal (,%rbp,8), %ecx
shlq $0x2, %rcx
movl $0x80, %edx
xorl %esi, %esi
testl %r12d, %r12d
jle 0xfd0fe
movq 0x50(%rsp), %rdi
imulq %rsi, %rdi
imulq 0x20(%rsp), %rdi
addq 0x10(%rsp), %rdi
xorl %r8d, %r8d
testl %ebx, %ebx
jle 0xfd0f2
movq 0x40(%r13), %r9
movq (%r13), %r10
imulq 0x10(%r13), %r9
addq %rdx, %r10
imulq %r8, %r9
addq %r10, %r9
movl %ebx, %r10d
vmovapd -0x80(%r9), %ymm0
vmovapd -0x60(%r9), %ymm1
vmovapd -0x40(%r9), %ymm2
vmovapd -0x20(%r9), %ymm3
vmovapd (%r9), %ymm4
vmovapd 0x20(%r9), %ymm5
vmovapd 0x40(%r9), %ymm6
vmovapd 0x60(%r9), %ymm7
vunpcklps %ymm1, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklps %ymm5, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vunpckhps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
vunpcklps %ymm7, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
vunpckhps %ymm7, %ymm6, %ymm6 # ymm6 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
vunpcklpd %ymm1, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
vunpckhpd %ymm1, %ymm8, %ymm1 # ymm1 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm8 # ymm8 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vunpcklpd %ymm5, %ymm3, %ymm2 # ymm2 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
vunpckhpd %ymm5, %ymm3, %ymm3 # ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
vunpcklpd %ymm6, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
vunpckhpd %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
vinsertf128 $0x1, %xmm2, %ymm7, %ymm6
vinsertf128 $0x1, %xmm3, %ymm1, %ymm9
vinsertf128 $0x1, %xmm5, %ymm8, %ymm10
vinsertf128 $0x1, %xmm4, %ymm0, %ymm11
vperm2f128 $0x31, %ymm2, %ymm7, %ymm2 # ymm2 = ymm7[2,3],ymm2[2,3]
vperm2f128 $0x31, %ymm3, %ymm1, %ymm1 # ymm1 = ymm1[2,3],ymm3[2,3]
vperm2f128 $0x31, %ymm5, %ymm8, %ymm3 # ymm3 = ymm8[2,3],ymm5[2,3]
vperm2f128 $0x31, %ymm4, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm4[2,3]
vmovapd %ymm6, (%rdi)
vmovapd %ymm9, 0x20(%rdi)
vmovapd %ymm10, 0x40(%rdi)
vmovapd %ymm11, 0x60(%rdi)
vmovapd %ymm2, 0x80(%rdi)
vmovapd %ymm1, 0xa0(%rdi)
vmovapd %ymm3, 0xc0(%rdi)
vmovapd %ymm0, 0xe0(%rdi)
addq $0x100, %rdi # imm = 0x100
addq %rcx, %r9
decl %r10d
jne 0xfd00d
incq %r8
cmpq %r12, %r8
jne 0xfcfeb
incq %rsi
addq $0x100, %rdx # imm = 0x100
cmpq %rax, %rsi
jne 0xfcfcb
jmp 0xfd153
leal (,%rbx,4), %esi
movl %ebp, %ecx
andl $0x3, %ecx
incl %ecx
cmpl $0x4, %r15d
cmovll %ebx, %esi
cmovll %ebp, %ecx
movq %r8, (%rsp)
leaq 0x10(%rsp), %rdi
movl $0x20, %r8d
movl %r12d, %edx
movl $0x8, %r9d
callq 0x5a266
leal 0x7(%rbp), %eax
testl %ebp, %ebp
cmovnsl %ebp, %eax
sarl $0x3, %eax
shll $0x3, %eax
movl %ebp, %ecx
subl %eax, %ecx
movl %ecx, %edi
sarl $0x2, %edi
testl %edi, %edi
jle 0xfd27d
leal (,%rbp,8), %edx
movslq %edx, %rdx
movslq %eax, %rsi
movl %edi, %edi
shlq $0x5, %rsi
addq $0x60, %rsi
shlq $0x2, %rdx
xorl %r8d, %r8d
testl %r12d, %r12d
jle 0xfd26d
leal (%rax,%r8,4), %r9d
leal (%rax,%r8,4), %r10d
addl $0x7, %r10d
testl %r9d, %r9d
cmovnsl %r9d, %r10d
movl %r10d, %r11d
andl $-0x8, %r11d
subl %r11d, %r9d
sarl $0x2, %r9d
sarl $0x3, %r10d
addl %r9d, %r10d
movslq %r10d, %r9
imulq 0x50(%rsp), %r9
imulq 0x20(%rsp), %r9
addq 0x10(%rsp), %r9
xorl %r10d, %r10d
testl %ebx, %ebx
jle 0xfd261
movq 0x40(%r13), %r11
movq (%r13), %r15
imulq 0x10(%r13), %r11
addq %rsi, %r15
imulq %r10, %r11
addq %r15, %r11
movl %ebx, %r15d
vmovapd -0x60(%r11), %ymm0
vmovapd -0x40(%r11), %ymm1
vmovapd -0x20(%r11), %ymm2
vmovapd (%r11), %ymm3
vunpcklps %ymm1, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vunpckhps %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vunpcklps %ymm3, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
vunpckhps %ymm3, %ymm2, %ymm2 # ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
vunpcklpd %ymm1, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
vunpckhpd %ymm1, %ymm4, %ymm1 # ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
vunpcklpd %ymm2, %ymm0, %ymm4 # ymm4 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
vunpckhpd %ymm2, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
vinsertf128 $0x1, %xmm1, %ymm3, %ymm2
vinsertf128 $0x1, %xmm0, %ymm4, %ymm5
vperm2f128 $0x31, %ymm1, %ymm3, %ymm1 # ymm1 = ymm3[2,3],ymm1[2,3]
vperm2f128 $0x31, %ymm0, %ymm4, %ymm0 # ymm0 = ymm4[2,3],ymm0[2,3]
vmovapd %ymm2, (%r9)
vmovapd %ymm5, 0x20(%r9)
vmovapd %ymm1, 0x40(%r9)
vmovapd %ymm0, 0x60(%r9)
subq $-0x80, %r9
addq %rdx, %r11
decl %r15d
jne 0xfd1ef
incq %r10
cmpq %r12, %r10
jne 0xfd1cd
incq %r8
subq $-0x80, %rsi
cmpq %rdi, %r8
jne 0xfd185
andl $-0x4, %ecx
addl %eax, %ecx
cmpl 0x60(%rsp), %ecx
jge 0xfd34d
leal (,%rbp,8), %eax
cltq
movslq %ecx, %rcx
movq %rcx, %rdx
shlq $0x5, %rdx
shlq $0x2, %rax
testl %r12d, %r12d
jle 0xfd33d
leal 0x3(%rcx), %esi
testl %ecx, %ecx
cmovnsl %ecx, %esi
leal 0x7(%rcx), %edi
cmovnsl %ecx, %edi
andl $-0x4, %esi
movl %ecx, %r8d
subl %esi, %r8d
movl %edi, %esi
sarl $0x3, %esi
addl %r8d, %esi
andl $-0x8, %edi
movl %ecx, %r8d
subl %edi, %r8d
movl %r8d, %edi
sarb $0x7, %dil
shrb $0x6, %dil
addb %r8b, %dil
sarb $0x2, %dil
movsbl %dil, %edi
addl %esi, %edi
movslq %edi, %rsi
imulq 0x50(%rsp), %rsi
imulq 0x20(%rsp), %rsi
addq 0x10(%rsp), %rsi
xorl %edi, %edi
testl %ebx, %ebx
jle 0xfd335
movq 0x40(%r13), %r8
movq (%r13), %r9
imulq 0x10(%r13), %r8
addq %rdx, %r9
imulq %rdi, %r8
addq %r9, %r8
movl %ebx, %r9d
vmovapd (%r8), %ymm0
vmovapd %ymm0, (%rsi)
addq $0x20, %rsi
addq %rax, %r8
decl %r9d
jne 0xfd320
incq %rdi
cmpq %r12, %rdi
jne 0xfd302
incq %rcx
addq $0x20, %rdx
cmpq %rbp, %rcx
jl 0xfd2a3
movq 0x58(%rsp), %rax
movl %eax, %ecx
sarl %ecx
testl %ecx, %ecx
movq 0x60(%rsp), %r13
jle 0xfd6b3
movl %r12d, %eax
imull %ebx, %eax
shll $0x3, %eax
movl %ecx, %ecx
movq %rcx, 0x78(%rsp)
xorl %edx, %edx
vxorpd %xmm0, %xmm0, %xmm0
leaq (%rdx,%rdx), %r8
movq 0x68(%rsp), %rdi
movq 0x40(%rdi), %rcx
imulq %rcx, %r8
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rdi, %r8
addq %rsi, %r8
leaq 0x1(,%rdx,2), %r9
imulq %rcx, %r9
imulq %rdi, %r9
addq %rsi, %r9
movq %rdx, %r10
shlq $0x5, %r10
movq 0x70(%rsp), %rcx
addq %rcx, %r10
testq %rcx, %rcx
leaq 0x80(%rsp), %rcx
cmoveq %rcx, %r10
vmovupd %ymm0, 0x80(%rsp)
cmpl $0x8, %r13d
jl 0xfd52d
xorl %r11d, %r11d
xorl %esi, %esi
vmovups (%r10), %ymm1
testl %eax, %eax
jle 0xfd483
movq 0x50(%rsp), %r13
imulq 0x20(%rsp), %r13
imulq %r11, %r13
addq 0x10(%rsp), %r13
movq 0x40(%r14), %rcx
imulq %rdx, %rcx
imulq 0x10(%r14), %rcx
addq (%r14), %rcx
movl %eax, %r15d
xorl %edi, %edi
vmovaps %ymm1, %ymm6
vmovaps %ymm1, %ymm7
vmovaps %ymm1, %ymm8
vmovaps %ymm1, %ymm2
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm4
vmovaps %ymm1, %ymm5
vmovaps (%rcx,%rdi), %ymm9
vfmadd231ps (%r13,%rdi){1to8}, %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
vfmadd231ps 0x4(%r13,%rdi){1to8}, %ymm9, %ymm6 # ymm6 = (ymm9 * mem) + ymm6
vfmadd231ps 0x8(%r13,%rdi){1to8}, %ymm9, %ymm7 # ymm7 = (ymm9 * mem) + ymm7
vfmadd231ps 0xc(%r13,%rdi){1to8}, %ymm9, %ymm8 # ymm8 = (ymm9 * mem) + ymm8
vfmadd231ps 0x10(%r13,%rdi){1to8}, %ymm9, %ymm2 # ymm2 = (ymm9 * mem) + ymm2
vfmadd231ps 0x14(%r13,%rdi){1to8}, %ymm9, %ymm3 # ymm3 = (ymm9 * mem) + ymm3
vfmadd231ps 0x18(%r13,%rdi){1to8}, %ymm9, %ymm4 # ymm4 = (ymm9 * mem) + ymm4
vfmadd231ps 0x1c(%r13,%rdi){1to8}, %ymm9, %ymm5 # ymm5 = (ymm9 * mem) + ymm5
addq $0x20, %rdi
decl %r15d
jne 0xfd433
jmp 0xfd49f
vmovaps %ymm1, %ymm5
vmovaps %ymm1, %ymm4
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm2
vmovaps %ymm1, %ymm8
vmovaps %ymm1, %ymm7
vmovaps %ymm1, %ymm6
vinsertf128 $0x1, %xmm8, %ymm7, %ymm9
vinsertf128 $0x1, %xmm6, %ymm1, %ymm10
vinsertf64x4 $0x1, %ymm9, %zmm10, %zmm9
vmovups %zmm9, (%r8)
vinsertf128 $0x1, %xmm5, %ymm4, %ymm9
vinsertf128 $0x1, %xmm3, %ymm2, %ymm10
vinsertf64x4 $0x1, %ymm9, %zmm10, %zmm9
vmovups %zmm9, 0x40(%r8)
vinsertf64x4 $0x1, %ymm8, %zmm7, %zmm7
vinsertf64x4 $0x1, %ymm6, %zmm1, %zmm1
vshuff64x2 $0xdd, %zmm7, %zmm1, %zmm1 # zmm1 = zmm1[2,3,6,7],zmm7[2,3,6,7]
vmovupd %zmm1, (%r9)
vinsertf64x4 $0x1, %ymm5, %zmm4, %zmm1
vinsertf64x4 $0x1, %ymm3, %zmm2, %zmm2
vshuff64x2 $0xdd, %zmm1, %zmm2, %zmm1 # zmm1 = zmm2[2,3,6,7],zmm1[2,3,6,7]
vmovupd %zmm1, 0x40(%r9)
movl $0x80, %ecx
addq %rcx, %r8
addq %rcx, %r9
leaq 0x8(%rsi), %r13
addq $0xf, %rsi
incq %r11
cmpq %rbp, %rsi
movq %r13, %rsi
jl 0xfd3e1
jmp 0xfd530
xorl %r13d, %r13d
movl %r13d, %ecx
orl $0x3, %ecx
cmpl %ebp, %ecx
jge 0xfd625
movl %r13d, %esi
shrl $0x2, %esi
vmovups (%r10), %ymm1
testl %eax, %eax
jle 0xfd5c4
movl %r13d, %ecx
shrl $0x3, %ecx
movzbl %sil, %r11d
andl $0x1, %r11d
addl %ecx, %r11d
movq 0x50(%rsp), %rcx
movq 0x40(%r14), %rdi
imulq %rdx, %rdi
imulq 0x10(%r14), %rdi
addq (%r14), %rdi
imulq 0x20(%rsp), %rcx
imulq %r11, %rcx
addq 0x10(%rsp), %rcx
xorl %r11d, %r11d
movl %eax, %r15d
vmovaps %ymm1, %ymm4
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm2
vmovaps (%rdi,%r11,2), %ymm5
vfmadd231ps (%rcx,%r11){1to8}, %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x4(%rcx,%r11){1to8}, %ymm5, %ymm3 # ymm3 = (ymm5 * mem) + ymm3
vfmadd231ps 0x8(%rcx,%r11){1to8}, %ymm5, %ymm4 # ymm4 = (ymm5 * mem) + ymm4
vfmadd231ps 0xc(%rcx,%r11){1to8}, %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
addq $0x10, %r11
decl %r15d
jne 0xfd594
jmp 0xfd5d0
vmovaps %ymm1, %ymm2
vmovaps %ymm1, %ymm3
vmovaps %ymm1, %ymm4
vinsertf128 $0x1, %xmm1, %ymm4, %ymm5
vinsertf128 $0x1, %xmm3, %ymm2, %ymm6
vinsertf64x4 $0x1, %ymm5, %zmm6, %zmm5
vmovups %zmm5, (%r8)
vinsertf64x4 $0x1, %ymm1, %zmm4, %zmm1
vinsertf64x4 $0x1, %ymm3, %zmm2, %zmm2
vshuff64x2 $0xdd, %zmm1, %zmm2, %zmm1 # zmm1 = zmm2[2,3,6,7],zmm1[2,3,6,7]
vmovupd %zmm1, (%r9)
addq $0x40, %r8
addq $0x40, %r9
leal 0x4(%r13), %r11d
addl $0x7, %r13d
incb %sil
cmpl %ebp, %r13d
movl %r11d, %r13d
jl 0xfd544
jmp 0xfd628
movl %r13d, %r11d
cmpl %ebp, %r11d
movq 0x60(%rsp), %r13
jge 0xfd6a5
movl %r11d, %ecx
shrl $0x3, %ecx
movl %r11d, %edi
andl $0x3, %edi
btl $0x2, %r11d
adcl %ecx, %edi
vmovups (%r10), %ymm1
testl %eax, %eax
jle 0xfd68a
movq 0x40(%r14), %rcx
imulq %rdx, %rcx
imulq 0x10(%r14), %rcx
addq (%r14), %rcx
movq 0x50(%rsp), %rsi
imulq 0x20(%rsp), %rsi
imulq %rdi, %rsi
addq 0x10(%rsp), %rsi
xorl %edi, %edi
vbroadcastss (%rsi,%rdi,4), %ymm2
vfmadd231ps (%rcx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x20, %rcx
incq %rdi
cmpl %edi, %eax
jne 0xfd674
vmovaps %xmm1, (%r8)
vextractf128 $0x1, %ymm1, (%r9)
addq $0x10, %r8
addq $0x10, %r9
incl %r11d
cmpl %ebp, %r11d
jne 0xfd632
incq %rdx
cmpq 0x78(%rsp), %rdx
jne 0xfd379
movq 0x58(%rsp), %rcx
movq %rcx, %rax
andq $-0x2, %rax
cmpl %ecx, %eax
je 0xfd9d0
imull %r12d, %ebx
shll $0x3, %ebx
leaq 0x80(%rsp), %rcx
vxorpd %xmm0, %xmm0, %xmm0
movq 0x68(%rsp), %rsi
movq 0x40(%rsi), %rdx
imulq %rax, %rdx
imulq 0x10(%rsi), %rdx
addq (%rsi), %rdx
movq %rax, %rsi
shlq $0x4, %rsi
movq 0x70(%rsp), %rdi
addq %rdi, %rsi
testq %rdi, %rdi
cmoveq %rcx, %rsi
vmovapd %xmm0, 0x80(%rsp)
cmpl $0x8, %ebp
jl 0xfd83b
movl %eax, %edi
shrl $0x1f, %edi
addl %eax, %edi
movl %edi, %r8d
andl $-0x2, %r8d
movl %eax, %r9d
subl %r8d, %r9d
sarl %edi
addl %r9d, %edi
movslq %edi, %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
vmovups (%rsi), %xmm1
testl %ebx, %ebx
jle 0xfd7db
movq 0x40(%r14), %rdi
imulq %r8, %rdi
imulq 0x10(%r14), %rdi
addq (%r14), %rdi
movq 0x50(%rsp), %r11
imulq 0x20(%rsp), %r11
imulq %r9, %r11
addq 0x10(%rsp), %r11
xorl %r15d, %r15d
movl %ebx, %r12d
vmovaps %xmm1, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm1, %xmm7
vmovaps %xmm1, %xmm8
vmovaps (%rdi,%r15), %xmm9
vfmadd231ps (%r11,%r15,2){1to4}, %xmm9, %xmm8 # xmm8 = (xmm9 * mem) + xmm8
vfmadd231ps 0x4(%r11,%r15,2){1to4}, %xmm9, %xmm7 # xmm7 = (xmm9 * mem) + xmm7
vfmadd231ps 0x8(%r11,%r15,2){1to4}, %xmm9, %xmm6 # xmm6 = (xmm9 * mem) + xmm6
vfmadd231ps 0xc(%r11,%r15,2){1to4}, %xmm9, %xmm5 # xmm5 = (xmm9 * mem) + xmm5
vfmadd231ps 0x10(%r11,%r15,2){1to4}, %xmm9, %xmm4 # xmm4 = (xmm9 * mem) + xmm4
vfmadd231ps 0x14(%r11,%r15,2){1to4}, %xmm9, %xmm3 # xmm3 = (xmm9 * mem) + xmm3
vfmadd231ps 0x18(%r11,%r15,2){1to4}, %xmm9, %xmm2 # xmm2 = (xmm9 * mem) + xmm2
vfmadd231ps 0x1c(%r11,%r15,2){1to4}, %xmm9, %xmm1 # xmm1 = (xmm9 * mem) + xmm1
addq $0x10, %r15
decl %r12d
jne 0xfd78b
jmp 0xfd7f7
vmovaps %xmm1, %xmm8
vmovaps %xmm1, %xmm7
vmovaps %xmm1, %xmm6
vmovaps %xmm1, %xmm5
vmovaps %xmm1, %xmm4
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm2
vmovaps %xmm8, (%rdx)
vmovaps %xmm7, 0x10(%rdx)
vmovaps %xmm6, 0x20(%rdx)
vmovaps %xmm5, 0x30(%rdx)
vmovaps %xmm4, 0x40(%rdx)
vmovaps %xmm3, 0x50(%rdx)
vmovaps %xmm2, 0x60(%rdx)
vmovaps %xmm1, 0x70(%rdx)
subq $-0x80, %rdx
leaq 0x8(%r10), %rdi
addq $0xf, %r10
incq %r9
cmpq %rbp, %r10
movq %rdi, %r10
jl 0xfd739
jmp 0xfd83d
xorl %edi, %edi
movl %edi, %r8d
orl $0x3, %r8d
cmpl %ebp, %r8d
jge 0xfd92e
movl %eax, %r8d
shrl $0x1f, %r8d
addl %eax, %r8d
movl %r8d, %r9d
andl $-0x2, %r9d
movl %eax, %r10d
subl %r9d, %r10d
sarl %r8d
addl %r10d, %r8d
movslq %r8d, %r8
movl %edi, %r9d
shrl $0x2, %r9d
vmovups (%rsi), %xmm1
testl %ebx, %ebx
jle 0xfd8f4
movl %edi, %r11d
shrl $0x3, %r11d
movzbl %r9b, %r15d
andl $0x1, %r15d
movq 0x40(%r14), %r10
imulq %r8, %r10
imulq 0x10(%r14), %r10
addq (%r14), %r10
addl %r11d, %r15d
movq 0x50(%rsp), %r11
imulq 0x20(%rsp), %r11
imulq %r15, %r11
addq 0x10(%rsp), %r11
xorl %r15d, %r15d
movl %ebx, %r12d
vmovaps %xmm1, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps (%r10,%r15), %xmm5
vfmadd231ps (%r11,%r15){1to4}, %xmm5, %xmm4 # xmm4 = (xmm5 * mem) + xmm4
vfmadd231ps 0x4(%r11,%r15){1to4}, %xmm5, %xmm3 # xmm3 = (xmm5 * mem) + xmm3
vfmadd231ps 0x8(%r11,%r15){1to4}, %xmm5, %xmm2 # xmm2 = (xmm5 * mem) + xmm2
vfmadd231ps 0xc(%r11,%r15){1to4}, %xmm5, %xmm1 # xmm1 = (xmm5 * mem) + xmm1
addq $0x10, %r15
decl %r12d
jne 0xfd8c4
jmp 0xfd900
vmovaps %xmm1, %xmm4
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm2
vmovaps %xmm4, (%rdx)
vmovaps %xmm3, 0x10(%rdx)
vmovaps %xmm2, 0x20(%rdx)
vmovaps %xmm1, 0x30(%rdx)
addq $0x40, %rdx
leal 0x4(%rdi), %r10d
addl $0x7, %edi
incb %r9b
cmpl %ebp, %edi
movl %r10d, %edi
jl 0xfd874
jmp 0xfd931
movl %edi, %r10d
cmpl %r13d, %r10d
jge 0xfd9c2
movl %eax, %edi
shrl $0x1f, %edi
addl %eax, %edi
movl %edi, %r8d
andl $-0x2, %r8d
movl %eax, %r9d
subl %r8d, %r9d
sarl %edi
addl %r9d, %edi
movslq %edi, %rdi
movl %r10d, %r8d
shrl $0x3, %r8d
movl %r10d, %r11d
andl $0x3, %r11d
btl $0x2, %r10d
adcl %r8d, %r11d
vmovups (%rsi), %xmm1
testl %ebx, %ebx
jle 0xfd9b2
movq 0x40(%r14), %r8
imulq %rdi, %r8
imulq 0x10(%r14), %r8
addq (%r14), %r8
movq 0x50(%rsp), %r9
imulq 0x20(%rsp), %r9
imulq %r11, %r9
addq 0x10(%rsp), %r9
xorl %r11d, %r11d
vbroadcastss (%r9,%r11,4), %xmm2
vfmadd231ps (%r8), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
addq $0x10, %r8
incq %r11
cmpl %r11d, %ebx
jne 0xfd99b
vmovaps %xmm1, (%rdx)
addq $0x10, %rdx
incl %r10d
cmpl %ebp, %r10d
jne 0xfd956
incq %rax
cmpq 0x58(%rsp), %rax
jl 0xfd6da
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0xfda09
lock
decl (%rax)
jne 0xfda09
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0xfd9f9
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0xfda09
testq %rsi, %rsi
je 0xfda09
movq %rsi, %rdi
vzeroupper
callq 0x563b0
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0xfda5e
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0xfda56
lock
decl (%rax)
jne 0xfda56
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0xfda50
testq %rsi, %rsi
je 0xfda56
movq %rsi, %rdi
callq 0x563b0
jmp 0xfda56
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_sgemm_pack8to4.h
|
ncnn::conv3x3s1_winograd43_pack8_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
// 3x3 stride-1 convolution via Winograd F(4,3), 8-wide packed, AVX path.
// Pipeline: pad input -> transform input tiles -> batched dot with the
// pre-transformed kernel -> transform output tiles -> crop padding.
//
// bottom_blob : packed input (elempack read from the blob itself).
// top_blob    : destination; its w/h/c determine the final crop.
// kernel_tm   : kernel already in Winograd transform domain
//               (presumably produced by the matching transform_kernel
//               routine elsewhere in ncnn — confirm there).
// bias        : forwarded to the output transform.
static void conv3x3s1_winograd43_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // pad to 4n+2: F(4,3) consumes 6x6 input tiles that overlap by 2,
    // so each spatial dim is rounded up to a multiple of 4, plus 2.
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;
        // 36 = 6x6 transform coefficients per tile.
        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_pack8_avx(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    // Release the bordered copy early — it is no longer needed.
    bottom_blob_bordered = Mat();
    // END transform input
    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack8_avx(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot
    // BEGIN transform output
    // Write directly into top_blob when its shape already matches the
    // rounded-up tile grid; otherwise use a padded scratch blob.
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd43_transform_output_pack8_avx(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output
    // cut result pad (no-op when top_blob_bordered aliases top_blob)
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x6c0, %rsp # imm = 0x6C0
movq %r8, %r14
movq %rcx, 0x138(%rsp)
movq %rdx, 0x148(%rsp)
movq %rsi, %rbx
movl 0x38(%rdi), %r15d
movq 0x10(%rdi), %r12
movl 0x18(%rdi), %r13d
vmovq 0x2c(%rsi), %xmm0
movl 0x38(%rsi), %eax
movl %eax, 0x7c(%rsp)
movq 0x8(%rdi), %rdx
vmovups (%rdi), %xmm1
vmovaps %xmm1, 0xe0(%rsp)
movq %r12, 0xf0(%rsp)
movl %r13d, 0xf8(%rsp)
movq 0x20(%rdi), %rax
movq %rax, 0x100(%rsp)
movl 0x30(%rdi), %eax
movl 0x2c(%rdi), %ecx
vmovups 0x28(%rdi), %xmm1
vmovups %xmm1, 0x108(%rsp)
movl %r15d, 0x118(%rsp)
movq 0x40(%rdi), %rsi
movq %rsi, 0x120(%rsp)
testq %rdx, %rdx
je 0x118655
lock
incl (%rdx)
movl 0x2c(%rdi), %ecx
movl 0x30(%rdi), %eax
vbroadcastss 0x300792(%rip), %xmm1 # 0x418df0
vpaddd %xmm1, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
vpsrld $0x1e, %xmm1, %xmm1
vpaddd %xmm1, %xmm0, %xmm1
vbroadcastss 0x30077b(%rip), %xmm0 # 0x418df4
vmovdqa %xmm1, 0x20(%rsp)
vpand %xmm0, %xmm1, %xmm0
vpextrd $0x1, %xmm0, %edx
vmovdqa %xmm0, 0x210(%rsp)
vmovd %xmm0, %esi
movq %rsi, 0x128(%rsp)
leal 0x2(%rsi), %r9d
subl %ecx, %r9d
movq %rdx, 0x130(%rsp)
leal 0x2(%rdx), %ecx
subl %eax, %ecx
movq %r14, 0x8(%rsp)
movl $0x0, (%rsp)
leaq 0xe0(%rsp), %rsi
vpxor %xmm0, %xmm0, %xmm0
xorl %edx, %edx
xorl %r8d, %r8d
callq 0x5c159
movq %rbx, 0xd8(%rsp)
vmovdqa 0x20(%rsp), %xmm0
vpsrad $0x2, %xmm0, %xmm0
leaq 0x170(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vpextrd $0x1, %xmm0, %esi
vmovd %xmm0, %eax
imull %eax, %esi
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, (%rdi)
vmovdqu %xmm0, 0xc(%rdi)
vmovdqa %xmm0, 0x20(%rdi)
vmovdqu %xmm0, 0x2c(%rdi)
movq %r14, 0xd0(%rsp)
movq 0x10(%r14), %rax
movq %rax, (%rsp)
movl $0x24, %edx
movl %r15d, %ecx
movq %r12, 0x140(%rsp)
movq %r12, %r8
movl %r13d, 0x78(%rsp)
movl %r13d, %r9d
callq 0x5a266
movslq 0x118(%rsp), %rax
movq %rax, 0x48(%rsp)
testq %rax, %rax
jle 0x118af7
movl 0x10c(%rsp), %edx
movl 0x110(%rsp), %ecx
leal -0x2(%rcx), %eax
movq %rcx, 0x40(%rsp)
leal 0x1(%rcx), %esi
testl %eax, %eax
cmovnsl %eax, %esi
sarl $0x2, %esi
leal -0x2(%rdx), %ecx
leal 0x1(%rdx), %eax
testl %ecx, %ecx
cmovnsl %ecx, %eax
sarl $0x2, %eax
movl %eax, %ecx
imull %esi, %ecx
movq %rdx, 0x60(%rsp)
leal (,%rdx,8), %edx
movslq %edx, %rdi
leal (,%rcx,8), %edx
movslq %edx, %r8
movl %ecx, %edx
shll $0x4, %edx
movslq %edx, %r9
leal (%r8,%r8,2), %edx
movslq %edx, %rdx
shll $0x5, %ecx
movslq %ecx, %r10
leal (%r8,%r8,4), %ecx
movslq %ecx, %r11
leal (%r9,%r9,2), %ecx
movslq %ecx, %r14
cmpl $0x2, %eax
movl $0x1, %r12d
cmovgel %eax, %r12d
movl $0x1, %ecx
cmpl $0x2, %esi
cmovll %ecx, %esi
movq %rsi, 0x58(%rsp)
cltq
shlq $0x2, %rdi
shlq $0x5, %rax
movq %rax, 0x50(%rsp)
shlq $0x2, %r14
shlq $0x2, %r8
movq %r8, 0x38(%rsp)
shlq $0x2, %r9
movq %r9, 0x168(%rsp)
shlq $0x2, %rdx
movq %rdx, 0x160(%rsp)
shlq $0x2, %r11
movq %r11, 0x150(%rsp)
shlq $0x2, %r10
movq %r10, 0x158(%rsp)
movq $0x0, 0x18(%rsp)
vbroadcastss 0x2ff26c(%rip), %ymm0 # 0x417ab8
vbroadcastss 0x2ff25f(%rip), %ymm1 # 0x417ab4
vbroadcastss 0x300586(%rip), %ymm2 # 0x418de4
cmpl $0x6, 0x40(%rsp)
jl 0x118adf
movslq 0x10c(%rsp), %rdx
movq 0xe0(%rsp), %rax
movq 0xf0(%rsp), %rcx
movq 0x180(%rsp), %r11
imulq %rcx, %rdx
shlq $0x2, %rdx
movq %rdx, 0x68(%rsp)
imulq 0x120(%rsp), %rcx
imulq 0x1b0(%rsp), %r11
movq 0x18(%rsp), %rdx
imulq %rdx, %rcx
imulq %rdx, %r11
addq 0x170(%rsp), %r11
addq %rcx, %rax
addq $0xa0, %rax
movq %rax, 0x20(%rsp)
movq 0x38(%rsp), %rax
leaq (%r11,%rax), %r9
movq 0x168(%rsp), %rax
leaq (%r11,%rax), %r8
movq 0x160(%rsp), %rax
addq %r11, %rax
movq 0x150(%rsp), %rcx
leaq (%r11,%rcx), %rdx
movq 0x158(%rsp), %rcx
leaq (%r11,%rcx), %rbx
movq $0x0, 0x10(%rsp)
xorl %esi, %esi
movq %rsi, 0x70(%rsp)
cmpl $0x6, 0x60(%rsp)
jl 0x118aae
movq 0x10(%rsp), %rsi
movq 0x20(%rsp), %r15
xorl %r10d, %r10d
movq $-0xc0, %r13
movq %r15, %rcx
vmovaps -0x80(%rcx), %ymm3
vmovaps -0x60(%rcx), %ymm4
vmovaps -0x40(%rcx), %ymm5
vmovaps -0x20(%rcx), %ymm6
vmovaps -0xa0(%rcx), %ymm7
vfnmadd132ps %ymm0, %ymm6, %ymm7 # ymm7 = -(ymm7 * ymm0) + ymm6
vfmadd231ps %ymm1, %ymm4, %ymm7 # ymm7 = (ymm4 * ymm1) + ymm7
vaddps %ymm3, %ymm4, %ymm8
vaddps %ymm5, %ymm6, %ymm9
vfmadd231ps %ymm8, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm8) + ymm9
vsubps %ymm4, %ymm3, %ymm8
vsubps %ymm5, %ymm6, %ymm10
vfnmadd231ps %ymm8, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm8) + ymm10
vsubps %ymm5, %ymm3, %ymm8
vsubps %ymm4, %ymm6, %ymm4
vmovaps %ymm2, %ymm6
vfmadd213ps %ymm4, %ymm8, %ymm6 # ymm6 = (ymm8 * ymm6) + ymm4
vfnmadd213ps %ymm4, %ymm2, %ymm8 # ymm8 = -(ymm2 * ymm8) + ymm4
vfnmadd213ps (%rcx), %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm3) + mem
vfmadd231ps %ymm5, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm5) + ymm3
vmovaps %ymm7, 0x2e0(%rsp,%r13)
vmovaps %ymm9, 0x3a0(%rsp,%r13)
vmovaps %ymm10, 0x460(%rsp,%r13)
vmovaps %ymm6, 0x520(%rsp,%r13)
vmovaps %ymm8, 0x5e0(%rsp,%r13)
vmovaps %ymm3, 0x6a0(%rsp,%r13)
addq %rdi, %rcx
addq $0x20, %r13
jne 0x118935
movl $0xa0, %r13d
movq %rsi, %rcx
vmovaps 0x1a0(%rsp,%r13), %ymm3
vmovaps 0x1c0(%rsp,%r13), %ymm4
vmovaps 0x1e0(%rsp,%r13), %ymm5
vmovaps 0x200(%rsp,%r13), %ymm6
vmovaps 0x180(%rsp,%r13), %ymm7
vfnmadd132ps %ymm0, %ymm6, %ymm7 # ymm7 = -(ymm7 * ymm0) + ymm6
vfmadd231ps %ymm1, %ymm4, %ymm7 # ymm7 = (ymm4 * ymm1) + ymm7
vaddps %ymm3, %ymm4, %ymm8
vaddps %ymm5, %ymm6, %ymm9
vfmadd231ps %ymm8, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm8) + ymm9
vsubps %ymm4, %ymm3, %ymm8
vsubps %ymm5, %ymm6, %ymm10
vfnmadd231ps %ymm8, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm8) + ymm10
vsubps %ymm5, %ymm3, %ymm8
vsubps %ymm4, %ymm6, %ymm4
vmovaps %ymm2, %ymm6
vfmadd213ps %ymm4, %ymm8, %ymm6 # ymm6 = (ymm8 * ymm6) + ymm4
vfnmadd213ps %ymm4, %ymm2, %ymm8 # ymm8 = -(ymm2 * ymm8) + ymm4
vfnmadd213ps 0x220(%rsp,%r13), %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm3) + mem
vfmadd231ps %ymm5, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm5) + ymm3
vmovaps %ymm7, (%r11,%rcx)
vmovaps %ymm9, (%r9,%rcx)
vmovaps %ymm10, (%r8,%rcx)
vmovaps %ymm6, (%rax,%rcx)
vmovaps %ymm8, (%rbx,%rcx)
vmovaps %ymm3, (%rdx,%rcx)
addq $0xc0, %r13
addq %r14, %rcx
cmpq $0x520, %r13 # imm = 0x520
jne 0x1189e7
incq %r10
subq $-0x80, %r15
addq $0x20, %rsi
cmpq %r12, %r10
jne 0x11892b
movq 0x70(%rsp), %rsi
incq %rsi
movq 0x20(%rsp), %rcx
addq 0x68(%rsp), %rcx
movq %rcx, 0x20(%rsp)
movq 0x10(%rsp), %rcx
addq 0x50(%rsp), %rcx
movq %rcx, 0x10(%rsp)
cmpq 0x58(%rsp), %rsi
jne 0x11890e
movq 0x18(%rsp), %rcx
incq %rcx
movq %rcx, 0x18(%rsp)
cmpq 0x48(%rsp), %rcx
jne 0x11885e
movq 0xe8(%rsp), %rax
testq %rax, %rax
movq 0xd8(%rsp), %rbx
movq 0xd0(%rsp), %r14
movl 0x78(%rsp), %r15d
movl 0x7c(%rsp), %r12d
je 0x118b53
lock
decl (%rax)
jne 0x118b53
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x118b43
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x118b53
testq %rsi, %rsi
je 0x118b53
movq %rsi, %rdi
vzeroupper
callq 0x563b0
leaq 0x100(%rsp), %rax
xorl %edx, %edx
movq %rdx, 0x20(%rax)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, -0x20(%rax)
vmovdqu %xmm0, -0x14(%rax)
vmovdqu %xmm0, 0xc(%rax)
vmovdqa %xmm0, (%rax)
leaq 0x1c0(%rsp), %rcx
movq %rdx, 0x40(%rcx)
vmovdqa %xmm0, (%rcx)
vmovdqu %xmm0, 0xc(%rcx)
vmovdqa %xmm0, 0x20(%rcx)
vmovdqu %xmm0, 0x2c(%rcx)
leaq 0x170(%rsp), %rdi
movl %r12d, %esi
movq 0x148(%rsp), %rdx
movq %r14, %r8
vzeroupper
callq 0x12fd7b
movq 0x140(%rsp), %r8
movq $0x0, 0xc0(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x80(%rsp)
vmovdqu %xmm0, 0x8c(%rsp)
vmovdqa %xmm0, 0xa0(%rsp)
vmovdqu %xmm0, 0xac(%rsp)
vmovq 0x2c(%rbx), %xmm0
vpcmpeqd 0x210(%rsp), %xmm0, %xmm0
vpmovsxdq %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vtestpd %xmm1, %xmm0
jae 0x118c5f
leaq 0x80(%rsp), %rax
cmpq %rbx, %rax
je 0x118ca3
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x118cbb
lock
incl (%rax)
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x118cbb
lock
decl (%rax)
jne 0x118cbb
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x118cae
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x118cbb
movq 0x10(%r14), %rax
movq %rax, (%rsp)
leaq 0x80(%rsp), %rdi
movq 0x128(%rsp), %rsi
movq 0x130(%rsp), %rdx
movl %r12d, %ecx
movl %r15d, %r9d
callq 0x5a266
movl 0xb8(%rsp), %eax
movl 0xac(%rsp), %r9d
movl 0xb0(%rsp), %r13d
jmp 0x118d16
xorl %r9d, %r9d
xorl %r13d, %r13d
jmp 0x11902d
testq %rsi, %rsi
je 0x118cbb
movq %rsi, %rdi
callq 0x563b0
vmovups (%rbx), %xmm0
vmovaps %xmm0, 0x80(%rsp)
movq 0x10(%rbx), %rax
movq %rax, 0x90(%rsp)
movl 0x18(%rbx), %eax
movl %eax, 0x98(%rsp)
movq 0x20(%rbx), %rax
movq %rax, 0xa0(%rsp)
movl 0x30(%rbx), %r13d
movl 0x2c(%rbx), %r9d
vmovupd 0x28(%rbx), %xmm0
vmovupd %xmm0, 0xa8(%rsp)
movl 0x38(%rbx), %eax
movl %eax, 0xb8(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, 0xc0(%rsp)
testl %eax, %eax
jle 0x11902d
movq 0x138(%rsp), %rcx
movq (%rcx), %rbx
leal 0x3(%r13), %edx
testl %r13d, %r13d
cmovnsl %r13d, %edx
sarl $0x2, %edx
leal 0x3(%r9), %esi
testl %r9d, %r9d
cmovnsl %r9d, %esi
sarl $0x2, %esi
movl %esi, %ecx
movq %rdx, 0x58(%rsp)
imull %edx, %ecx
leal (,%rcx,8), %edx
movslq %edx, %rdi
movl %ecx, %edx
shll $0x4, %edx
movslq %edx, %r8
leal (%rdi,%rdi,2), %edx
movslq %edx, %r10
shll $0x5, %ecx
movslq %ecx, %r11
leal (%rdi,%rdi,4), %ecx
movslq %ecx, %r14
leal (%r8,%r8,2), %ecx
movslq %ecx, %r15
leal (,%r9,8), %ecx
movslq %ecx, %r12
movl %eax, %eax
movq %rax, 0x38(%rsp)
leaq (,%rsi,8), %rax
movq %rax, 0x50(%rsp)
shlq $0x2, %r15
shlq $0x2, %r12
xorl %eax, %eax
vbroadcastss 0x300040(%rip), %ymm0 # 0x418dec
vbroadcastss 0x2fecf3(%rip), %ymm1 # 0x417aa8
vbroadcastss 0x2fecee(%rip), %ymm2 # 0x417aac
movq %rbx, 0x40(%rsp)
movq %r9, 0x60(%rsp)
movq %r13, 0x48(%rsp)
testq %rbx, %rbx
movq %rax, 0x18(%rsp)
je 0x118de2
shlq $0x5, %rax
vmovups (%rbx,%rax), %ymm3
jmp 0x118de6
vxorps %xmm3, %xmm3, %xmm3
cmpl $0x4, %r13d
jl 0x118ff0
movq 0x1d0(%rsp), %rcx
movslq 0xac(%rsp), %rbx
movq 0x80(%rsp), %rax
movq 0x90(%rsp), %rdx
imulq %rdx, %rbx
shlq $0x2, %rbx
movq %rbx, 0x68(%rsp)
imulq 0x200(%rsp), %rcx
movq 0x18(%rsp), %rbx
imulq %rbx, %rcx
addq 0x1c0(%rsp), %rcx
imulq 0xc0(%rsp), %rdx
imulq %rbx, %rdx
addq %rdx, %rax
addq $0x60, %rax
movq %rax, 0x20(%rsp)
movq $0x0, 0x10(%rsp)
xorl %edx, %edx
movq %rdx, 0x70(%rsp)
cmpl $0x4, %r9d
jl 0x118fba
movq 0x20(%rsp), %r9
movq 0x10(%rsp), %rax
xorl %ebx, %ebx
movl %eax, %edx
shrl $0x3, %edx
shlq $0x5, %rdx
addq %rcx, %rdx
movq $-0xc0, %r13
vmovaps (%rdx,%rdi,4), %ymm4
vmovaps (%rdx,%r8,4), %ymm5
vmovaps (%rdx,%r10,4), %ymm6
vmovaps (%rdx,%r11,4), %ymm7
vaddps %ymm4, %ymm5, %ymm8
vsubps %ymm5, %ymm4, %ymm4
vaddps %ymm6, %ymm7, %ymm5
vsubps %ymm7, %ymm6, %ymm6
vaddps (%rdx), %ymm8, %ymm7
vaddps %ymm5, %ymm7, %ymm7
vmovaps %ymm0, %ymm9
vfmadd213ps %ymm4, %ymm6, %ymm9 # ymm9 = (ymm6 * ymm9) + ymm4
vaddps (%rdx,%r14,4), %ymm4, %ymm4
vfmadd213ps %ymm8, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm8
vfmadd231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) + ymm4
vmovaps %ymm7, 0x2e0(%rsp,%r13)
vmovaps %ymm9, 0x3a0(%rsp,%r13)
vmovaps %ymm5, 0x460(%rsp,%r13)
vmovaps %ymm4, 0x520(%rsp,%r13)
addq %r15, %rdx
addq $0x20, %r13
jne 0x118e89
movl $0xa0, %edx
movq %r9, %r13
vmovaps 0x1a0(%rsp,%rdx), %ymm4
vmovaps 0x1c0(%rsp,%rdx), %ymm5
vmovaps 0x1e0(%rsp,%rdx), %ymm6
vmovaps 0x200(%rsp,%rdx), %ymm7
vaddps %ymm4, %ymm5, %ymm8
vsubps %ymm5, %ymm4, %ymm4
vaddps %ymm6, %ymm7, %ymm5
vsubps %ymm7, %ymm6, %ymm6
vaddps 0x180(%rsp,%rdx), %ymm3, %ymm7
vaddps %ymm7, %ymm8, %ymm7
vaddps %ymm5, %ymm7, %ymm7
vmovaps %ymm0, %ymm9
vfmadd213ps %ymm4, %ymm6, %ymm9 # ymm9 = (ymm6 * ymm9) + ymm4
vaddps %ymm3, %ymm9, %ymm9
vfmadd213ps %ymm8, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm8
vaddps %ymm3, %ymm5, %ymm5
vaddps 0x220(%rsp,%rdx), %ymm4, %ymm4
vfmadd231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) + ymm4
vaddps %ymm3, %ymm4, %ymm4
vmovaps %ymm7, -0x60(%r13)
vmovaps %ymm9, -0x40(%r13)
vmovaps %ymm5, -0x20(%r13)
vmovaps %ymm4, (%r13)
addq $0xc0, %rdx
addq %r12, %r13
cmpq $0x3a0, %rdx # imm = 0x3A0
jne 0x118f0a
incq %rbx
addq $0x8, %rax
subq $-0x80, %r9
cmpq %rsi, %rbx
jne 0x118e76
movq 0x70(%rsp), %rdx
incq %rdx
movq 0x10(%rsp), %rax
addq 0x50(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x20(%rsp), %rax
addq 0x68(%rsp), %rax
movq %rax, 0x20(%rsp)
cmpq 0x58(%rsp), %rdx
movq 0x60(%rsp), %r9
jne 0x118e5b
movq 0x18(%rsp), %rax
incq %rax
cmpq 0x38(%rsp), %rax
movq 0x48(%rsp), %r13
movq 0x40(%rsp), %rbx
jne 0x118dcd
movl 0xac(%rsp), %r9d
movl 0xb0(%rsp), %r13d
movq 0xd8(%rsp), %rbx
movq 0xd0(%rsp), %r14
subl 0x30(%rbx), %r13d
subl 0x2c(%rbx), %r9d
movq %r14, (%rsp)
leaq 0x80(%rsp), %rdi
movq %rbx, %rsi
xorl %edx, %edx
movl %r13d, %ecx
xorl %r8d, %r8d
vzeroupper
callq 0x5c3a3
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x119090
lock
decl (%rax)
jne 0x119090
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x119083
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x119090
testq %rsi, %rsi
je 0x119090
movq %rsi, %rdi
callq 0x563b0
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0x1190cc
lock
decl (%rax)
jne 0x1190cc
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
je 0x1190bf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1190cc
testq %rsi, %rsi
je 0x1190cc
movq %rsi, %rdi
callq 0x563b0
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0x119108
lock
decl (%rax)
jne 0x119108
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
je 0x1190fb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x119108
testq %rsi, %rsi
je 0x119108
movq %rsi, %rdi
callq 0x563b0
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x119144
lock
decl (%rax)
jne 0x119144
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x119137
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x119144
testq %rsi, %rsi
je 0x119144
movq %rsi, %rdi
callq 0x563b0
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x11927f
jmp 0x11927f
jmp 0x11927f
jmp 0x11927f
jmp 0x11916e
movq %rax, %rbx
jmp 0x1191bd
movq %rax, %rbx
jmp 0x1191f9
movq %rax, %rbx
jmp 0x119235
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x1191bd
lock
decl (%rax)
jne 0x1191bd
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1191b7
testq %rsi, %rsi
je 0x1191bd
movq %rsi, %rdi
callq 0x563b0
jmp 0x1191bd
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x1c8(%rsp), %rax
testq %rax, %rax
je 0x1191f9
lock
decl (%rax)
jne 0x1191f9
movq 0x1c0(%rsp), %rsi
movq 0x1e0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1191f3
testq %rsi, %rsi
je 0x1191f9
movq %rsi, %rdi
callq 0x563b0
jmp 0x1191f9
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x178(%rsp), %rax
testq %rax, %rax
je 0x119235
lock
decl (%rax)
jne 0x119235
movq 0x170(%rsp), %rsi
movq 0x190(%rsp), %rdi
testq %rdi, %rdi
jne 0x11922f
testq %rsi, %rsi
je 0x119235
movq %rsi, %rdi
callq 0x563b0
jmp 0x119235
movq (%rdi), %rax
callq *0x18(%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x119271
lock
decl (%rax)
jne 0x119271
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x11926b
testq %rsi, %rsi
je 0x119271
movq %rsi, %rdi
callq 0x563b0
jmp 0x119271
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
jmp 0x11927f
jmp 0x11927f
jmp 0x11927f
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_3x3_pack8.h
|
ncnn::conv1x1s2_sgemm_pack8to4_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
static void conv1x1s2_sgemm_pack8to4_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m256 _v = _mm256_load_ps(r0);
_mm256_store_ps(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %r8, %rax
movq %rcx, 0x10(%rsp)
movq %rdx, 0x8(%rsp)
movq %rsi, %r12
movq %rdi, %r13
movl 0x2c(%rdi), %ebx
movl 0x38(%rdi), %ebp
movq 0x10(%rdi), %r8
movl 0x18(%rdi), %r9d
movl 0x2c(%rsi), %r14d
movl 0x30(%rsi), %r15d
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq %rax, 0x18(%rsp)
movq 0x10(%rax), %rax
movq %rax, (%rsp)
movl %r14d, %esi
movl %r15d, %edx
movl %ebp, %ecx
callq 0x5a266
testl %ebp, %ebp
jle 0x11fae8
subl %r14d, %ebx
shll $0x4, %ebx
movslq %ebx, %rax
xorl %ecx, %ecx
testl %r15d, %r15d
jle 0x11fae0
movq 0x60(%rsp), %rdx
imulq %rcx, %rdx
imulq 0x30(%rsp), %rdx
addq 0x20(%rsp), %rdx
movq 0x40(%r13), %rsi
imulq %rcx, %rsi
imulq 0x10(%r13), %rsi
addq (%r13), %rsi
xorl %edi, %edi
movl %r14d, %r8d
testl %r14d, %r14d
jle 0x11fad5
vmovaps (%rsi), %ymm0
vmovaps %ymm0, (%rdx)
addq $0x40, %rsi
addq $0x20, %rdx
decl %r8d
jne 0x11fac0
leaq (%rsi,%rax,4), %rsi
incl %edi
cmpl %r15d, %edi
jne 0x11fab8
incq %rcx
cmpq %rbp, %rcx
jne 0x11fa8c
leaq 0x20(%rsp), %rdi
movq %r12, %rsi
movq 0x8(%rsp), %rdx
movq 0x10(%rsp), %rcx
movq 0x18(%rsp), %r8
vzeroupper
callq 0x11f911
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x11fb3a
lock
decl (%rax)
jne 0x11fb3a
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x11fb2d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x11fb3a
testq %rsi, %rsi
je 0x11fb3a
movq %rsi, %rdi
callq 0x563b0
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x11fb89
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x11fb81
lock
decl (%rax)
jne 0x11fb81
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x11fb7b
testq %rsi, %rsi
je 0x11fb81
movq %rsi, %rdi
callq 0x563b0
jmp 0x11fb81
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/convolution_1x1_pack8to4.h
|
virtual thunk to ncnn::Crop_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Crop_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Crop::forward(bottom_blob_unpacked, top_blob, opt);
}
|
movq (%rdi), %rax
addq -0x48(%rax), %rdi
jmp 0x18258c
|
/ysh329[P]ncnn/src/layer/x86/crop_x86.cpp
|
ncnn::Crop_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Crop_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
if (dims == 1)
{
int out_elempack = _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
return Crop::forward(bottom_blob_unpacked, top_blob, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x148, %rsp # imm = 0x148
movq %rcx, 0x78(%rsp)
movq %rdx, 0x8(%rsp)
movq %rsi, %r12
movq %rdi, 0x140(%rsp)
movl 0x2c(%rsi), %r14d
movslq %r14d, %rax
movl 0x30(%rsi), %r15d
movslq %r15d, %rcx
movl 0x34(%rsi), %edx
movl %edx, 0xf4(%rsp)
movslq %edx, %rdx
movl 0x38(%rsi), %esi
movq %rsi, 0xe0(%rsp)
movl 0x28(%r12), %ebp
movq 0x10(%r12), %rsi
movl 0x18(%r12), %edi
cmpl $0x4, %edi
je 0x185696
cmpl $0x8, %edi
je 0x185610
cmpl $0x10, %edi
jne 0x186e9f
movl %edi, 0xec(%rsp)
movq %rsi, 0xd8(%rsp)
movq 0x140(%rsp), %r8
movq (%r8), %rsi
movq -0x18(%rsi), %rdi
addq %r8, %rdi
decl %ebp
cmpl $0x3, %ebp
ja 0x185b9d
leaq 0x294277(%rip), %rsi # 0x419834
movslq (%rsi,%rbp,4), %r8
addq %rsi, %r8
jmpq *%r8
shlq $0x4, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %ecx
movl %ecx, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x50(%rsp)
movl %ecx, 0x58(%rsp)
jmp 0x1859ae
movl %edi, 0xec(%rsp)
movq %rsi, 0xd8(%rsp)
movq 0x140(%rsp), %r8
movq (%r8), %rsi
movq -0x18(%rsi), %rdi
addq %r8, %rdi
decl %ebp
cmpl $0x3, %ebp
ja 0x185a63
leaq 0x2941d1(%rip), %rsi # 0x419814
movslq (%rsi,%rbp,4), %r8
addq %rsi, %r8
jmpq *%r8
shlq $0x3, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %ecx
movl %ecx, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x50(%rsp)
movl %ecx, 0x58(%rsp)
jmp 0x1857d6
movl %edi, 0xec(%rsp)
movq %rsi, 0xd8(%rsp)
movq 0x140(%rsp), %r8
movq (%r8), %rsi
movq -0x18(%rsi), %rdi
addq %r8, %rdi
decl %ebp
cmpl $0x3, %ebp
ja 0x185b00
leaq 0x29412b(%rip), %rsi # 0x4197f4
movslq (%rsi,%rbp,4), %r8
addq %rsi, %r8
jmpq *%r8
shlq $0x2, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %ecx
movl %ecx, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x50(%rsp)
movl %ecx, 0x58(%rsp)
jmp 0x185942
movq 0xe0(%rsp), %rdx
leal (,%rdx,8), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %esi
movl %esi, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x3, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %r15d, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x58(%rsp)
imulq %rax, %rcx
jmp 0x1857c5
movq 0xe0(%rsp), %rsi
leal (,%rsi,8), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %r15d, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x60(%rsp)
jmp 0x185a88
movq 0xe0(%rsp), %rdx
leal (,%rdx,4), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %esi
movl %esi, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x3, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x58(%rsp)
imulq %rax, %rcx
jmp 0x185931
movq 0xe0(%rsp), %rdx
shll $0x4, %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %esi
movl %esi, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x3, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %r15d, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x58(%rsp)
imulq %rax, %rcx
jmp 0x18599d
shlq $0x3, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x2, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x54(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
jmp 0x185a88
movq 0xe0(%rsp), %rsi
leal (,%rsi,4), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x60(%rsp)
jmp 0x185b25
movq 0xe0(%rsp), %rsi
shll $0x4, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %r15d, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x60(%rsp)
jmp 0x185bc2
shlq $0x2, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x2, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x54(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
jmp 0x185b25
shlq $0x4, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x2, 0x48(%rsp)
movl %r14d, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x54(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
jmp 0x185bc2
movq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
leaq 0x6c(%rsp), %rax
leaq 0x70(%rsp), %r10
leaq 0x18(%rsp), %r11
leaq 0x14(%rsp), %r13
leaq 0x20(%rsp), %rsi
leaq 0x1c(%rsp), %rdx
leaq 0x74(%rsp), %rcx
leaq 0xf0(%rsp), %r8
leaq 0xd4(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
callq 0x180bfc
addq $0x20, %rsp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x185c47
lock
decl (%rax)
jne 0x185c47
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x185c3a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x185c47
movq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
leaq 0x6c(%rsp), %rax
leaq 0x70(%rsp), %r10
leaq 0x18(%rsp), %r11
leaq 0x14(%rsp), %r13
leaq 0x20(%rsp), %rsi
leaq 0x1c(%rsp), %rdx
leaq 0x74(%rsp), %rcx
leaq 0xf0(%rsp), %r8
leaq 0xd4(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
callq 0x180bfc
addq $0x20, %rsp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18625a
lock
decl (%rax)
jne 0x18625a
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18624d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18625a
movq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
leaq 0x6c(%rsp), %rax
leaq 0x70(%rsp), %r10
leaq 0x18(%rsp), %r11
leaq 0x14(%rsp), %r13
leaq 0x20(%rsp), %rsi
leaq 0x1c(%rsp), %rdx
leaq 0x74(%rsp), %rcx
leaq 0xf0(%rsp), %r8
leaq 0xd4(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
callq 0x180bfc
addq $0x20, %rsp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1867e3
lock
decl (%rax)
jne 0x1867e3
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1867d6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1867e3
testq %rsi, %rsi
je 0x185c47
movq %rsi, %rdi
callq 0x563b0
cmpl $0x3, %ebp
ja 0x186e98
leaq 0x293bcd(%rip), %rax # 0x419824
movslq (%rax,%rbp,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x14(%rsp), %esi
movl %esi, %edi
andl $0x7, %edi
xorl %eax, %eax
testb $0x3, %sil
sete %al
movq 0xd8(%rsp), %r9
shrq $0x3, %r9
leal (%rax,%rax), %ecx
testl %edi, %edi
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movzbl %cl, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r9
movl %esi, %eax
cltd
idivl %r8d
xorl %eax, %r14d
orl %edi, %r14d
jne 0x1868fa
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x185ccb
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %r13
shrq $0x3, %r13
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r8d
xorl %r8d, %r15d
orl %r14d, %r15d
sete %r9b
jne 0x185f8b
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %ecx
cmovnel %eax, %ecx
movl %edi, %eax
cltd
idivl %ecx
testb $0x7, %dil
jne 0x185f8b
cmpl 0xe0(%rsp), %eax
jne 0x185f8b
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x185d9f
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %r13
shrq $0x3, %r13
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r9d
xorl %r9d, %r15d
orl %r14d, %r15d
movl 0x70(%rsp), %ecx
movl 0xf4(%rsp), %eax
xorl %ecx, %eax
orl %r15d, %eax
sete %r10b
jne 0x1860aa
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x1860aa
cmpl 0xe0(%rsp), %eax
jne 0x1860aa
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x185e86
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x18(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %rdx
shrq $0x3, %rdx
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %rdx
movq %rdx, %rcx
movl 0x14(%rsp), %esi
cmpl %r14d, %esi
jne 0x1861cd
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x1861cd
cmpl %r15d, %eax
jne 0x1861cd
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x185f4d
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0xd4(%rsp), %eax
movl %eax, %ecx
orl %edi, %ecx
testb $0x7, %cl
jne 0x186e98
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x3, %ecx
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r10, 0x40(%rsp)
movl %ecx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %rdi, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r9b, %r9b
movq 0x78(%rsp), %r14
je 0x187e2f
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x187db8
testq %rax, %rax
je 0x18606f
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187d58
lock
decl (%rax)
jne 0x187d58
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x187d4b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x187d58
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x7, %dl
jne 0x186e98
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x3, %r8d
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movl %r8d, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %rdi, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r10b, %r10b
je 0x186f9a
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18702a
testq %rax, %rax
movq 0x78(%rsp), %r14
je 0x186192
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x188079
lock
decl (%rax)
jne 0x188079
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18806c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x188079
movl 0x74(%rsp), %eax
orl %edi, %eax
testb $0x7, %al
jne 0x186e98
leal 0x7(%rdi), %edx
testl %edi, %edi
cmovnsl %edi, %edx
sarl $0x3, %edx
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x8, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x74(%rsp), %eax
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
movl 0x1c(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x188901
jmp 0x186e72
testq %rsi, %rsi
je 0x18625a
movq %rsi, %rdi
callq 0x563b0
cmpl $0x3, %ebp
ja 0x186e98
leaq 0x29359a(%rip), %rax # 0x419804
movslq (%rax,%rbp,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x14(%rsp), %esi
xorl %ecx, %ecx
movl %esi, %edi
andl $0x3, %edi
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
movq 0xd8(%rsp), %r10
shrq $0x2, %r10
addb %cl, %cl
shlq %cl, %r10
movl %esi, %eax
cltd
idivl %r9d
xorl %eax, %r14d
orl %edi, %r14d
jne 0x186973
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x1862c5
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %cl
movq 0xd8(%rsp), %r13
shrq $0x2, %r13
addb %cl, %cl
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r8d
xorl %r8d, %r15d
orl %r14d, %r15d
sete %cl
jne 0x186538
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r9d
incl %r9d
movl %edi, %eax
cltd
idivl %r9d
testb $0x3, %dil
jne 0x186538
cmpl 0xe0(%rsp), %eax
jne 0x186538
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x186381
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %cl
movq 0xd8(%rsp), %r13
shrq $0x2, %r13
addb %cl, %cl
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r9d
xorl %r9d, %r15d
orl %r14d, %r15d
movl 0x70(%rsp), %ecx
movl 0xf4(%rsp), %eax
xorl %ecx, %eax
orl %r15d, %eax
sete %r8b
jne 0x186648
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r10d
incl %r10d
movl %edi, %eax
cltd
idivl %r10d
testb $0x3, %dil
jne 0x186648
cmpl 0xe0(%rsp), %eax
jne 0x186648
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18644e
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x18(%rsp), %r8d
testb $0x3, %r8b
sete %cl
movq 0xd8(%rsp), %rax
shrq $0x2, %rax
addb %cl, %cl
shlq %cl, %rax
movq %rax, %rcx
movl 0x14(%rsp), %esi
cmpl %r14d, %esi
jne 0x186759
xorl %eax, %eax
testb $0x3, %r8b
sete %al
leal (%rax,%rax,2), %edi
incl %edi
movl %r8d, %eax
cltd
idivl %edi
testb $0x3, %r8b
jne 0x186759
cmpl %r15d, %eax
jne 0x186759
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x1864fa
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x3, %dl
jne 0x186e98
sarl $0x2, %eax
movl %edi, %edx
sarl $0x2, %edx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r9
imulq %r9, %rax
addq (%r12), %rax
movl 0x18(%r12), %r10d
movq 0x20(%r12), %r11
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %r9, 0x30(%rsp)
movl %r10d, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movl %edx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %r9, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %cl, %cl
movq 0x78(%rsp), %r14
je 0x187195
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x187129
testq %rax, %rax
je 0x18660d
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x1870c9
lock
decl (%rax)
jne 0x1870c9
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x1870bc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870c9
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x3, %dl
jne 0x186e98
sarl $0x2, %eax
movl %edi, %edx
sarl $0x2, %edx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r10
imulq %r10, %rax
addq (%r12), %rax
movl 0x18(%r12), %r11d
movq 0x20(%r12), %r14
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %r11d, 0x38(%rsp)
movq %r14, 0x40(%rsp)
movl %edx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %r10, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r10
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r8b, %r8b
je 0x186e79
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x187016
testq %rax, %rax
movq 0x78(%rsp), %r14
je 0x18671e
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18771b
lock
decl (%rax)
jne 0x18771b
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18770e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18771b
movl 0x74(%rsp), %eax
orl %r8d, %eax
testb $0x3, %al
jne 0x186e98
sarl $0x2, %r8d
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r8d, %edx
movl $0x4, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x74(%rsp), %eax
leal 0x3(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x2, %edx
movl 0x1c(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x188962
jmp 0x186e72
testq %rsi, %rsi
je 0x1867e3
movq %rsi, %rdi
callq 0x563b0
cmpl $0x3, %ebp
ja 0x186e98
leaq 0x293051(%rip), %rax # 0x419844
movslq (%rax,%rbp,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x14(%rsp), %esi
testb $0xf, %sil
jne 0x186e98
sarl $0x4, %esi
cmpl %r14d, %esi
jne 0x186e8d
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x186830
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0x6c(%rsp), %edi
testb $0xf, %dil
je 0x1869ea
testb $0x7, %dil
je 0x186a0e
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r13d
incl %r13d
jmp 0x186a14
movl 0x6c(%rsp), %edi
testb $0xf, %dil
je 0x1869f2
testb $0x7, %dil
movl 0xf4(%rsp), %edx
je 0x186bb5
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r13d
incl %r13d
jmp 0x186bbb
movl 0x18(%rsp), %edi
testb $0xf, %dil
je 0x186a04
testb $0x7, %dil
je 0x186d6b
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %ecx
incl %ecx
jmp 0x186d70
orl 0x1c(%rsp), %esi
testb $0x7, %sil
jne 0x186e98
movq 0x78(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r9, %rdx
movl $0x8, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x1c(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x188901
jmp 0x1870a8
orl 0x1c(%rsp), %esi
testb $0x3, %sil
jne 0x186e98
movq 0x78(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %r9d, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x1c(%rsp), %eax
leal 0x3(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x2, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x188962
jmp 0x1870a8
movl $0x10, %r13d
jmp 0x186a14
movl $0x10, %r13d
movl 0xf4(%rsp), %edx
jmp 0x186bbb
movl $0x10, %ecx
jmp 0x186d70
movl $0x8, %r13d
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r9d
xorl %r9d, %r15d
orl %r14d, %r15d
sete %r8b
jne 0x186a99
movl %edi, %eax
cltd
idivl %r13d
cmpl $0x10, %r13d
jne 0x186a99
cmpl 0xe0(%rsp), %eax
jne 0x186a99
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x186a5b
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0xd4(%rsp), %eax
movl %eax, %ecx
andl $0xf, %ecx
movl %r13d, %edx
xorl $0x10, %edx
orl %ecx, %edx
jne 0x186e98
sarl $0x4, %eax
leal 0xf(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x4, %ecx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r10, 0x40(%rsp)
movl %ecx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %rdi, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r8b, %r8b
movq 0x78(%rsp), %r14
je 0x1874c1
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18744a
testq %rax, %rax
je 0x186b7a
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x1873ea
lock
decl (%rax)
jne 0x1873ea
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x1873dd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1873ea
movl $0x8, %r13d
movl 0x14(%rsp), %esi
xorl %esi, %r14d
movl 0x18(%rsp), %r10d
xorl %r10d, %r15d
orl %r14d, %r15d
movl 0x70(%rsp), %ecx
xorl %ecx, %edx
orl %r15d, %edx
sete %r9b
jne 0x186c49
movl %edi, %eax
cltd
idivl %r13d
cmpl $0x10, %r13d
jne 0x186c49
cmpl 0xe0(%rsp), %eax
jne 0x186c49
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x186c0b
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x186dd2
testq %rsi, %rsi
je 0x187041
jmp 0x187039
movl 0xd4(%rsp), %eax
movl %eax, %edx
andl $0xf, %edx
movl %r13d, %r8d
xorl $0x10, %r8d
orl %edx, %r8d
jne 0x186e98
sarl $0x4, %eax
leal 0xf(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x4, %r8d
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movl %r8d, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %rdi, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r9b, %r9b
je 0x186e83
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x187020
testq %rax, %rax
movq 0x78(%rsp), %r14
je 0x186d30
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187a32
lock
decl (%rax)
jne 0x187a32
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x187a25
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x187a32
movl $0x8, %ecx
movl 0x14(%rsp), %esi
cmpl %r14d, %esi
jne 0x186ddd
movl %edi, %eax
cltd
idivl %ecx
cmpl $0x10, %ecx
jne 0x186ddd
cmpl %r15d, %eax
jne 0x186ddd
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x1870a8
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x186da2
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x187041
lock
decl (%rax)
jne 0x187041
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x187034
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x187041
cmpl $0x10, %ecx
jne 0x186e98
movl 0x74(%rsp), %eax
andl $0xf, %eax
jne 0x186e98
movq 0xd8(%rsp), %rax
shrq $0x4, %rax
movl %ecx, %ecx
imulq %rax, %rcx
leal 0xf(%rdi), %edx
testl %edi, %edi
cmovnsl %edi, %edx
sarl $0x4, %edx
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x10, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x74(%rsp), %eax
leal 0xf(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x4, %edx
movl 0x1c(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x18889c
xorl %ebp, %ebp
jmp 0x1870a8
movq 0x78(%rsp), %r14
jmp 0x1877eb
movq 0x78(%rsp), %r14
jmp 0x187b10
testb $0xf, 0x1c(%rsp)
je 0x186fa4
movl 0xec(%rsp), %edi
movq 0x8(%r12), %rax
vmovups (%r12), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0x10(%r12), %rcx
movq %rcx, 0x30(%rsp)
movl 0x18(%r12), %ecx
movl %ecx, 0x38(%rsp)
movq 0x20(%r12), %rcx
movq %rcx, 0x40(%rsp)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0x38(%r12), %ecx
movl %ecx, 0x58(%rsp)
movq 0x40(%r12), %rcx
movq %rcx, 0x60(%rsp)
testq %rax, %rax
je 0x186ef5
lock
incl (%rax)
cmpl $0x1, %edi
movq 0x78(%rsp), %r14
je 0x186f30
vmovups (%r14), %zmm0
leaq 0x80(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
movl $0x1, %edx
vzeroupper
callq 0x5c97e
movq 0x140(%rsp), %rdi
movq (%rdi), %rax
addq -0x18(%rax), %rdi
leaq 0x20(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq %r14, %rcx
callq 0x17fb2e
movl %eax, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x186f84
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
movq %rsi, %rdi
callq 0x563b0
jmp 0x1870a8
movq 0x78(%rsp), %r14
jmp 0x188157
movq 0xd8(%rsp), %rdx
andq $-0x10, %rdx
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x10, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x1870a8
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1870a8
movl 0x1c(%rsp), %eax
leal 0xf(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x4, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x18889c
jmp 0x1870a8
movq 0x78(%rsp), %r14
jmp 0x18777b
movq 0x78(%rsp), %r14
jmp 0x187a92
movq 0x78(%rsp), %r14
jmp 0x1880d9
testq %rsi, %rsi
je 0x187041
movq %rsi, %rdi
callq 0x563b0
movq 0x8(%rsp), %rcx
movq $0x0, 0x40(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rcx)
vmovups %xmm0, 0xc(%rcx)
vmovups %xmm0, 0x28(%rcx)
movl $0x0, 0x38(%rcx)
vmovups (%r12), %xmm0
vmovups %xmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x10(%rcx)
movl 0x18(%r12), %eax
movl %eax, 0x18(%rcx)
movq 0x20(%r12), %rax
movq %rax, 0x20(%rcx)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x38(%r12), %eax
movl %eax, 0x38(%rcx)
movq 0x40(%r12), %rax
movq %rax, 0x40(%rcx)
movl %ebp, %eax
addq $0x148, %rsp # imm = 0x148
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testq %rsi, %rsi
je 0x1870c9
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18715d
lock
decl (%rax)
jne 0x18715d
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x187150
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18715d
testq %rsi, %rsi
je 0x18715d
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18739e
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18739e
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r8d
movl 0x6c(%rsp), %edi
leal 0x3(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x2, %ecx
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r8d, %edx
movq %r13, %r8
movl $0x4, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18739e
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18739e
testl %eax, %eax
jle 0x18739c
xorl %r12d, %r12d
leaq 0x80(%rsp), %r14
leaq 0xf8(%rsp), %r15
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movl 0x54(%rsp), %edx
movq 0x60(%rsp), %rsi
imulq %r12, %rsi
movq 0x30(%rsp), %rdi
imulq %rdi, %rsi
addq 0x20(%rsp), %rsi
movl 0x38(%rsp), %r8d
movq 0x40(%rsp), %r9
movq %rsi, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %r8d, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %edx, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x48(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0xa8(%rsp)
cmpl $0x4, %eax
jne 0x1872c1
movq %rcx, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0xf8(%rsp)
movq $0x0, 0x100(%rsp)
movq %rdi, 0x108(%rsp)
movl %r8d, 0x110(%rsp)
movq %r9, 0x118(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movl $0x1, 0x12c(%rsp)
movl %edx, 0x130(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x138(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x120(%rsp)
cmpl $0x4, %eax
jne 0x187374
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x188962
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18720b
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x1873cf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
testq %rsi, %rsi
je 0x1873ea
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18747e
lock
decl (%rax)
jne 0x18747e
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x187471
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18747e
testq %rsi, %rsi
je 0x18747e
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x1876cf
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1876cf
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r9d
movl 0x6c(%rsp), %eax
leal 0xf(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x4, %ecx
movq 0xd8(%rsp), %rax
shrq $0x4, %rax
movl %r13d, %r8d
imulq %rax, %r8
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r9d, %edx
movl $0x10, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x1876cf
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x1876cf
testl %eax, %eax
jle 0x1876cd
xorl %r12d, %r12d
leaq 0x80(%rsp), %r14
leaq 0xf8(%rsp), %r15
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movl 0x54(%rsp), %edx
movq 0x60(%rsp), %rsi
imulq %r12, %rsi
movq 0x30(%rsp), %rdi
imulq %rdi, %rsi
addq 0x20(%rsp), %rsi
movl 0x38(%rsp), %r8d
movq 0x40(%rsp), %r9
movq %rsi, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %r8d, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %edx, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x48(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0xa8(%rsp)
cmpl $0x4, %eax
jne 0x1875f2
movq %rcx, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0xf8(%rsp)
movq $0x0, 0x100(%rsp)
movq %rdi, 0x108(%rsp)
movl %r8d, 0x110(%rsp)
movq %r9, 0x118(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movl $0x1, 0x12c(%rsp)
movl %edx, 0x130(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x138(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x120(%rsp)
cmpl $0x4, %eax
jne 0x1876a5
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x18889c
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18753c
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x187700
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
testq %rsi, %rsi
je 0x18771b
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x1877af
lock
decl (%rax)
jne 0x1877af
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x1877a2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1877af
testq %rsi, %rsi
je 0x1877af
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x1879e6
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1879e6
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r9d
movl 0x70(%rsp), %ecx
movl 0x6c(%rsp), %edi
leal 0x3(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x2, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r13, %r9
pushq 0x8(%r14)
pushq $0x4
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x1879e6
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x1879e6
testl %eax, %eax
jle 0x1879e4
movl 0x70(%rsp), %ecx
xorl %r12d, %r12d
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x80(%rsp), %rdi
leaq 0xf8(%rsp), %rbp
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x1879d5
xorl %r15d, %r15d
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r14, %rdx
movq 0x30(%rsp), %rsi
imulq %rsi, %rdx
addq 0x20(%rsp), %rdx
movl 0x38(%rsp), %r11d
movq 0x40(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xf0(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x80(%rsp)
movq %r12, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
movl %r11d, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl $0x2, %ebx
movl %ebx, 0xa8(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movq %r13, 0xb4(%rsp)
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %r15, %r9
imulq %rax, %r9
movq %rcx, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0xf8(%rsp)
movq %r12, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
movl %r11d, 0x110(%rsp)
movq %r8, 0x118(%rsp)
movl %ebx, 0x120(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movq %r13, 0x12c(%rsp)
imulq %rax, %rcx
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %rbp, %rsi
movq %rdi, %rbx
callq 0x188962
movq %rbx, %rdi
xorl %r12d, %r12d
movslq 0x70(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x187878
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18786d
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x187a17
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
testq %rsi, %rsi
je 0x187a32
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x187ac6
lock
decl (%rax)
jne 0x187ac6
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x187ab9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x187ac6
testq %rsi, %rsi
je 0x187ac6
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x187d0c
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x187d0c
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r10d
movl 0x70(%rsp), %ecx
movl 0x6c(%rsp), %eax
leal 0xf(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x4, %r8d
movq 0xd8(%rsp), %rax
shrq $0x4, %rax
movl %r13d, %r9d
imulq %rax, %r9
movq 0x8(%rsp), %rdi
movl %r10d, %edx
pushq 0x8(%r14)
pushq $0x10
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x187d0c
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x187d0c
testl %eax, %eax
jle 0x187d0a
movl 0x70(%rsp), %ecx
xorl %r12d, %r12d
movl $0x2, %ebp
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x80(%rsp), %rdi
leaq 0xf8(%rsp), %rsi
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x187cfb
xorl %r15d, %r15d
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r14, %rdx
movq 0x30(%rsp), %rbx
imulq %rbx, %rdx
addq 0x20(%rsp), %rdx
movl 0x38(%rsp), %r11d
movq 0x40(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xf0(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rbx, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x80(%rsp)
movq %r12, 0x88(%rsp)
movq %rbx, 0x90(%rsp)
movl %r11d, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %ebp, 0xa8(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movq %r13, 0xb4(%rsp)
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rbx
imulq %rbx, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %rbx, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0xf8(%rsp)
movq %r12, 0x100(%rsp)
movq %rbx, 0x108(%rsp)
movl %r11d, 0x110(%rsp)
movq %r8, 0x118(%rsp)
movl %ebp, 0x120(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movq %r13, 0x12c(%rsp)
movq %r9, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %rsi, %r12
movq %rdi, %rbx
callq 0x18889c
movq %rbx, %rdi
movq %r12, %rsi
xorl %r12d, %r12d
movslq 0x70(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x187ba4
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x187b99
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x187d3d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
testq %rsi, %rsi
je 0x187d58
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x187dec
lock
decl (%rax)
jne 0x187dec
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x187ddf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x187dec
testq %rsi, %rsi
je 0x187dec
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18802d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18802d
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r8d
movl 0x6c(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r8d, %edx
movq %r13, %r8
movl $0x8, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18802d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18802d
testl %eax, %eax
jle 0x18802b
xorl %r12d, %r12d
leaq 0x80(%rsp), %r14
leaq 0xf8(%rsp), %r15
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movl 0x54(%rsp), %edx
movq 0x60(%rsp), %rsi
imulq %r12, %rsi
movq 0x30(%rsp), %rdi
imulq %rdi, %rsi
addq 0x20(%rsp), %rsi
movl 0x38(%rsp), %r8d
movq 0x40(%rsp), %r9
movq %rsi, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %r8d, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %edx, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x48(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0xa8(%rsp)
cmpl $0x4, %eax
jne 0x187f50
movq %rcx, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0xf8(%rsp)
movq $0x0, 0x100(%rsp)
movq %rdi, 0x108(%rsp)
movl %r8d, 0x110(%rsp)
movq %r9, 0x118(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movl $0x1, 0x12c(%rsp)
movl %edx, 0x130(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x138(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x120(%rsp)
cmpl $0x4, %eax
jne 0x188003
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x188901
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x187e9a
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18805e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
testq %rsi, %rsi
je 0x188079
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18810d
lock
decl (%rax)
jne 0x18810d
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x188100
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18810d
testq %rsi, %rsi
je 0x18810d
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x188340
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x188340
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r9d
movl 0x70(%rsp), %ecx
movl 0x6c(%rsp), %eax
leal 0x7(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x3, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r13, %r9
pushq 0x8(%r14)
pushq $0x8
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x188340
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x188340
testl %eax, %eax
jle 0x18833e
movl 0x70(%rsp), %ecx
xorl %r12d, %r12d
movl $0x2, %ebp
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x80(%rsp), %rdi
leaq 0xf8(%rsp), %rbx
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x18832f
xorl %r15d, %r15d
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r14, %rdx
movq 0x30(%rsp), %rsi
imulq %rsi, %rdx
addq 0x20(%rsp), %rdx
movl 0x38(%rsp), %r11d
movq 0x40(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xf0(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x80(%rsp)
movq %r12, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
movl %r11d, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %ebp, 0xa8(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movq %r13, 0xb4(%rsp)
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0xf8(%rsp)
movq %r12, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
movl %r11d, 0x110(%rsp)
movq %r8, 0x118(%rsp)
movl %ebp, 0x120(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movq %r13, 0x12c(%rsp)
movq %r9, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %rbx, %rsi
movq %rdi, %r12
callq 0x188901
movq %r12, %rdi
xorl %r12d, %r12d
movslq 0x70(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x1881db
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x1881d0
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x1870a8
lock
decl (%rax)
jne 0x1870a8
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x188371
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1870a8
testq %rsi, %rsi
je 0x1870a8
jmp 0x186f8d
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x1885e6
lock
decl (%rax)
jne 0x1885e6
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1883c7
testq %rsi, %rsi
je 0x1885e6
movq %rsi, %rdi
callq 0x563b0
jmp 0x1885e6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1885e6
jmp 0x188894
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x18862d
lock
decl (%rax)
jne 0x18862d
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18841f
testq %rsi, %rsi
je 0x18862d
movq %rsi, %rdi
callq 0x563b0
jmp 0x18862d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18862d
jmp 0x188894
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x188674
lock
decl (%rax)
jne 0x188674
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x188477
testq %rsi, %rsi
je 0x188674
movq %rsi, %rdi
callq 0x563b0
jmp 0x188674
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x188674
jmp 0x188894
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x188702
lock
decl (%rax)
jne 0x188702
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x1884cf
testq %rsi, %rsi
je 0x188702
movq %rsi, %rdi
callq 0x563b0
jmp 0x188702
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x188702
jmp 0x188894
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x1886bb
lock
decl (%rax)
jne 0x1886bb
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x188527
testq %rsi, %rsi
je 0x1886bb
movq %rsi, %rdi
callq 0x563b0
jmp 0x1886bb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1886bb
jmp 0x188894
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x188749
lock
decl (%rax)
jne 0x188749
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18857f
testq %rsi, %rsi
je 0x188749
movq %rsi, %rdi
callq 0x563b0
jmp 0x188749
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x188749
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x1885e3
jmp 0x18862a
jmp 0x188671
jmp 0x1886ff
jmp 0x1886b8
jmp 0x188746
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18861a
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x188661
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1886a8
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1886ef
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x188736
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18877d
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x1887d8
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x188818
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18884a
testq %rsi, %rsi
je 0x18888c
jmp 0x18887c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18888c
jmp 0x188894
jmp 0x188856
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18888c
lock
decl (%rax)
jne 0x18888c
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x188886
testq %rsi, %rsi
je 0x18888c
movq %rsi, %rdi
callq 0x563b0
jmp 0x18888c
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx512.cpp
|
ncnn::crop_pack16_avx512(ncnn::Mat const&, ncnn::Mat&, int, int)
|
// Copy an h x w window of 16-float (AVX-512) packed elements out of `src`,
// starting at row `top`, column `left`, into the already-sized `dst`.
// `src` and `dst` are ncnn::Mat blobs whose rows are contiguous packs of 16 floats.
static void crop_pack16_avx512(const Mat& src, Mat& dst, int top, int left)
{
    const int out_w = dst.w;
    const int out_h = dst.h;
    // Columns discarded on the right side of each source row.
    const int right = src.w - dst.w - left;
    // Floats to skip at the end of each copied row to land on the next row's
    // first wanted pack: the trailing `right` packs plus the next row's
    // leading `left` packs.
    const int row_skip = (left + right) * 16;

    const float* in = src.row(top) + left * 16;
    float* out = dst;

    for (int row = 0; row < out_h; row++)
    {
        for (int col = 0; col < out_w; col++)
        {
            // Unaligned 512-bit load/store: one pack of 16 floats per element.
            _mm512_storeu_ps(out, _mm512_loadu_ps(in));
            in += 16;
            out += 16;
        }
        in += row_skip;
    }
}
|
movl 0x30(%rsi), %eax
testl %eax, %eax
jle 0x1888fd
movl 0x2c(%rsi), %r8d
movslq %r8d, %r10
movq (%rsi), %rsi
movslq 0x2c(%rdi), %r9
movslq %edx, %rdx
imulq %r9, %rdx
imulq 0x10(%rdi), %rdx
addq (%rdi), %rdx
shll $0x4, %ecx
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %rcx
subq %r10, %r9
shlq $0x4, %r9
xorl %edx, %edx
movl %r8d, %edi
testl %r8d, %r8d
jle 0x1888f3
vmovups (%rcx), %zmm0
vmovups %zmm0, (%rsi)
addq $0x40, %rcx
addq $0x40, %rsi
decl %edi
jne 0x1888db
leaq (%rcx,%r9,4), %rcx
incl %edx
cmpl %eax, %edx
jne 0x1888d3
vzeroupper
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx512.cpp
|
ncnn::crop_pack8_avx(ncnn::Mat const&, ncnn::Mat&, int, int)
|
// Copy an h x w window of 8-float (AVX) packed elements out of `src`,
// starting at row `top`, column `left`, into the already-sized `dst`.
// `src` and `dst` are ncnn::Mat blobs whose rows are contiguous packs of 8 floats.
static void crop_pack8_avx(const Mat& src, Mat& dst, int top, int left)
{
    const int out_w = dst.w;
    const int out_h = dst.h;
    // Columns discarded on the right side of each source row.
    const int right = src.w - dst.w - left;
    // Floats to skip at the end of each copied row to land on the next row's
    // first wanted pack: the trailing `right` packs plus the next row's
    // leading `left` packs.
    const int row_skip = (left + right) * 8;

    const float* in = src.row(top) + left * 8;
    float* out = dst;

    for (int row = 0; row < out_h; row++)
    {
        for (int col = 0; col < out_w; col++)
        {
            // Unaligned 256-bit load/store: one pack of 8 floats per element.
            _mm256_storeu_ps(out, _mm256_loadu_ps(in));
            in += 8;
            out += 8;
        }
        in += row_skip;
    }
}
|
movl 0x30(%rsi), %eax
testl %eax, %eax
jle 0x18895e
movl 0x2c(%rsi), %r8d
movslq %r8d, %r10
movq (%rsi), %rsi
movslq 0x2c(%rdi), %r9
movslq %edx, %rdx
imulq %r9, %rdx
imulq 0x10(%rdi), %rdx
addq (%rdi), %rdx
shll $0x3, %ecx
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %rcx
subq %r10, %r9
shlq $0x3, %r9
xorl %edx, %edx
movl %r8d, %edi
testl %r8d, %r8d
jle 0x188954
vmovups (%rcx), %ymm0
vmovups %ymm0, (%rsi)
addq $0x20, %rcx
addq $0x20, %rsi
decl %edi
jne 0x188940
leaq (%rcx,%r9,4), %rcx
incl %edx
cmpl %eax, %edx
jne 0x188938
vzeroupper
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx512.cpp
|
ncnn::Crop_x86_avx512::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const
|
// Crop bottom_blobs[0] to the region described either by the layer parameters
// (woffset/hoffset/... resolved via resolve_crop_roi) or, when woffset == -233,
// by the raw starts/ends stored in bottom_blobs[1].
//
// Fast paths handle packed layouts (elempack 16 / 8 / 4) directly when the
// offsets are pack-aligned; otherwise the blobs are unpacked to elempack 1 and
// the generic Crop::forward fallback is used.
//
// Returns 0 on success, -100 on allocation failure.
int Crop_x86_avx512::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& reference_blob = bottom_blobs[1];

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    int dims = bottom_blob.dims;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int ref_elempack = reference_blob.elempack;

    Mat& top_blob = top_blobs[0];

#if __SSE2__
#if __AVX__
#if __AVX512F__
    if (elempack == 16)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        if (woffset == -233)
        {
            // -233 sentinel: reference blob holds raw starts/ends data.
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }

        if (dims == 1)
        {
            int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            // Whole blob requested: share the input, no copy.
            if (_outw / out_elempack == w && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_woffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);

                return 0;
            }
        }

        if (dims == 2)
        {
            int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_hoffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);

                return 0;
            }
        }

        if (dims == 3)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h)
                {
                    // Only a channel slice is needed: clone it and we are done.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: return here so the clone is not discarded by the
                    // create()+crop path below (which would redo the copy).
                    return 0;
                }

                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);

                    crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                }

                return 0;
            }
        }

        if (dims == 4)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: stop here; falling through would throw the clone away.
                    return 0;
                }

                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);

                        crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                    }
                }

                return 0;
            }
        }
    }
#endif // __AVX512F__

    if (elempack == 8)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        if (woffset == -233)
        {
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }

        if (dims == 1)
        {
            int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw / out_elempack == w && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_woffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);

                return 0;
            }
        }

        if (dims == 2)
        {
            int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_hoffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);

                return 0;
            }
        }

        if (dims == 3)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: return so the clone is not discarded below.
                    return 0;
                }

                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);

                    crop_pack8_avx(m, borderm, _hoffset, _woffset);
                }

                return 0;
            }
        }

        if (dims == 4)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: return so the clone is not discarded below.
                    return 0;
                }

                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);

                        crop_pack8_avx(m, borderm, _hoffset, _woffset);
                    }
                }

                return 0;
            }
        }
    }
#endif // __AVX__

    if (elempack == 4)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        if (woffset == -233)
        {
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }

        if (dims == 1)
        {
            int out_elempack = _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw / out_elempack == w && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_woffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);

                return 0;
            }
        }

        if (dims == 2)
        {
            int out_elempack = _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_hoffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);

                return 0;
            }
        }

        if (dims == 3)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: return so the clone is not discarded below.
                    return 0;
                }

                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);

                    crop_pack4_sse(m, borderm, _hoffset, _woffset);
                }

                return 0;
            }
        }

        if (dims == 4)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;

            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }

            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);

                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    // FIX: return so the clone is not discarded below.
                    return 0;
                }

                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;

                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);

                        crop_pack4_sse(m, borderm, _hoffset, _woffset);
                    }
                }

                return 0;
            }
        }
    }
#endif // __SSE2__

    // No packed fast path applied: unpack both blobs to elempack 1 and fall
    // back to the generic reference implementation.
    Mat bottom_blob_unpacked = bottom_blob;
    if (elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;

        convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
    }

    Mat reference_blob_unpacked = reference_blob;
    if (ref_elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;

        convert_packing(reference_blob, reference_blob_unpacked, 1, opt_pack1);
    }

    std::vector<Mat> bottom_blobs_unpacked(2);
    bottom_blobs_unpacked[0] = bottom_blob_unpacked;
    bottom_blobs_unpacked[1] = reference_blob_unpacked;

    return Crop::forward(bottom_blobs_unpacked, top_blobs, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rcx, 0xd8(%rsp)
movq %rdi, %rbp
movq (%rsi), %r12
leaq 0x48(%r12), %rax
movq %rax, 0x158(%rsp)
movl 0x2c(%r12), %eax
movl %eax, 0x60(%rsp)
cltq
movl 0x30(%r12), %ecx
movl %ecx, 0xbc(%rsp)
movslq %ecx, %rcx
movl 0x34(%r12), %esi
movl %esi, 0x154(%rsp)
movslq %esi, %r9
movl 0x38(%r12), %esi
movq %rsi, 0xd0(%rsp)
movl 0x28(%r12), %r14d
movq 0x10(%r12), %rsi
movl 0x18(%r12), %r8d
movslq 0x60(%r12), %rdi
movq %rdi, 0xf0(%rsp)
movl %r8d, %edi
movq %rdx, 0x160(%rsp)
movq (%rdx), %rdx
movq %rdx, 0x8(%rsp)
cmpl $0x4, %r8d
je 0x188b9d
cmpl $0x8, %edi
je 0x188b0b
cmpl $0x10, %edi
jne 0x18b2c8
movl %edi, 0xfc(%rsp)
movq %rsi, 0xe8(%rsp)
movq (%rbp), %rsi
movq -0x18(%rsi), %rsi
leaq (%rsi,%rbp), %rdi
cmpl $0xffffff17, 0xd0(%rbp,%rsi) # imm = 0xFFFFFF17
jne 0x188cfb
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x189b8b
leaq 0x290e3b(%rip), %r8 # 0x4198f4
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x4, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x189285
movl %edi, 0xfc(%rsp)
movq %rsi, 0xe8(%rsp)
movq (%rbp), %rsi
movq -0x18(%rsi), %rsi
leaq (%rsi,%rbp), %rdi
cmpl $0xffffff17, 0xd0(%rbp,%rsi) # imm = 0xFFFFFF17
jne 0x188c2f
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x189463
leaq 0x290d69(%rip), %r8 # 0x4198b4
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x3, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x188e99
movl %edi, 0xfc(%rsp)
movq %rsi, 0xe8(%rsp)
movq (%rbp), %rsi
movq -0x18(%rsi), %rsi
leaq (%rsi,%rbp), %rdi
cmpl $0xffffff17, 0xd0(%rbp,%rsi) # imm = 0xFFFFFF17
jne 0x188c95
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x1897f7
leaq 0x290c97(%rip), %r8 # 0x419874
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x2, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x18919e
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x18951a
leaq 0x290c51(%rip), %r8 # 0x419894
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x3, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x188f68
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x1898ae
leaq 0x290bab(%rip), %r8 # 0x419854
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x2, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x18920d
leal -0x1(%r14), %esi
cmpl $0x3, %esi
ja 0x189c42
leaq 0x290bc5(%rip), %r8 # 0x4198d4
movslq (%r8,%rsi,4), %rsi
addq %r8, %rsi
jmpq *%rsi
shlq $0x4, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %ecx
movl %ecx, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl %ecx, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
jmp 0x1893a7
movq 0xd0(%rsp), %rdx
leal (,%rdx,8), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl 0x60(%rsp), %r8d
movl %r8d, 0x3c(%rsp)
movl 0xbc(%rsp), %r8d
movl %r8d, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x188e88
movq 0xd0(%rsp), %rdx
leal (,%rdx,8), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl 0x60(%rsp), %r8d
movl %r8d, 0x3c(%rsp)
movl 0xbc(%rsp), %r8d
movl %r8d, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x188f57
movq 0xd0(%rsp), %rdx
leal (,%rdx,8), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl 0xbc(%rsp), %edx
movl %edx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x189488
movq 0xd0(%rsp), %rdx
leal (,%rdx,4), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x18918d
movq 0xd0(%rsp), %rdx
leal (,%rdx,8), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl 0xbc(%rsp), %edx
movl %edx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x18953f
movq 0xd0(%rsp), %rdx
leal (,%rdx,4), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x1891fc
movq 0xd0(%rsp), %rdx
shll $0x4, %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl 0x60(%rsp), %r8d
movl %r8d, 0x3c(%rsp)
movl 0xbc(%rsp), %r8d
movl %r8d, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x189274
shlq $0x3, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x189488
shlq $0x3, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x18953f
movq 0xd0(%rsp), %rdx
shll $0x4, %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, %esi
movl %esi, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x3, 0x38(%rsp)
movl 0x60(%rsp), %r8d
movl %r8d, 0x3c(%rsp)
movl 0xbc(%rsp), %r8d
movl %r8d, 0x40(%rsp)
movl %esi, 0x44(%rsp)
movl %edx, 0x48(%rsp)
imulq %rax, %rcx
jmp 0x189396
movq 0xd0(%rsp), %rdx
leal (,%rdx,4), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x18981c
movq 0xd0(%rsp), %rdx
leal (,%rdx,4), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x1898d3
movq 0xd0(%rsp), %rdx
movl %edx, %esi
shll $0x4, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl 0xbc(%rsp), %edx
movl %edx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x189bb0
shlq $0x2, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x18981c
shlq $0x2, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl %eax, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x1898d3
movq 0xd0(%rsp), %rdx
movl %edx, %esi
shll $0x4, %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x4, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl 0xbc(%rsp), %edx
movl %edx, 0x40(%rsp)
movl %r9d, 0x44(%rsp)
movl %esi, 0x48(%rsp)
imulq %rax, %rcx
imulq %r9, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x50(%rsp)
jmp 0x189c67
shlq $0x4, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x189bb0
shlq $0x4, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $0x4, 0x20(%rsp)
movl $0x1, 0x28(%rsp)
movq $0x0, 0x30(%rsp)
movl $0x2, 0x38(%rsp)
movl 0x60(%rsp), %edx
movl %edx, 0x3c(%rsp)
movl %ecx, 0x40(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x44(%rsp)
imulq %rax, %rcx
movq %rcx, 0x50(%rsp)
jmp 0x189c67
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movq 0x158(%rsp), %rax
movq (%rax), %rdx
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r15
leaq 0xd4(%rsp), %r13
leaq 0x18(%rsp), %rsi
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r15
pushq %r13
callq 0x18234c
addq $0x30, %rsp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18a01c
lock
decl (%rax)
jne 0x18a01c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x189f1f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18a01c
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movl 0x70(%r12), %eax
decl %eax
cmpl $0x3, %eax
ja 0x18973e
leaq 0x29034e(%rip), %rcx # 0x4198a4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movslq 0x74(%r12), %rax
imulq 0xf0(%rsp), %rax
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %ecx
movl %ecx, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl %ecx, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa0(%rsp)
movl %ecx, 0xa8(%rsp)
jmp 0x1896bc
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movl 0x80(%r12), %edx
imull 0xf0(%rsp), %edx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %esi
movl %esi, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x3, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %esi, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
jmp 0x1896ab
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movslq 0x7c(%r12), %rdx
movl 0x80(%r12), %esi
imull 0xf0(%rsp), %esi
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x4, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %edx, 0xa4(%rsp)
movl %esi, 0xa8(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0xb0(%rsp)
jmp 0x189768
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
imulq 0xf0(%rsp), %rcx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x2, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa4(%rsp)
imulq %rax, %rcx
movq %rcx, 0xb0(%rsp)
jmp 0x189768
movq $0x0, 0xb0(%rsp)
vmovaps %xmm0, 0x70(%rsp)
vmovups %xmm0, 0x7c(%rsp)
vmovaps %xmm0, 0x90(%rsp)
vmovups %xmm0, 0x9c(%rsp)
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r15
leaq 0xd4(%rsp), %r13
leaq 0x18(%rsp), %rsi
leaq 0x78(%rsp), %rdx
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r15
pushq %r13
callq 0x182440
addq $0x30, %rsp
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x189f3a
lock
decl (%rax)
jne 0x189f3a
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x189f2d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x189f3a
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movq 0x158(%rsp), %rax
movq (%rax), %rdx
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r15
leaq 0xd4(%rsp), %r13
leaq 0x18(%rsp), %rsi
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r15
pushq %r13
callq 0x18234c
addq $0x30, %rsp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18a6d3
lock
decl (%rax)
jne 0x18a6d3
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x189f6f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18a6d3
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movl 0x70(%r12), %eax
decl %eax
cmpl $0x3, %eax
ja 0x189ad2
leaq 0x28ff7a(%rip), %rcx # 0x419864
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movslq 0x74(%r12), %rax
imulq 0xf0(%rsp), %rax
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %ecx
movl %ecx, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl %ecx, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa0(%rsp)
movl %ecx, 0xa8(%rsp)
jmp 0x189a50
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movl 0x80(%r12), %edx
imull 0xf0(%rsp), %edx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %esi
movl %esi, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x3, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %esi, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
jmp 0x189a3f
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movslq 0x7c(%r12), %rdx
movl 0x80(%r12), %esi
imull 0xf0(%rsp), %esi
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x4, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %edx, 0xa4(%rsp)
movl %esi, 0xa8(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0xb0(%rsp)
jmp 0x189afc
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
imulq 0xf0(%rsp), %rcx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x2, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa4(%rsp)
imulq %rax, %rcx
movq %rcx, 0xb0(%rsp)
jmp 0x189afc
movq $0x0, 0xb0(%rsp)
vmovaps %xmm0, 0x70(%rsp)
vmovups %xmm0, 0x7c(%rsp)
vmovaps %xmm0, 0x90(%rsp)
vmovups %xmm0, 0x9c(%rsp)
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r15
leaq 0xd4(%rsp), %r13
leaq 0x18(%rsp), %rsi
leaq 0x78(%rsp), %rdx
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r15
pushq %r13
callq 0x182440
addq $0x30, %rsp
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x189f8a
lock
decl (%rax)
jne 0x189f8a
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x189f7d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x189f8a
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movq 0x158(%rsp), %rax
movq (%rax), %rdx
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r13
leaq 0xd4(%rsp), %r15
leaq 0x18(%rsp), %rsi
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
pushq %r15
callq 0x18234c
addq $0x30, %rsp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18ad02
lock
decl (%rax)
jne 0x18ad02
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x189fbf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18ad02
movq $0x0, 0x50(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovups %xmm0, 0x1c(%rsp)
vmovaps %xmm0, 0x30(%rsp)
vmovups %xmm0, 0x3c(%rsp)
movl 0x70(%r12), %eax
decl %eax
cmpl $0x3, %eax
ja 0x189e66
leaq 0x28fc66(%rip), %rcx # 0x4198e4
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movslq 0x74(%r12), %rax
imulq 0xf0(%rsp), %rax
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %ecx
movl %ecx, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl %ecx, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa0(%rsp)
movl %ecx, 0xa8(%rsp)
jmp 0x189de4
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movl 0x80(%r12), %edx
imull 0xf0(%rsp), %edx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, %esi
movl %esi, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x3, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %esi, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
jmp 0x189dd3
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
movslq 0x7c(%r12), %rdx
movl 0x80(%r12), %esi
imull 0xf0(%rsp), %esi
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x4, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl %edx, 0xa4(%rsp)
movl %esi, 0xa8(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0xb0(%rsp)
jmp 0x189e90
movslq 0x74(%r12), %rax
movslq 0x78(%r12), %rcx
imulq 0xf0(%rsp), %rcx
vmovaps %xmm0, 0x70(%rsp)
movq $0x4, 0x80(%rsp)
movl $0x1, 0x88(%rsp)
movq $0x0, 0x90(%rsp)
movl $0x2, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0xa4(%rsp)
imulq %rax, %rcx
movq %rcx, 0xb0(%rsp)
jmp 0x189e90
movq $0x0, 0xb0(%rsp)
vmovaps %xmm0, 0x70(%rsp)
vmovups %xmm0, 0x7c(%rsp)
vmovaps %xmm0, 0x90(%rsp)
vmovups %xmm0, 0x9c(%rsp)
subq $0x8, %rsp
leaq 0xc8(%rsp), %rax
leaq 0xcc(%rsp), %r10
leaq 0x74(%rsp), %r11
leaq 0x6c(%rsp), %r15
leaq 0xd4(%rsp), %r13
leaq 0x18(%rsp), %rsi
leaq 0x78(%rsp), %rdx
leaq 0x70(%rsp), %rcx
leaq 0xd0(%rsp), %r8
leaq 0xec(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r15
pushq %r13
callq 0x182440
addq $0x30, %rsp
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x189fda
lock
decl (%rax)
jne 0x189fda
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x189fcd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x189fda
testq %rsi, %rsi
je 0x18a01c
jmp 0x18a014
testq %rsi, %rsi
je 0x189f3a
movq %rsi, %rdi
callq 0x563b0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18a01c
lock
decl (%rax)
jne 0x18a01c
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18a00f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18a01c
testq %rsi, %rsi
je 0x18a6d3
jmp 0x18a6cb
testq %rsi, %rsi
je 0x189f8a
movq %rsi, %rdi
callq 0x563b0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18a6d3
lock
decl (%rax)
jne 0x18a6d3
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18a6c6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18a6d3
testq %rsi, %rsi
je 0x18ad02
jmp 0x18acfa
testq %rsi, %rsi
je 0x189fda
movq %rsi, %rdi
callq 0x563b0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18ad02
lock
decl (%rax)
jne 0x18ad02
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18acf5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18ad02
testq %rsi, %rsi
je 0x18a01c
movq %rsi, %rdi
callq 0x563b0
decl %r14d
cmpl $0x3, %r14d
ja 0x18b2c1
leaq 0x28f894(%rip), %rax # 0x4198c4
movslq (%rax,%r14,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x64(%rsp), %esi
movl %esi, %edi
andl $0x7, %edi
xorl %eax, %eax
testb $0x3, %sil
sete %al
movq 0xe8(%rsp), %r9
shrq $0x3, %r9
leal (%rax,%rax), %ecx
testl %edi, %edi
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movzbl %cl, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r9
movl %esi, %eax
cltd
idivl %r8d
movl 0x60(%rsp), %ecx
xorl %eax, %ecx
orl %edi, %ecx
jne 0x18a64a
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a0a6
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xc0(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xe8(%rsp), %r15
shrq $0x3, %r15
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r15
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %r8d
movl 0xbc(%rsp), %ecx
xorl %r8d, %ecx
orl %eax, %ecx
sete %r9b
jne 0x18a385
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %ecx
cmovnel %eax, %ecx
movl %edi, %eax
cltd
idivl %ecx
testb $0x7, %dil
jne 0x18a385
cmpl 0xd0(%rsp), %eax
jne 0x18a385
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a186
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xc0(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xe8(%rsp), %r15
shrq $0x3, %r15
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r15
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %r9d
movl 0xbc(%rsp), %edx
xorl %r9d, %edx
orl %eax, %edx
movl 0xc4(%rsp), %ecx
movl 0x154(%rsp), %eax
xorl %ecx, %eax
orl %edx, %eax
sete %r10b
jne 0x18a4a1
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x18a4a1
cmpl 0xd0(%rsp), %eax
jne 0x18a4a1
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a27b
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xe8(%rsp), %rdx
shrq $0x3, %rdx
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %rdx
movq %rdx, %rcx
movl 0x64(%rsp), %esi
cmpl 0x60(%rsp), %esi
jne 0x18a5c1
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x18a5c1
cmpl 0xbc(%rsp), %eax
jne 0x18a5c1
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a347
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xcc(%rsp), %eax
movl %eax, %ecx
orl %edi, %ecx
testb $0x7, %cl
jne 0x18b2c1
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x3, %ecx
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %ecx, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %rdi, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %r9b, %r9b
movq 0xd8(%rsp), %r14
je 0x18c4af
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18c438
testq %rax, %rax
je 0x18a466
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18c3de
lock
decl (%rax)
jne 0x18c3de
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18c3d1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c3de
movl 0xcc(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x7, %dl
jne 0x18b2c1
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x3, %r8d
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r11, 0x30(%rsp)
movl %r8d, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %rdi, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %r10b, %r10b
je 0x18b62d
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18b6bc
testq %rax, %rax
movq 0xd8(%rsp), %r14
je 0x18a586
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18c6f3
lock
decl (%rax)
jne 0x18c6f3
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18c6e6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c6f3
movl 0xc8(%rsp), %eax
orl %edi, %eax
testb $0x7, %al
jne 0x18b2c1
leal 0x7(%rdi), %edx
testl %edi, %edi
cmovnsl %edi, %edx
sarl $0x3, %edx
movq 0xd8(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x8, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0xc8(%rsp), %eax
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
movl 0x68(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x188901
jmp 0x18b2a2
orl 0x68(%rsp), %esi
testb $0x7, %sil
jne 0x18b2c1
movq 0xd8(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r9, %rdx
movl $0x8, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0x68(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x188901
jmp 0x18b73d
testq %rsi, %rsi
je 0x18a6d3
movq %rsi, %rdi
callq 0x563b0
decl %r14d
cmpl $0x3, %r14d
ja 0x18b2c1
leaq 0x28f19d(%rip), %rax # 0x419884
movslq (%rax,%r14,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x64(%rsp), %esi
xorl %ecx, %ecx
movl %esi, %edi
andl $0x3, %edi
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
movq 0xe8(%rsp), %r10
shrq $0x2, %r10
addb %cl, %cl
shlq %cl, %r10
movl %esi, %eax
cltd
idivl %r9d
movl 0x60(%rsp), %ecx
xorl %eax, %ecx
orl %edi, %ecx
jne 0x18ac7b
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a744
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xc0(%rsp), %r8d
testb $0x3, %r8b
sete %cl
movq 0xe8(%rsp), %r15
shrq $0x2, %r15
addb %cl, %cl
shlq %cl, %r15
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %ecx
movl 0xbc(%rsp), %edx
xorl %ecx, %edx
orl %eax, %edx
sete %dil
jne 0x18a9d9
xorl %eax, %eax
testb $0x3, %r8b
sete %al
leal (%rax,%rax,2), %r9d
incl %r9d
movl %r8d, %eax
cltd
idivl %r9d
testb $0x3, %r8b
jne 0x18a9d9
cmpl 0xd0(%rsp), %eax
jne 0x18a9d9
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a80d
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xc0(%rsp), %r8d
testb $0x3, %r8b
sete %cl
movq 0xe8(%rsp), %r15
shrq $0x2, %r15
addb %cl, %cl
shlq %cl, %r15
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %r9d
movl 0xbc(%rsp), %edx
xorl %r9d, %edx
orl %eax, %edx
movl 0xc4(%rsp), %ecx
movl 0x154(%rsp), %eax
xorl %ecx, %eax
orl %edx, %eax
sete %dil
jne 0x18aae7
xorl %eax, %eax
testb $0x3, %r8b
sete %al
leal (%rax,%rax,2), %r10d
incl %r10d
movl %r8d, %eax
cltd
idivl %r10d
testb $0x3, %r8b
jne 0x18aae7
cmpl 0xd0(%rsp), %eax
jne 0x18aae7
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a8ea
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0x6c(%rsp), %r8d
testb $0x3, %r8b
sete %cl
movq 0xe8(%rsp), %rax
shrq $0x2, %rax
addb %cl, %cl
shlq %cl, %rax
movq %rax, %rcx
movl 0x64(%rsp), %esi
cmpl 0x60(%rsp), %esi
jne 0x18abf5
xorl %eax, %eax
testb $0x3, %r8b
sete %al
leal (%rax,%rax,2), %edi
incl %edi
movl %r8d, %eax
cltd
idivl %edi
testb $0x3, %r8b
jne 0x18abf5
cmpl 0xbc(%rsp), %eax
jne 0x18abf5
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18a99b
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xcc(%rsp), %eax
movl %eax, %edx
orl %r8d, %edx
testb $0x3, %dl
jne 0x18b2c1
sarl $0x2, %eax
sarl $0x2, %r8d
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r9
imulq %r9, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %r9, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %r8d, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %r9, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %dil, %dil
movq 0xd8(%rsp), %r14
je 0x18b831
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18b7b8
testq %rax, %rax
je 0x18aaac
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b75e
lock
decl (%rax)
jne 0x18b75e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18b751
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b75e
movl 0xcc(%rsp), %eax
movl %eax, %edx
orl %r8d, %edx
testb $0x3, %dl
jne 0x18b2c1
sarl $0x2, %eax
sarl $0x2, %r8d
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r10
imulq %r10, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %r10, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r11, 0x30(%rsp)
movl %r8d, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %r10, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r10
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %dil, %dil
je 0x18b2a9
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18b6af
testq %rax, %rax
movq 0xd8(%rsp), %r14
je 0x18abba
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18bda3
lock
decl (%rax)
jne 0x18bda3
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18bd96
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18bda3
movl 0xc8(%rsp), %eax
orl %r8d, %eax
testb $0x3, %al
jne 0x18b2c1
sarl $0x2, %r8d
movq 0xd8(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r8d, %edx
movl $0x4, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0xc8(%rsp), %eax
leal 0x3(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x2, %edx
movl 0x68(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x188962
jmp 0x18b2a2
orl 0x68(%rsp), %esi
testb $0x3, %sil
jne 0x18b2c1
movq 0xd8(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %r9d, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0x68(%rsp), %eax
leal 0x3(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x2, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x188962
jmp 0x18b73d
testq %rsi, %rsi
je 0x18ad02
movq %rsi, %rdi
callq 0x563b0
decl %r14d
cmpl $0x3, %r14d
ja 0x18b2c1
leaq 0x28ebee(%rip), %rax # 0x419904
movslq (%rax,%r14,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x64(%rsp), %esi
testb $0xf, %sil
jne 0x18b2c1
sarl $0x4, %esi
cmpl 0x60(%rsp), %esi
jne 0x18b2b6
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18ad54
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xc0(%rsp), %edi
testb $0xf, %dil
je 0x18ae0e
testb $0x7, %dil
je 0x18ae2b
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r14d
incl %r14d
jmp 0x18ae31
movl 0xc0(%rsp), %edi
testb $0xf, %dil
je 0x18ae16
testb $0x7, %dil
je 0x18afd0
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r14d
incl %r14d
jmp 0x18afd6
movl 0x6c(%rsp), %edi
testb $0xf, %dil
je 0x18ae21
testb $0x7, %dil
je 0x18b18d
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %ecx
incl %ecx
jmp 0x18b192
movl $0x10, %r14d
jmp 0x18ae31
movl $0x10, %r14d
jmp 0x18afd6
movl $0x10, %ecx
jmp 0x18b192
movl $0x8, %r14d
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %r9d
movl 0xbc(%rsp), %ecx
xorl %r9d, %ecx
orl %eax, %ecx
sete %r8b
jne 0x18aebf
movl %edi, %eax
cltd
idivl %r14d
cmpl $0x10, %r14d
jne 0x18aebf
cmpl 0xd0(%rsp), %eax
jne 0x18aebf
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18ae81
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xcc(%rsp), %eax
movl %eax, %ecx
andl $0xf, %ecx
movl %r14d, %edx
xorl $0x10, %edx
orl %ecx, %edx
jne 0x18b2c1
sarl $0x4, %eax
leal 0xf(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x4, %ecx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %ecx, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %rdi, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %r8b, %r8b
je 0x18bb48
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18bad1
testq %rax, %rax
je 0x18af95
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18ba77
lock
decl (%rax)
jne 0x18ba77
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18ba6a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18ba77
movl $0x8, %r14d
movl 0x64(%rsp), %esi
movl 0x60(%rsp), %eax
xorl %esi, %eax
movl 0x6c(%rsp), %r10d
movl 0xbc(%rsp), %edx
xorl %r10d, %edx
orl %eax, %edx
movl 0xc4(%rsp), %ecx
movl 0x154(%rsp), %eax
xorl %ecx, %eax
orl %edx, %eax
sete %r9b
jne 0x18b076
movl %edi, %eax
cltd
idivl %r14d
cmpl $0x10, %r14d
jne 0x18b076
cmpl 0xd0(%rsp), %eax
jne 0x18b076
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18b038
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18b1f9
testq %rsi, %rsi
je 0x18b6d6
jmp 0x18b6ce
movl 0xcc(%rsp), %eax
movl %eax, %edx
andl $0xf, %edx
movl %r14d, %r8d
xorl $0x10, %r8d
orl %edx, %r8d
jne 0x18b2c1
sarl $0x4, %eax
leal 0xf(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x4, %r8d
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x10(%rsp)
movq $0x0, 0x18(%rsp)
movq %rdi, 0x20(%rsp)
movl %edx, 0x28(%rsp)
movq %r11, 0x30(%rsp)
movl %r8d, 0x48(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %rdi, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x50(%rsp)
vmovups %xmm0, 0x38(%rsp)
testb %r9b, %r9b
je 0x18c192
leaq 0x70(%rsp), %r12
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x78(%rsp), %rax
cmpq %r12, 0x8(%rsp)
je 0x18c111
testq %rax, %rax
je 0x18b152
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18c0b7
lock
decl (%rax)
jne 0x18c0b7
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18c0aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c0b7
movl $0x8, %ecx
movl 0x64(%rsp), %esi
cmpl 0x60(%rsp), %esi
jne 0x18b204
movl %edi, %eax
cltd
idivl %ecx
cmpl $0x10, %ecx
jne 0x18b204
cmpl 0xbc(%rsp), %eax
jne 0x18b204
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18b73d
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18b1c9
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18b6d6
lock
decl (%rax)
jne 0x18b6d6
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18b6c9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b6d6
cmpl $0x10, %ecx
jne 0x18b2c1
movl 0xc8(%rsp), %eax
andl $0xf, %eax
jne 0x18b2c1
movq 0xe8(%rsp), %rax
shrq $0x4, %rax
movl %ecx, %ecx
imulq %rax, %rcx
leal 0xf(%rdi), %edx
testl %edi, %edi
cmovnsl %edi, %edx
sarl $0x4, %edx
movq 0xd8(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x10, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0xc8(%rsp), %eax
leal 0xf(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x4, %edx
movl 0x68(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x18889c
xorl %ebp, %ebp
jmp 0x18b73d
movq 0xd8(%rsp), %r14
jmp 0x18be7e
testb $0xf, 0x68(%rsp)
je 0x18b63a
movl 0xfc(%rsp), %edi
movq 0x8(%r12), %rax
vmovups (%r12), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq 0x10(%r12), %rcx
movq %rcx, 0x20(%rsp)
movl 0x18(%r12), %ecx
movl %ecx, 0x28(%rsp)
movq 0x20(%r12), %rcx
movq %rcx, 0x30(%rsp)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x38(%rsp)
movl 0x38(%r12), %ecx
movl %ecx, 0x48(%rsp)
movq 0x40(%r12), %rcx
movq %rcx, 0x50(%rsp)
testq %rax, %rax
je 0x18b31e
lock
incl (%rax)
cmpl $0x1, %edi
movq 0xd8(%rsp), %r14
je 0x18b359
vmovups (%r14), %zmm0
leaq 0x70(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
movl $0x1, %edx
vzeroupper
callq 0x5c97e
movq 0x50(%r12), %rax
vmovups 0x48(%r12), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x58(%r12), %rcx
movq %rcx, 0x80(%rsp)
movl 0x60(%r12), %ecx
movl %ecx, 0x88(%rsp)
movq 0x68(%r12), %rcx
movq %rcx, 0x90(%rsp)
vmovups 0x70(%r12), %xmm0
vmovups %xmm0, 0x98(%rsp)
movl 0x80(%r12), %ecx
movl %ecx, 0xa8(%rsp)
movq 0x88(%r12), %rcx
movq %rcx, 0xb0(%rsp)
testq %rax, %rax
je 0x18b3c8
lock
incl (%rax)
cmpl $0x1, 0xf0(%rsp)
je 0x18b408
vmovups (%r14), %zmm0
leaq 0x100(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0x70(%rsp), %rsi
movq 0x158(%rsp), %rdi
movl $0x1, %edx
vzeroupper
callq 0x5c97e
leaq 0x100(%rsp), %rdi
leaq 0x68(%rsp), %rdx
movl $0x2, %esi
callq 0x6781a
movq 0x100(%rsp), %rbx
leaq 0x10(%rsp), %rax
cmpq %rax, %rbx
je 0x18b4d8
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b442
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x18b471
lock
decl (%rax)
jne 0x18b471
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x18b464
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b471
testq %rsi, %rsi
je 0x18b471
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x40(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, 0x28(%rbx)
movl $0x0, 0x38(%rbx)
vmovaps 0x10(%rsp), %xmm0
vmovups %xmm0, (%rbx)
movq 0x20(%rsp), %rax
movq %rax, 0x10(%rbx)
movl 0x28(%rsp), %eax
movl %eax, 0x18(%rbx)
movq 0x30(%rsp), %rax
movq %rax, 0x20(%rbx)
vmovups 0x38(%rsp), %xmm0
vmovups %xmm0, 0x28(%rbx)
movl 0x48(%rsp), %eax
movl %eax, 0x38(%rbx)
movq 0x50(%rsp), %rax
movq %rax, 0x40(%rbx)
movq 0x100(%rsp), %rbx
leaq 0x48(%rbx), %rax
leaq 0x70(%rsp), %rcx
cmpq %rcx, %rax
je 0x18b57e
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18b4f7
lock
incl (%rax)
movq 0x50(%rbx), %rax
testq %rax, %rax
je 0x18b527
lock
decl (%rax)
jne 0x18b527
movq 0x48(%rbx), %rsi
movq 0x68(%rbx), %rdi
testq %rdi, %rdi
je 0x18b51a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b527
testq %rsi, %rsi
je 0x18b527
movq %rsi, %rdi
callq 0x563b0
vmovaps 0x70(%rsp), %xmm0
vmovups %xmm0, 0x48(%rbx)
movq 0x80(%rsp), %rax
movq %rax, 0x58(%rbx)
movl 0x88(%rsp), %eax
movl %eax, 0x60(%rbx)
movq 0x90(%rsp), %rax
movq %rax, 0x68(%rbx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x70(%rbx)
movl 0xa8(%rsp), %eax
movl %eax, 0x80(%rbx)
movq 0xb0(%rsp), %rax
movq %rax, 0x88(%rbx)
movq (%rbp), %rax
addq -0x18(%rax), %rbp
leaq 0x100(%rsp), %rsi
movq %rbp, %rdi
movq 0x160(%rsp), %rdx
movq %r14, %rcx
callq 0x1811f2
movl %eax, %ebp
leaq 0x100(%rsp), %rdi
callq 0x678ac
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18b5e6
lock
decl (%rax)
jne 0x18b5e6
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18b5d9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b5e6
testq %rsi, %rsi
je 0x18b5e6
movq %rsi, %rdi
callq 0x563b0
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18b617
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
movq %rsi, %rdi
callq 0x563b0
jmp 0x18b73d
movq 0xd8(%rsp), %r14
jmp 0x18c7ce
movq 0xe8(%rsp), %rdx
andq $-0x10, %rdx
movq 0xd8(%rsp), %rax
movq 0x8(%rax), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x10, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18b73d
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18b73d
movl 0x68(%rsp), %eax
leal 0xf(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x4, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x18889c
jmp 0x18b73d
movq 0xd8(%rsp), %r14
jmp 0x18bdfd
movq 0xd8(%rsp), %r14
jmp 0x18c74d
testq %rsi, %rsi
je 0x18b6d6
movq %rsi, %rdi
callq 0x563b0
movq 0x8(%rsp), %rcx
movq $0x0, 0x40(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rcx)
vmovups %xmm0, 0xc(%rcx)
vmovups %xmm0, 0x28(%rcx)
movl $0x0, 0x38(%rcx)
vmovups (%r12), %xmm0
vmovups %xmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x10(%rcx)
movl 0x18(%r12), %eax
movl %eax, 0x18(%rcx)
movq 0x20(%r12), %rax
movq %rax, 0x20(%rcx)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x38(%r12), %eax
movl %eax, 0x38(%rcx)
movq 0x40(%r12), %rax
movq %rax, 0x40(%rcx)
movl %ebp, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testq %rsi, %rsi
je 0x18b75e
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18b7e9
lock
decl (%rax)
jne 0x18b7e9
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18b7dc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b7e9
testq %rsi, %rsi
je 0x18b7e9
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ba2b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18ba2b
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %ecx
movl 0xc0(%rsp), %eax
leal 0x3(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x2, %r8d
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %ecx, %edx
movl %r8d, %ecx
movq %r15, %r8
movl $0x4, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ba2b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18ba2b
testl %eax, %eax
jle 0x18ba29
xorl %r12d, %r12d
leaq 0x70(%rsp), %r14
leaq 0x100(%rsp), %r15
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movl 0x44(%rsp), %edx
movq 0x50(%rsp), %rsi
imulq %r12, %rsi
movq 0x20(%rsp), %rdi
imulq %rdi, %rsi
addq 0x10(%rsp), %rsi
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %rsi, 0x70(%rsp)
movq $0x0, 0x78(%rsp)
movq %rdi, 0x80(%rsp)
movl %r8d, 0x88(%rsp)
movq %r9, 0x90(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl $0x1, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
movq %rcx, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xb0(%rsp)
movl 0x38(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x98(%rsp)
cmpl $0x4, %eax
jne 0x18b94b
movq %rcx, 0xb0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0x100(%rsp)
movq $0x0, 0x108(%rsp)
movq %rdi, 0x110(%rsp)
movl %r8d, 0x118(%rsp)
movq %r9, 0x120(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movl $0x1, 0x134(%rsp)
movl %edx, 0x138(%rsp)
imulq %rax, %rcx
movq %rcx, %rax
imulq %rdi, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x140(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x128(%rsp)
cmpl $0x4, %eax
jne 0x18b9fe
movq %rcx, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x188962
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18b89b
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18ba5c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
testq %rsi, %rsi
je 0x18ba77
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18bb02
lock
decl (%rax)
jne 0x18bb02
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18baf5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18bb02
testq %rsi, %rsi
je 0x18bb02
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18bd57
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18bd57
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %r9d
movl 0xc0(%rsp), %eax
leal 0xf(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x4, %ecx
movq 0xe8(%rsp), %rax
shrq $0x4, %rax
movl %r14d, %r8d
imulq %rax, %r8
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r9d, %edx
movl $0x10, %r9d
movq 0xe0(%rsp), %rax
pushq 0x8(%rax)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18bd57
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18bd57
testl %eax, %eax
jle 0x18bd55
xorl %r12d, %r12d
leaq 0x70(%rsp), %r14
leaq 0x100(%rsp), %r15
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movl 0x44(%rsp), %edx
movq 0x50(%rsp), %rsi
imulq %r12, %rsi
movq 0x20(%rsp), %rdi
imulq %rdi, %rsi
addq 0x10(%rsp), %rsi
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %rsi, 0x70(%rsp)
movq $0x0, 0x78(%rsp)
movq %rdi, 0x80(%rsp)
movl %r8d, 0x88(%rsp)
movq %r9, 0x90(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl $0x1, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xb0(%rsp)
movl 0x38(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x98(%rsp)
cmpl $0x4, %eax
jne 0x18bc77
movq %rcx, 0xb0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0x100(%rsp)
movq $0x0, 0x108(%rsp)
movq %rdi, 0x110(%rsp)
movl %r8d, 0x118(%rsp)
movq %r9, 0x120(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movl $0x1, 0x134(%rsp)
movl %edx, 0x138(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x140(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x128(%rsp)
cmpl $0x4, %eax
jne 0x18bd2a
movq %rcx, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x18889c
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18bbc7
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18bd88
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
testq %rsi, %rsi
je 0x18bda3
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18be2e
lock
decl (%rax)
jne 0x18be2e
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18be21
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18be2e
testq %rsi, %rsi
je 0x18be2e
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c06b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18c06b
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %r9d
movl 0xc4(%rsp), %ecx
movl 0xc0(%rsp), %eax
leal 0x3(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x2, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r15, %r9
pushq 0x8(%r14)
pushq $0x4
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c06b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18c06b
testl %eax, %eax
jle 0x18c069
movl 0xc4(%rsp), %ecx
xorl %r12d, %r12d
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x70(%rsp), %rdi
leaq 0x100(%rsp), %rbp
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x18c05a
xorl %r15d, %r15d
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movq 0x50(%rsp), %rdx
imulq %r14, %rdx
movq 0x20(%rsp), %rsi
imulq %rsi, %rdx
addq 0x10(%rsp), %rdx
movl 0x28(%rsp), %r11d
movq 0x30(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xe4(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x70(%rsp)
movq %r12, 0x78(%rsp)
movq %rsi, 0x80(%rsp)
movl %r11d, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl $0x2, %ebx
movl %ebx, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movq %r13, 0xa4(%rsp)
movq %r9, 0xb0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %r15, %r9
imulq %rax, %r9
movq %rcx, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x100(%rsp)
movq %r12, 0x108(%rsp)
movq %rsi, 0x110(%rsp)
movl %r11d, 0x118(%rsp)
movq %r8, 0x120(%rsp)
movl %ebx, 0x128(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movq %r13, 0x134(%rsp)
imulq %rax, %rcx
movq %rcx, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %rbp, %rsi
movq %rdi, %rbx
callq 0x188962
movq %rbx, %rdi
xorl %r12d, %r12d
movslq 0xc4(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x18befd
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18bef2
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18c09c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
testq %rsi, %rsi
je 0x18c0b7
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18c142
lock
decl (%rax)
jne 0x18c142
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18c135
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c142
testq %rsi, %rsi
je 0x18c142
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c392
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18c392
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %r10d
movl 0xc4(%rsp), %ecx
movl 0xc0(%rsp), %eax
leal 0xf(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x4, %r8d
movq 0xe8(%rsp), %rax
shrq $0x4, %rax
movl %r14d, %r9d
imulq %rax, %r9
movq 0x8(%rsp), %rdi
movl %r10d, %edx
movq 0xd8(%rsp), %rax
pushq 0x8(%rax)
pushq $0x10
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c392
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18c392
testl %eax, %eax
jle 0x18c390
movl 0xc4(%rsp), %ecx
xorl %r12d, %r12d
movl $0x2, %ebp
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x70(%rsp), %rdi
leaq 0x100(%rsp), %rsi
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x18c381
xorl %r15d, %r15d
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movq 0x50(%rsp), %rdx
imulq %r14, %rdx
movq 0x20(%rsp), %r11
imulq %r11, %rdx
addq 0x10(%rsp), %rdx
movl 0x28(%rsp), %ebx
movq 0x30(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xe4(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %r11, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x70(%rsp)
movq %r12, 0x78(%rsp)
movq %r11, 0x80(%rsp)
movl %ebx, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %ebp, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movq %r13, 0xa4(%rsp)
movq %r9, 0xb0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %r11
imulq %r11, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %ebx
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %r11, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x100(%rsp)
movq %r12, 0x108(%rsp)
movq %r11, 0x110(%rsp)
movl %ebx, 0x118(%rsp)
movq %r8, 0x120(%rsp)
movl %ebp, 0x128(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movq %r13, 0x134(%rsp)
movq %r9, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %rdi, %r12
movq %rsi, %rbx
callq 0x18889c
movq %rbx, %rsi
movq %r12, %rdi
xorl %r12d, %r12d
movslq 0xc4(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x18c22d
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18c222
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18c3c3
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
testq %rsi, %rsi
je 0x18c3de
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18c469
lock
decl (%rax)
jne 0x18c469
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18c45c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c469
testq %rsi, %rsi
je 0x18c469
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c6a7
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18c6a7
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %r8d
movl 0xc0(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r8d, %edx
movq %r15, %r8
movl $0x8, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c6a7
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18c6a7
testl %eax, %eax
jle 0x18c6a5
xorl %r12d, %r12d
leaq 0x70(%rsp), %r14
leaq 0x100(%rsp), %r15
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movl 0x44(%rsp), %edx
movq 0x50(%rsp), %rsi
imulq %r12, %rsi
movq 0x20(%rsp), %rdi
imulq %rdi, %rsi
addq 0x10(%rsp), %rsi
movl 0x28(%rsp), %r8d
movq 0x30(%rsp), %r9
movq %rsi, 0x70(%rsp)
movq $0x0, 0x78(%rsp)
movq %rdi, 0x80(%rsp)
movl %r8d, 0x88(%rsp)
movq %r9, 0x90(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movl $0x1, 0xa4(%rsp)
movl %edx, 0xa8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xb0(%rsp)
movl 0x38(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0x98(%rsp)
cmpl $0x4, %eax
jne 0x18c5c7
movq %rcx, 0xb0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0x100(%rsp)
movq $0x0, 0x108(%rsp)
movq %rdi, 0x110(%rsp)
movl %r8d, 0x118(%rsp)
movq %r9, 0x120(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movl $0x1, 0x134(%rsp)
movl %edx, 0x138(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x140(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x128(%rsp)
cmpl $0x4, %eax
jne 0x18c67a
movq %rcx, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x188901
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18c517
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18c6d8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
testq %rsi, %rsi
je 0x18c6f3
movq %rsi, %rdi
callq 0x563b0
movq 0x78(%rsp), %rax
vmovaps 0x70(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x80(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x88(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0x98(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xa8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xb0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18c77e
lock
decl (%rax)
jne 0x18c77e
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x18c771
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18c77e
testq %rsi, %rsi
je 0x18c77e
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c9ba
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18c9ba
movl 0x64(%rsp), %esi
movl 0x6c(%rsp), %r9d
movl 0xc4(%rsp), %ecx
movl 0xc0(%rsp), %eax
leal 0x7(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x3, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r15, %r9
pushq 0x8(%r14)
pushq $0x8
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18c9ba
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18c9ba
testl %eax, %eax
jle 0x18c9b8
movl 0xc4(%rsp), %ecx
xorl %r12d, %r12d
movl $0x2, %ebp
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x70(%rsp), %rdi
leaq 0x100(%rsp), %rsi
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x18c9a9
xorl %r15d, %r15d
movslq 0x3c(%rsp), %rax
movslq 0x40(%rsp), %rcx
movq 0x50(%rsp), %rdx
imulq %r14, %rdx
movq 0x20(%rsp), %rbx
imulq %rbx, %rdx
addq 0x10(%rsp), %rdx
movl 0x28(%rsp), %r11d
movq 0x30(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xe4(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rbx, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x70(%rsp)
movq %r12, 0x78(%rsp)
movq %rbx, 0x80(%rsp)
movl %r11d, 0x88(%rsp)
movq %r8, 0x90(%rsp)
movl %ebp, 0x98(%rsp)
movl %eax, 0x9c(%rsp)
movl %ecx, 0xa0(%rsp)
movq %r13, 0xa4(%rsp)
movq %r9, 0xb0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rbx
imulq %rbx, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %rbx, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x100(%rsp)
movq %r12, 0x108(%rsp)
movq %rbx, 0x110(%rsp)
movl %r11d, 0x118(%rsp)
movq %r8, 0x120(%rsp)
movl %ebp, 0x128(%rsp)
movl %eax, 0x12c(%rsp)
movl %ecx, 0x130(%rsp)
movq %r13, 0x134(%rsp)
movq %r9, 0x140(%rsp)
movl 0xc8(%rsp), %edx
movl 0x68(%rsp), %ecx
movq %rsi, %r12
movq %rdi, %rbx
callq 0x188901
movq %rbx, %rdi
movq %r12, %rsi
xorl %r12d, %r12d
movslq 0xc4(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x18c852
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18c847
xorl %ebp, %ebp
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18b73d
lock
decl (%rax)
jne 0x18b73d
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x18c9eb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18b73d
testq %rsi, %rsi
je 0x18b73d
jmp 0x18b620
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cc3c
lock
decl (%rax)
jne 0x18cc3c
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ca3b
testq %rsi, %rsi
je 0x18cc3c
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cc3c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18cc3c
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cc83
lock
decl (%rax)
jne 0x18cc83
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ca8d
testq %rsi, %rsi
je 0x18cc83
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cc83
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18cc83
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18ccca
lock
decl (%rax)
jne 0x18ccca
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cadf
testq %rsi, %rsi
je 0x18ccca
movq %rsi, %rdi
callq 0x563b0
jmp 0x18ccca
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18ccca
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cd58
lock
decl (%rax)
jne 0x18cd58
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cb31
testq %rsi, %rsi
je 0x18cd58
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cd58
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18cd58
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cd11
lock
decl (%rax)
jne 0x18cd11
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cb83
testq %rsi, %rsi
je 0x18cd11
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cd11
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18cd11
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cd9f
lock
decl (%rax)
jne 0x18cd9f
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cbd5
testq %rsi, %rsi
je 0x18cd9f
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cd9f
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18cd9f
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18cc39
jmp 0x18cc80
jmp 0x18ccc7
jmp 0x18cd55
jmp 0x18cd0e
jmp 0x18cd9c
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cc70
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ccb7
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ccfe
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cd45
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cd8c
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cdd3
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18ce49
lock
decl (%rax)
jne 0x18ce49
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ce43
testq %rsi, %rsi
je 0x18ce49
movq %rsi, %rdi
callq 0x563b0
jmp 0x18ce49
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18ce7d
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cec9
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cf12
lock
decl (%rax)
jne 0x18cf12
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cf0c
testq %rsi, %rsi
je 0x18cf12
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cf12
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cf46
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cf92
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18cfdb
lock
decl (%rax)
jne 0x18cfdb
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18cfd5
testq %rsi, %rsi
je 0x18cfdb
movq %rsi, %rdi
callq 0x563b0
jmp 0x18cfdb
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18d00f
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
jmp 0x18d104
movq %rax, %rbx
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18d05b
testq %rsi, %rsi
je 0x18d0fa
jmp 0x18d0ea
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d0fa
jmp 0x18d104
jmp 0x18d104
jmp 0x18d104
jmp 0x18d07c
movq %rax, %rbx
jmp 0x18d0c7
movq %rax, %rbx
jmp 0x18d091
movq %rax, %rbx
leaq 0x100(%rsp), %rdi
callq 0x678ac
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x18d0c7
lock
decl (%rax)
jne 0x18d0c7
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x18d0c1
testq %rsi, %rsi
je 0x18d0c7
movq %rsi, %rdi
callq 0x563b0
jmp 0x18d0c7
movq (%rdi), %rax
callq *0x18(%rax)
movq 0x18(%rsp), %rax
testq %rax, %rax
je 0x18d0fa
lock
decl (%rax)
jne 0x18d0fa
movq 0x10(%rsp), %rsi
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
jne 0x18d0f4
testq %rsi, %rsi
je 0x18d0fa
movq %rsi, %rdi
callq 0x563b0
jmp 0x18d0fa
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
jmp 0x18d104
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx512.cpp
|
ncnn::Crop_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Crop with x86/FMA packing awareness.
//
// For packed layouts (elempack 16 / 8 / 4) this tries a set of fast paths
// that crop directly in the packed representation; when no fast path
// applies, the input is unpacked to elempack 1 and the generic
// Crop::forward fallback handles it.
//
// Returns 0 on success, -100 when an output allocation fails.
//
// FIX: in every dims==3 / dims==4 branch, a successful
// `top_blob = bottom_blob_sliced.clone()` was previously followed by
// `top_blob.create(...)`, which reallocated top_blob and threw away the
// clone (a wasted deep copy; the subsequent crop loop then re-copied the
// same data, since the offsets are zero in that branch). Each clone path
// now returns 0 immediately, as the slice already is the final result.
int Crop_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    int dims = bottom_blob.dims;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
    if (elempack == 16)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            // Output elempack: largest of 16/8/4/1 that divides the output width.
            int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 16)
            {
                // Identity crop: share the input blob.
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX512F__
    if (elempack == 8)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack8_avx(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack8_avx(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX__
    if (elempack == 4)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            int out_elempack = _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack4_sse(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    // Channel-only crop: the slice is already the result.
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;

                    return 0;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack4_sse(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __SSE2__
    // No packed fast path matched: unpack to elempack 1 and defer to the
    // generic reference implementation. Workspace allocator is used for the
    // temporary unpacked blob so the blob allocator only sees real outputs.
    Mat bottom_blob_unpacked = bottom_blob;
    if (elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;
        convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
    }
    return Crop::forward(bottom_blob_unpacked, top_blob, opt);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x148, %rsp # imm = 0x148
movq %rcx, 0x78(%rsp)
movq %rdx, 0x8(%rsp)
movq %rsi, %r12
movq %rdi, 0x140(%rsp)
movl 0x2c(%rsi), %r15d
movslq %r15d, %rax
movl 0x30(%rsi), %r14d
movslq %r14d, %rcx
movl 0x34(%rsi), %edx
movl %edx, 0xf4(%rsp)
movslq %edx, %rdx
movl 0x38(%rsi), %esi
movq %rsi, 0xe0(%rsp)
movl 0x28(%r12), %ebp
movq 0x10(%r12), %rsi
movl 0x18(%r12), %edi
cmpl $0x4, %edi
je 0x18d253
cmpl $0x8, %edi
jne 0x18e26d
movl %edi, 0xec(%rsp)
movq %rsi, 0xd8(%rsp)
movq 0x140(%rsp), %r8
movq (%r8), %rsi
movq -0x18(%rsi), %rdi
addq %r8, %rdi
decl %ebp
cmpl $0x3, %ebp
ja 0x18d50e
leaq 0x28c750(%rip), %rsi # 0x419950
movslq (%rsi,%rbp,4), %r8
addq %rsi, %r8
jmpq *%r8
shlq $0x3, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %ecx
movl %ecx, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x50(%rsp)
movl %ecx, 0x58(%rsp)
jmp 0x18d3eb
movl %edi, 0xec(%rsp)
movq %rsi, 0xd8(%rsp)
movq 0x140(%rsp), %r8
movq (%r8), %rsi
movq -0x18(%rsi), %rdi
addq %r8, %rdi
decl %ebp
cmpl $0x3, %ebp
ja 0x18d5ab
leaq 0x28c6aa(%rip), %rsi # 0x419930
movslq (%rsi,%rbp,4), %r8
addq %rsi, %r8
jmpq *%r8
shlq $0x2, %rax
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %ecx
movl %ecx, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x50(%rsp)
movl %ecx, 0x58(%rsp)
jmp 0x18d4af
movq 0xe0(%rsp), %rdx
leal (,%rdx,8), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %esi
movl %esi, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x3, 0x48(%rsp)
movl %r15d, 0x4c(%rsp)
movl %r14d, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x58(%rsp)
imulq %rax, %rcx
jmp 0x18d3da
movq 0xe0(%rsp), %rdx
leal (,%rdx,4), %edx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, %esi
movl %esi, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x3, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x58(%rsp)
imulq %rax, %rcx
jmp 0x18d49e
movq 0xe0(%rsp), %rsi
leal (,%rsi,8), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %r15d, 0x4c(%rsp)
movl %r14d, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x60(%rsp)
jmp 0x18d533
shlq $0x3, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x2, 0x48(%rsp)
movl %r15d, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x54(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
jmp 0x18d533
movq 0xe0(%rsp), %rsi
leal (,%rsi,4), %esi
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movl %edx, 0x54(%rsp)
movl %esi, 0x58(%rsp)
imulq %rax, %rcx
imulq %rdx, %rcx
addq $0x3, %rcx
movabsq $0x3ffffffffffffffc, %rax # imm = 0x3FFFFFFFFFFFFFFC
andq %rcx, %rax
movq %rax, 0x60(%rsp)
jmp 0x18d5d0
shlq $0x2, %rcx
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x4, 0x30(%rsp)
movl $0x1, 0x38(%rsp)
movq $0x0, 0x40(%rsp)
movl $0x2, 0x48(%rsp)
movl %eax, 0x4c(%rsp)
movl %ecx, 0x50(%rsp)
movabsq $0x100000001, %rdx # imm = 0x100000001
movq %rdx, 0x54(%rsp)
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
jmp 0x18d5d0
movq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
leaq 0x6c(%rsp), %rax
leaq 0x70(%rsp), %r10
leaq 0x18(%rsp), %r11
leaq 0x14(%rsp), %r13
leaq 0x20(%rsp), %rsi
leaq 0x1c(%rsp), %rdx
leaq 0x74(%rsp), %rcx
leaq 0xf0(%rsp), %r8
leaq 0xd4(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
callq 0x180bfc
addq $0x20, %rsp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18d655
lock
decl (%rax)
jne 0x18d655
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18d648
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18d655
movq $0x0, 0x60(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups %xmm0, 0x2c(%rsp)
vmovaps %xmm0, 0x40(%rsp)
vmovups %xmm0, 0x4c(%rsp)
leaq 0x6c(%rsp), %rax
leaq 0x70(%rsp), %r10
leaq 0x18(%rsp), %r11
leaq 0x14(%rsp), %r13
leaq 0x20(%rsp), %rsi
leaq 0x1c(%rsp), %rdx
leaq 0x74(%rsp), %rcx
leaq 0xf0(%rsp), %r8
leaq 0xd4(%rsp), %r9
pushq %rax
pushq %r10
pushq %r11
pushq %r13
callq 0x180bfc
addq $0x20, %rsp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18dc68
lock
decl (%rax)
jne 0x18dc68
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18dc5b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18dc68
testq %rsi, %rsi
je 0x18d655
movq %rsi, %rdi
callq 0x563b0
cmpl $0x3, %ebp
ja 0x18e266
leaq 0x28c2fb(%rip), %rax # 0x419960
movslq (%rax,%rbp,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x14(%rsp), %esi
movl %esi, %edi
andl $0x7, %edi
xorl %eax, %eax
testb $0x3, %sil
sete %al
movq 0xd8(%rsp), %r9
shrq $0x3, %r9
leal (%rax,%rax), %ecx
testl %edi, %edi
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movzbl %cl, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r9
movl %esi, %eax
cltd
idivl %r8d
xorl %eax, %r15d
orl %edi, %r15d
jne 0x18e1e3
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18d6d9
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %r13
shrq $0x3, %r13
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r15d
movl 0x18(%rsp), %r8d
xorl %r8d, %r14d
orl %r15d, %r14d
sete %r9b
jne 0x18d999
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %ecx
cmovnel %eax, %ecx
movl %edi, %eax
cltd
idivl %ecx
testb $0x7, %dil
jne 0x18d999
cmpl 0xe0(%rsp), %eax
jne 0x18d999
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18d7ad
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %r13
shrq $0x3, %r13
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r15d
movl 0x18(%rsp), %r9d
xorl %r9d, %r14d
orl %r15d, %r14d
movl 0x70(%rsp), %ecx
movl 0xf4(%rsp), %eax
xorl %ecx, %eax
orl %r14d, %eax
sete %r10b
jne 0x18dab8
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x18dab8
cmpl 0xe0(%rsp), %eax
jne 0x18dab8
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18d894
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x18(%rsp), %edi
testb $0x3, %dil
sete %al
movq 0xd8(%rsp), %rdx
shrq $0x3, %rdx
addb %al, %al
testb $0x7, %dil
movzbl %al, %eax
movl $0x3, %ecx
cmovnel %eax, %ecx
shlq %cl, %rdx
movq %rdx, %rcx
movl 0x14(%rsp), %esi
cmpl %r15d, %esi
jne 0x18dbdb
xorl %eax, %eax
testb $0x3, %dil
sete %al
testb $0x7, %dil
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl %edi, %eax
cltd
idivl %r8d
testb $0x7, %dil
jne 0x18dbdb
cmpl %r14d, %eax
jne 0x18dbdb
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18d95b
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0xd4(%rsp), %eax
movl %eax, %ecx
orl %edi, %ecx
testb $0x7, %cl
jne 0x18e266
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x3, %ecx
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r10
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r10, 0x40(%rsp)
movl %ecx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %rdi, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r9b, %r9b
movq 0x78(%rsp), %r14
je 0x18eba1
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18eb2a
testq %rax, %rax
je 0x18da7d
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18eaca
lock
decl (%rax)
jne 0x18eaca
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18eabd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18eaca
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x7, %dl
jne 0x18e266
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
leal 0x7(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x3, %r8d
movslq %edx, %rax
imulq 0x40(%r12), %rax
movq 0x10(%r12), %rdi
imulq %rdi, %rax
addq (%r12), %rax
movl 0x18(%r12), %edx
movq 0x20(%r12), %r11
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %rdi, 0x30(%rsp)
movl %edx, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movl %r8d, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %rdi, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r10b, %r10b
je 0x18e3e3
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18e3f7
testq %rax, %rax
movq 0x78(%rsp), %r14
je 0x18dba0
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18edeb
lock
decl (%rax)
jne 0x18edeb
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18edde
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18edeb
movl 0x74(%rsp), %eax
orl %edi, %eax
testb $0x7, %al
jne 0x18e266
leal 0x7(%rdi), %edx
testl %edi, %edi
cmovnsl %edi, %edx
sarl $0x3, %edx
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl $0x8, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18e475
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18e475
movl 0x74(%rsp), %eax
leal 0x7(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x3, %edx
movl 0x1c(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x18f469
jmp 0x18e1dc
testq %rsi, %rsi
je 0x18dc68
movq %rsi, %rdi
callq 0x563b0
cmpl $0x3, %ebp
ja 0x18e266
leaq 0x28bcc8(%rip), %rax # 0x419940
movslq (%rax,%rbp,4), %rcx
addq %rax, %rcx
jmpq *%rcx
movl 0x14(%rsp), %esi
xorl %ecx, %ecx
movl %esi, %edi
andl $0x3, %edi
sete %cl
leal (%rcx,%rcx,2), %r9d
incl %r9d
movq 0xd8(%rsp), %r10
shrq $0x2, %r10
addb %cl, %cl
shlq %cl, %r10
movl %esi, %eax
cltd
idivl %r9d
xorl %eax, %r15d
orl %edi, %r15d
jne 0x18e258
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18dcd3
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %cl
movq 0xd8(%rsp), %r13
shrq $0x2, %r13
addb %cl, %cl
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r15d
movl 0x18(%rsp), %r8d
xorl %r8d, %r14d
orl %r15d, %r14d
sete %cl
jne 0x18df40
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r9d
incl %r9d
movl %edi, %eax
cltd
idivl %r9d
testb $0x3, %dil
jne 0x18df40
cmpl 0xe0(%rsp), %eax
jne 0x18df40
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18dd8f
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x6c(%rsp), %edi
testb $0x3, %dil
sete %cl
movq 0xd8(%rsp), %r13
shrq $0x2, %r13
addb %cl, %cl
shlq %cl, %r13
movl 0x14(%rsp), %esi
xorl %esi, %r15d
movl 0x18(%rsp), %r9d
xorl %r9d, %r14d
orl %r15d, %r14d
movl 0x70(%rsp), %ecx
movl 0xf4(%rsp), %eax
xorl %ecx, %eax
orl %r14d, %eax
sete %r8b
jne 0x18e050
xorl %eax, %eax
testb $0x3, %dil
sete %al
leal (%rax,%rax,2), %r10d
incl %r10d
movl %edi, %eax
cltd
idivl %r10d
testb $0x3, %dil
jne 0x18e050
cmpl 0xe0(%rsp), %eax
jne 0x18e050
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18de5c
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
jne 0x18df35
testq %rsi, %rsi
je 0x18e40e
jmp 0x18e406
movl 0x18(%rsp), %r8d
testb $0x3, %r8b
sete %cl
movq 0xd8(%rsp), %rdi
shrq $0x2, %rdi
addb %cl, %cl
shlq %cl, %rdi
movl 0x14(%rsp), %esi
cmpl %r15d, %esi
jne 0x18e161
xorl %eax, %eax
testb $0x3, %r8b
sete %al
leal (%rax,%rax,2), %ecx
incl %ecx
movl %r8d, %eax
cltd
idivl %ecx
testb $0x3, %r8b
jne 0x18e161
cmpl %r14d, %eax
jne 0x18e161
xorl %ebp, %ebp
cmpq %r12, 0x8(%rsp)
je 0x18e475
movq 0x8(%r12), %rax
testq %rax, %rax
je 0x18df05
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e40e
lock
decl (%rax)
jne 0x18e40e
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18e401
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e40e
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x3, %dl
jne 0x18e266
sarl $0x2, %eax
movl %edi, %edx
sarl $0x2, %edx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r9
imulq %r9, %rax
addq (%r12), %rax
movl 0x18(%r12), %r10d
movq 0x20(%r12), %r11
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %r9, 0x30(%rsp)
movl %r10d, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movl %edx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r10
imulq %rdx, %r10
imulq %r9, %rax
imulq %r10, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r9
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %cl, %cl
movq 0x78(%rsp), %r14
je 0x18e562
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18e4f6
testq %rax, %rax
je 0x18e015
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e496
lock
decl (%rax)
jne 0x18e496
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18e489
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e496
movl 0xd4(%rsp), %eax
movl %eax, %edx
orl %edi, %edx
testb $0x3, %dl
jne 0x18e266
sarl $0x2, %eax
movl %edi, %edx
sarl $0x2, %edx
cltq
imulq 0x40(%r12), %rax
movq 0x10(%r12), %r10
imulq %r10, %rax
addq (%r12), %rax
movl 0x18(%r12), %r11d
movq 0x20(%r12), %r14
movq %rax, 0x20(%rsp)
movq $0x0, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %r11d, 0x38(%rsp)
movq %r14, 0x40(%rsp)
movl %edx, 0x58(%rsp)
vmovups 0x28(%r12), %xmm0
movslq 0x34(%r12), %rax
movslq 0x2c(%r12), %rdx
movslq 0x30(%r12), %r11
imulq %rdx, %r11
imulq %r10, %rax
imulq %r11, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %r10
movq %rax, 0x60(%rsp)
vmovups %xmm0, 0x48(%rsp)
testb %r8b, %r8b
je 0x18e370
leaq 0x80(%rsp), %r12
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
xorl %edx, %edx
callq 0x59e38
movq 0x88(%rsp), %rax
cmpq 0x8(%rsp), %r12
je 0x18e3ed
testq %rax, %rax
movq 0x78(%rsp), %r14
je 0x18e126
lock
incl (%rax)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
testq %rax, %rax
je 0x18e7b7
lock
decl (%rax)
jne 0x18e7b7
movq 0x8(%rsp), %rax
movq (%rax), %rsi
movq 0x20(%rax), %rdi
testq %rdi, %rdi
je 0x18e7aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e7b7
movl 0x74(%rsp), %eax
orl %r8d, %eax
testb $0x3, %al
jne 0x18e266
movq %rdi, %rcx
sarl $0x2, %r8d
movq 0x78(%rsp), %rax
movq 0x8(%rax), %r9
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %r8d, %edx
movl $0x4, %r8d
callq 0x5a14a
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18e475
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18e475
movl 0x74(%rsp), %eax
leal 0x3(%rax), %edx
testl %eax, %eax
cmovnsl %eax, %edx
sarl $0x2, %edx
movl 0x1c(%rsp), %ecx
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x18f4ca
xorl %ebp, %ebp
jmp 0x18e475
orl 0x1c(%rsp), %esi
testb $0x7, %sil
jne 0x18e266
movq 0x78(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r9, %rdx
movl $0x8, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18e475
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18e475
movl 0x1c(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x18f469
jmp 0x18e475
orl 0x1c(%rsp), %esi
testb $0x3, %sil
je 0x18e37a
movl 0xec(%rsp), %edi
movq 0x8(%r12), %rax
vmovups (%r12), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0x10(%r12), %rcx
movq %rcx, 0x30(%rsp)
movl 0x18(%r12), %ecx
movl %ecx, 0x38(%rsp)
movq 0x20(%r12), %rcx
movq %rcx, 0x40(%rsp)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0x38(%r12), %ecx
movl %ecx, 0x58(%rsp)
movq 0x40(%r12), %rcx
movq %rcx, 0x60(%rsp)
testq %rax, %rax
je 0x18e2c3
lock
incl (%rax)
cmpl $0x1, %edi
movq 0x78(%rsp), %r14
je 0x18e306
vmovups (%r14), %ymm0
vmovups 0x20(%r14), %ymm1
leaq 0x80(%rsp), %rcx
vmovups %ymm1, 0x20(%rcx)
vmovups %ymm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
leaq 0x20(%rsp), %rsi
movq %r12, %rdi
movl $0x1, %edx
vzeroupper
callq 0x5c97e
movq 0x140(%rsp), %rdi
movq (%rdi), %rax
addq -0x18(%rax), %rdi
leaq 0x20(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq %r14, %rcx
callq 0x17fb2e
movl %eax, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18e475
lock
decl (%rax)
jne 0x18e475
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18e35a
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e475
testq %rsi, %rsi
je 0x18e475
movq %rsi, %rdi
callq 0x563b0
jmp 0x18e475
movq 0x78(%rsp), %r14
jmp 0x18e887
movq 0x78(%rsp), %rcx
movq 0x8(%rcx), %r8
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
movl %eax, %esi
movq %r10, %rdx
movl %r9d, %ecx
callq 0x5a03c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x18e475
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18e475
movl 0x1c(%rsp), %eax
leal 0x3(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x2, %ecx
xorl %ebp, %ebp
movq %r12, %rdi
movq 0x8(%rsp), %rsi
xorl %edx, %edx
callq 0x18f4ca
jmp 0x18e475
movq 0x78(%rsp), %r14
jmp 0x18eec9
movq 0x78(%rsp), %r14
jmp 0x18e817
movq 0x78(%rsp), %r14
jmp 0x18ee4b
testq %rsi, %rsi
je 0x18e40e
movq %rsi, %rdi
callq 0x563b0
movq 0x8(%rsp), %rcx
movq $0x0, 0x40(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%rcx)
vmovups %xmm0, 0xc(%rcx)
vmovups %xmm0, 0x28(%rcx)
movl $0x0, 0x38(%rcx)
vmovups (%r12), %xmm0
vmovups %xmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x10(%rcx)
movl 0x18(%r12), %eax
movl %eax, 0x18(%rcx)
movq 0x20(%r12), %rax
movq %rax, 0x20(%rcx)
vmovups 0x28(%r12), %xmm0
vmovups %xmm0, 0x28(%rcx)
movl 0x38(%r12), %eax
movl %eax, 0x38(%rcx)
movq 0x40(%r12), %rax
movq %rax, 0x40(%rcx)
movl %ebp, %eax
addq $0x148, %rsp # imm = 0x148
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testq %rsi, %rsi
je 0x18e496
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18e52a
lock
decl (%rax)
jne 0x18e52a
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x18e51d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e52a
testq %rsi, %rsi
je 0x18e52a
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18e76b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18e76b
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r8d
movl 0x6c(%rsp), %edi
leal 0x3(%rdi), %ecx
testl %edi, %edi
cmovnsl %edi, %ecx
sarl $0x2, %ecx
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r8d, %edx
movq %r13, %r8
movl $0x4, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18e76b
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18e76b
testl %eax, %eax
jle 0x18e769
xorl %r12d, %r12d
leaq 0x80(%rsp), %r14
leaq 0xf8(%rsp), %r15
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movl 0x54(%rsp), %edx
movq 0x60(%rsp), %rsi
imulq %r12, %rsi
movq 0x30(%rsp), %rdi
imulq %rdi, %rsi
addq 0x20(%rsp), %rsi
movl 0x38(%rsp), %r8d
movq 0x40(%rsp), %r9
movq %rsi, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %r8d, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %edx, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x48(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0xa8(%rsp)
cmpl $0x4, %eax
jne 0x18e68e
movq %rcx, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0xf8(%rsp)
movq $0x0, 0x100(%rsp)
movq %rdi, 0x108(%rsp)
movl %r8d, 0x110(%rsp)
movq %r9, 0x118(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movl $0x1, 0x12c(%rsp)
movl %edx, 0x130(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x138(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x120(%rsp)
cmpl $0x4, %eax
jne 0x18e741
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x18f4ca
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18e5d8
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18e475
lock
decl (%rax)
jne 0x18e475
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18e79c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e475
testq %rsi, %rsi
je 0x18e475
jmp 0x18e363
testq %rsi, %rsi
je 0x18e7b7
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18e84b
lock
decl (%rax)
jne 0x18e84b
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x18e83e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e84b
testq %rsi, %rsi
je 0x18e84b
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ea7e
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18ea7e
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r9d
movl 0x70(%rsp), %ecx
movl 0x6c(%rsp), %edi
leal 0x3(%rdi), %r8d
testl %edi, %edi
cmovnsl %edi, %r8d
sarl $0x2, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r13, %r9
pushq 0x8(%r14)
pushq $0x4
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ea7e
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18ea7e
testl %eax, %eax
jle 0x18ea7c
movl 0x70(%rsp), %ecx
xorl %r12d, %r12d
movabsq $0x100000001, %r13 # imm = 0x100000001
leaq 0x80(%rsp), %rdi
leaq 0xf8(%rsp), %rbp
xorl %r14d, %r14d
testl %ecx, %ecx
jle 0x18ea6d
xorl %r15d, %r15d
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r14, %rdx
movq 0x30(%rsp), %rsi
imulq %rsi, %rdx
addq 0x20(%rsp), %rdx
movl 0x38(%rsp), %r11d
movq 0x40(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xf0(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x80(%rsp)
movq %r12, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
movl %r11d, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl $0x2, %ebx
movl %ebx, 0xa8(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movq %r13, 0xb4(%rsp)
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0xf8(%rsp)
movq %r12, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
movl %r11d, 0x110(%rsp)
movq %r8, 0x118(%rsp)
movl %ebx, 0x120(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movq %r13, 0x12c(%rsp)
movq %r9, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %rbp, %rsi
movq %rdi, %rbx
callq 0x18f4ca
movq %rbx, %rdi
xorl %r12d, %r12d
movslq 0x70(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x18e914
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18e909
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18e475
lock
decl (%rax)
jne 0x18e475
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18eaaf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e475
testq %rsi, %rsi
je 0x18e475
jmp 0x18e363
testq %rsi, %rsi
je 0x18eaca
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18eb5e
lock
decl (%rax)
jne 0x18eb5e
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x18eb51
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18eb5e
testq %rsi, %rsi
je 0x18eb5e
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ed9f
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18ed9f
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r8d
movl 0x6c(%rsp), %eax
leal 0x7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $0x3, %ecx
subq $0x8, %rsp
movq 0x10(%rsp), %rdi
movl %r8d, %edx
movq %r13, %r8
movl $0x8, %r9d
pushq 0x8(%r14)
callq 0x5a266
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18ed9f
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18ed9f
testl %eax, %eax
jle 0x18ed9d
xorl %r12d, %r12d
leaq 0x80(%rsp), %r14
leaq 0xf8(%rsp), %r15
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movl 0x54(%rsp), %edx
movq 0x60(%rsp), %rsi
imulq %r12, %rsi
movq 0x30(%rsp), %rdi
imulq %rdi, %rsi
addq 0x20(%rsp), %rsi
movl 0x38(%rsp), %r8d
movq 0x40(%rsp), %r9
movq %rsi, 0x80(%rsp)
movq $0x0, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movl %r8d, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movl $0x1, 0xb4(%rsp)
movl %edx, 0xb8(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0xc0(%rsp)
movl 0x48(%rsp), %eax
leal -0x1(%rax), %edx
movl %edx, 0xa8(%rsp)
cmpl $0x4, %eax
jne 0x18ecc2
movq %rcx, 0xc0(%rsp)
movq 0x8(%rsp), %r10
movslq 0x2c(%r10), %rax
movslq 0x30(%r10), %rcx
movl 0x34(%r10), %edx
movq 0x40(%r10), %rsi
imulq %r12, %rsi
movq 0x10(%r10), %rdi
imulq %rdi, %rsi
addq (%r10), %rsi
movl 0x18(%r10), %r8d
movq 0x20(%r10), %r9
movq %rsi, 0xf8(%rsp)
movq $0x0, 0x100(%rsp)
movq %rdi, 0x108(%rsp)
movl %r8d, 0x110(%rsp)
movq %r9, 0x118(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movl $0x1, 0x12c(%rsp)
movl %edx, 0x130(%rsp)
imulq %rax, %rcx
movq %rdi, %rax
imulq %rcx, %rax
addq $0xf, %rax
andq $-0x10, %rax
xorl %edx, %edx
divq %rdi
movq %rax, 0x138(%rsp)
movl 0x28(%r10), %eax
leal -0x1(%rax), %edx
movl %edx, 0x120(%rsp)
cmpl $0x4, %eax
jne 0x18ed75
movq %rcx, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %r14, %rdi
movq %r15, %rsi
callq 0x18f469
movq 0x8(%rsp), %rax
movslq 0x38(%rax), %rax
incq %r12
cmpq %rax, %r12
jl 0x18ec0c
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18e475
lock
decl (%rax)
jne 0x18e475
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18edd0
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e475
testq %rsi, %rsi
je 0x18e475
jmp 0x18e363
testq %rsi, %rsi
je 0x18edeb
movq %rsi, %rdi
callq 0x563b0
movq 0x88(%rsp), %rax
vmovaps 0x80(%rsp), %xmm0
movq 0x8(%rsp), %rdx
vmovups %xmm0, (%rdx)
movq 0x90(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movl 0x98(%rsp), %ecx
movl %ecx, 0x18(%rdx)
movq 0xa0(%rsp), %rcx
movq %rcx, 0x20(%rdx)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x28(%rdx)
movl 0xb8(%rsp), %ecx
movl %ecx, 0x38(%rdx)
movq 0xc0(%rsp), %rcx
movq %rcx, 0x40(%rdx)
testq %rax, %rax
je 0x18ee7f
lock
decl (%rax)
jne 0x18ee7f
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x18ee72
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18ee7f
testq %rsi, %rsi
je 0x18ee7f
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18f0b5
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x18f0b5
movl 0x14(%rsp), %esi
movl 0x18(%rsp), %r9d
movl 0x70(%rsp), %ecx
movl 0x6c(%rsp), %eax
leal 0x7(%rax), %r8d
testl %eax, %eax
cmovnsl %eax, %r8d
sarl $0x3, %r8d
movq 0x8(%rsp), %rdi
movl %r9d, %edx
movq %r13, %r9
pushq 0x8(%r14)
pushq $0x8
callq 0x5a3a6
addq $0x10, %rsp
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x8(%rsp), %rax
cmpq $0x0, (%rax)
je 0x18f0b5
movq 0x8(%rsp), %rcx
movslq 0x38(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
testq %rcx, %rcx
je 0x18f0b5
testl %eax, %eax
jle 0x18f0b3
movl 0x70(%rsp), %ecx
xorl %r12d, %r12d
movabsq $0x100000001, %rbx # imm = 0x100000001
leaq 0x80(%rsp), %rdi
leaq 0xf8(%rsp), %rbp
xorl %r14d, %r14d
movl $0x2, %r13d
testl %ecx, %ecx
jle 0x18f0a4
xorl %r15d, %r15d
movslq 0x4c(%rsp), %rax
movslq 0x50(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r14, %rdx
movq 0x30(%rsp), %rsi
imulq %rsi, %rdx
addq 0x20(%rsp), %rdx
movl 0x38(%rsp), %r11d
movq 0x40(%rsp), %r8
movq %rcx, %r9
imulq %rax, %r9
movl 0xf0(%rsp), %r10d
addl %r15d, %r10d
movslq %r10d, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0x80(%rsp)
movq %r12, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
movl %r11d, 0x98(%rsp)
movq %r8, 0xa0(%rsp)
movl %r13d, 0xa8(%rsp)
movl %eax, 0xac(%rsp)
movl %ecx, 0xb0(%rsp)
movq %rbx, 0xb4(%rsp)
movq %r9, 0xc0(%rsp)
movq 0x8(%rsp), %r8
movslq 0x2c(%r8), %rax
movslq 0x30(%r8), %rcx
movq 0x40(%r8), %rdx
imulq %r14, %rdx
movq 0x10(%r8), %rsi
imulq %rsi, %rdx
addq (%r8), %rdx
movl 0x18(%r8), %r11d
movq 0x20(%r8), %r8
movq %rcx, %r9
imulq %rax, %r9
movq %r15, %r10
imulq %rsi, %r10
imulq %r9, %r10
addq %rdx, %r10
movq %r10, 0xf8(%rsp)
movq %r12, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
movl %r11d, 0x110(%rsp)
movq %r8, 0x118(%rsp)
movl %r13d, 0x120(%rsp)
movl %eax, 0x124(%rsp)
movl %ecx, 0x128(%rsp)
movq %rbx, 0x12c(%rsp)
movq %r9, 0x138(%rsp)
movl 0x74(%rsp), %edx
movl 0x1c(%rsp), %ecx
movq %rbp, %rsi
movq %rdi, %r12
callq 0x18f469
movq %r12, %rdi
xorl %r12d, %r12d
movslq 0x70(%rsp), %rcx
incq %r15
cmpq %rcx, %r15
jl 0x18ef4e
movq 0x8(%rsp), %rax
movl 0x38(%rax), %eax
incq %r14
movslq %eax, %rdx
cmpq %rdx, %r14
jl 0x18ef43
xorl %ebp, %ebp
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18e475
lock
decl (%rax)
jne 0x18e475
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x18f0e6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18e475
testq %rsi, %rsi
je 0x18e475
jmp 0x18e363
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x18f28d
lock
decl (%rax)
jne 0x18f28d
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f13c
testq %rsi, %rsi
je 0x18f28d
movq %rsi, %rdi
callq 0x563b0
jmp 0x18f28d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f28d
jmp 0x18f461
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x18f2d4
lock
decl (%rax)
jne 0x18f2d4
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f194
testq %rsi, %rsi
je 0x18f2d4
movq %rsi, %rdi
callq 0x563b0
jmp 0x18f2d4
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f2d4
jmp 0x18f461
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x18f31b
lock
decl (%rax)
jne 0x18f31b
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f1ec
testq %rsi, %rsi
je 0x18f31b
movq %rsi, %rdi
callq 0x563b0
jmp 0x18f31b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f31b
jmp 0x18f461
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x18f362
lock
decl (%rax)
jne 0x18f362
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f244
testq %rsi, %rsi
je 0x18f362
movq %rsi, %rdi
callq 0x563b0
jmp 0x18f362
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f362
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f28a
jmp 0x18f2d1
jmp 0x18f318
jmp 0x18f35f
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f2c1
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f308
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f34f
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f396
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
jmp 0x18f461
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f3e5
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f417
testq %rsi, %rsi
je 0x18f459
jmp 0x18f449
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x18f459
jmp 0x18f461
jmp 0x18f423
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x18f459
lock
decl (%rax)
jne 0x18f459
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x18f453
testq %rsi, %rsi
je 0x18f459
movq %rsi, %rdi
callq 0x563b0
jmp 0x18f459
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_fma.cpp
|
ncnn::crop_pack4_sse(ncnn::Mat const&, ncnn::Mat&, int, int)
|
// Copy a dst.w x dst.h window out of src, starting at row `top` and
// column `left`. Elements are 4-float packs (pack4 layout), so every
// column step moves 4 floats. src and dst must share elemsize/elempack.
static void crop_pack4_sse(const Mat& src, Mat& dst, int top, int left)
{
    const int out_w = dst.w;
    const int out_h = dst.h;
    // columns to skip on the right side of each source row
    const int right = src.w - dst.w - left;
    const float* in = src.row(top) + left * 4;
    float* out = dst;
    // floats to advance past (left margin of next row + right margin of this one)
    const int row_skip = (left + right) * 4;
    for (int y = 0; y < out_h; ++y)
    {
        for (int x = 0; x < out_w; ++x)
        {
            _mm_storeu_ps(out, _mm_loadu_ps(in));
            in += 4;
            out += 4;
        }
        in += row_skip;
    }
}
|
# ncnn::crop_pack4_sse(const Mat& src, Mat& dst, int top, int left)
# rdi = &src, rsi = &dst, edx = top, ecx = left
# Mat field offsets (inferred from the paired C++ above): data=+0x0,
# elemsize=+0x10, w=+0x2c, h=+0x30 -- TODO confirm against ncnn::Mat layout.
movl 0x30(%rsi), %eax          # eax = dst.h
testl %eax, %eax
jle 0x18f527                   # h <= 0: nothing to copy, return
movl 0x2c(%rsi), %r8d          # r8d = dst.w
movslq %r8d, %r10
movq (%rsi), %rsi              # rsi = dst.data (outptr)
movslq 0x2c(%rdi), %r9         # r9 = src.w
movslq %edx, %rdx
imulq %r9, %rdx                # rdx = top * src.w
imulq 0x10(%rdi), %rdx         # ... * src.elemsize (bytes per packed element)
addq (%rdi), %rdx              # rdx = src.row(top) as byte address
shll $0x2, %ecx                # ecx = left * 4 (floats)
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %rcx       # rcx = ptr = src.row(top) + left*4 floats
subq %r10, %r9                 # r9 = src.w - dst.w  (== left + right)
shlq $0x2, %r9                 # r9 = (left+right) * 4 floats (row skip)
xorl %edx, %edx                # edx = y = 0
movl %r8d, %edi                # edi = x countdown = w   <- outer loop head
testl %r8d, %r8d
jle 0x18f51d                   # w <= 0: skip inner loop
vmovups (%rcx), %xmm0          # _p = loadu 4 floats     <- inner loop head
vmovups %xmm0, (%rsi)          # storeu to outptr
addq $0x10, %rcx               # ptr += 4 floats
addq $0x10, %rsi               # outptr += 4 floats
decl %edi
jne 0x18f509                   # next x
leaq (%rcx,%r9,4), %rcx        # ptr += (left+right)*4 floats
incl %edx
cmpl %eax, %edx
jne 0x18f501                   # next y
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_fma.cpp
|
virtual thunk to ncnn::Crop_x86_fma::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Packed-layout crop forward pass (single-input overload).
// Resolves the crop ROI against the input's shape once per elempack tier,
// then takes a fast path when the crop offsets are aligned to the pack
// size (zero-copy sharing or a packed row-copy helper). If no aligned
// path applies, the blob is unpacked to elempack=1 and handed to the
// generic Crop::forward. Returns 0 on success, -100 on allocation failure.
int Crop_x86_fma::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    int dims = bottom_blob.dims;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
#if __SSE2__
#if __AVX__
#if __AVX512F__
    // pack16 tier (AVX-512): offsets must be multiples of 16 to stay packed.
    if (elempack == 16)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            // identity crop: share the input blob, no copy
            if (_outw / out_elempack == w && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                // NOTE(review): upstream ncnn returns 0 right after this clone;
                // here control falls through to create(), which is a shape no-op
                // after the clone, and the loop re-crops in place -- redundant
                // but result-preserving. Confirm against upstream. Same pattern
                // in the dims==4 branch below and the other elempack tiers.
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    // crop each depth slice independently; source depth is
                    // shifted by _doffset
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX512F__
    // pack8 tier (AVX): same structure as pack16 with alignment 8.
    if (elempack == 8)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack8_avx(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack8_avx(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX__
    // pack4 tier (SSE2): same structure with alignment 4.
    if (elempack == 4)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        resolve_crop_roi(bottom_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        if (dims == 1)
        {
            int out_elempack = _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack4_sse(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack4_sse(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __SSE2__
    // Fallback: unpack to elempack=1 (scratch buffers go to the workspace
    // allocator) and defer to the reference implementation.
    Mat bottom_blob_unpacked = bottom_blob;
    if (elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;
        convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
    }
    return Crop::forward(bottom_blob_unpacked, top_blob, opt);
}
|
# Virtual thunk: adjust `this` (rdi) by the virtual-base offset stored in
# the vtable at -0x48, then tail-jump to the real forward(Mat...) body.
movq (%rdi), %rax              # rax = vptr
addq -0x48(%rax), %rdi         # this += *(vptr - 0x48)
jmp 0x18d160                   # tail-call actual implementation
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_fma.cpp
|
virtual thunk to ncnn::Crop_x86_fma::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const
|
// Packed-layout crop forward pass (two-input overload): bottom_blobs[0]
// is the data to crop, bottom_blobs[1] the reference that defines the
// crop region. When the layer's woffset is the -233 sentinel the
// reference blob holds explicit per-axis crop parameters (read as raw
// ints); otherwise only its shape is used. Structure mirrors the
// single-input overload: aligned fast paths per elempack tier, then an
// unpack-to-elempack=1 fallback. Returns 0 on success, -100 on
// allocation failure.
int Crop_x86_fma::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
    const Mat& bottom_blob = bottom_blobs[0];
    const Mat& reference_blob = bottom_blobs[1];
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    int dims = bottom_blob.dims;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int ref_elempack = reference_blob.elempack;
    Mat& top_blob = top_blobs[0];
#if __SSE2__
#if __AVX__
#if __AVX512F__
    // pack16 tier (AVX-512)
    if (elempack == 16)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        // -233 sentinel: reference blob carries explicit crop params
        if (woffset == -233)
        {
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        if (dims == 1)
        {
            int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            // identity crop: share the input blob, no copy
            if (_outw / out_elempack == w && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 16 == 0 && out_elempack == 16)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                // NOTE(review): upstream ncnn returns 0 right after this clone;
                // here control falls through to create() (a shape no-op after
                // the clone) and the loop re-crops in place -- redundant but
                // result-preserving. Confirm against upstream. Same pattern in
                // the dims==4 branch below and the other elempack tiers.
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 16 == 0 && out_elempack == 16)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack16_avx512(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX512F__
    // pack8 tier (AVX)
    if (elempack == 8)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        if (woffset == -233)
        {
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        if (dims == 1)
        {
            int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 8 == 0 && out_elempack == 8)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack8_avx(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 8 == 0 && out_elempack == 8)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack8_avx(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __AVX__
    // pack4 tier (SSE2)
    if (elempack == 4)
    {
        int _woffset, _hoffset, _doffset, _coffset;
        int _outw, _outh, _outd, _outc;
        if (woffset == -233)
        {
            resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        else
        {
            resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
        }
        if (dims == 1)
        {
            int out_elempack = _outw % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw / out_elempack == w && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_woffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
                return 0;
            }
        }
        if (dims == 2)
        {
            int out_elempack = _outh % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_hoffset % 4 == 0 && out_elempack == 4)
            {
                top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
                return 0;
            }
        }
        if (dims == 3)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    const Mat m = bottom_blob_sliced.channel(q);
                    Mat borderm = top_blob.channel(q);
                    crop_pack4_sse(m, borderm, _hoffset, _woffset);
                }
                return 0;
            }
        }
        if (dims == 4)
        {
            int out_elempack = _outc % 4 == 0 ? 4 : 1;
            size_t out_elemsize = elemsize / elempack * out_elempack;
            if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
            {
                top_blob = bottom_blob;
                return 0;
            }
            if (_coffset % 4 == 0 && out_elempack == 4)
            {
                const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
                if (_outw == w && _outh == h && _outd == d)
                {
                    top_blob = bottom_blob_sliced.clone();
                    if (top_blob.empty())
                        return -100;
                }
                top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
                if (top_blob.empty())
                    return -100;
                #pragma omp parallel for num_threads(opt.num_threads)
                for (int q = 0; q < top_blob.c; q++)
                {
                    for (int z = 0; z < _outd; z++)
                    {
                        const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
                        Mat borderm = top_blob.channel(q).depth(z);
                        crop_pack4_sse(m, borderm, _hoffset, _woffset);
                    }
                }
                return 0;
            }
        }
    }
#endif // __SSE2__
    // Fallback: unpack both inputs to elempack=1 (scratch buffers use the
    // workspace allocator) and defer to the reference implementation.
    Mat bottom_blob_unpacked = bottom_blob;
    if (elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;
        convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
    }
    Mat reference_blob_unpacked = reference_blob;
    if (ref_elempack != 1)
    {
        Option opt_pack1 = opt;
        opt_pack1.blob_allocator = opt.workspace_allocator;
        convert_packing(reference_blob, reference_blob_unpacked, 1, opt_pack1);
    }
    std::vector<Mat> bottom_blobs_unpacked(2);
    bottom_blobs_unpacked[0] = bottom_blob_unpacked;
    bottom_blobs_unpacked[1] = reference_blob_unpacked;
    return Crop::forward(bottom_blobs_unpacked, top_blobs, opt);
}
|
# Virtual thunk: adjust `this` (rdi) by the virtual-base offset stored in
# the vtable at -0x40, then tail-jump to the real forward(vector<Mat>...) body.
movq (%rdi), %rax              # rax = vptr
addq -0x40(%rax), %rdi         # this += *(vptr - 0x40)
jmp 0x18f534                   # tail-call actual implementation
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_fma.cpp
|
ncnn::crop_pack8_avx(ncnn::Mat const&, ncnn::Mat&, int, int)
|
// Copy a dst.w x dst.h window out of src, starting at row `top` and
// column `left`. Elements are 8-float packs (pack8 layout), so every
// column step moves 8 floats. src and dst must share elemsize/elempack.
static void crop_pack8_avx(const Mat& src, Mat& dst, int top, int left)
{
    const int out_w = dst.w;
    const int out_h = dst.h;
    // columns to skip on the right side of each source row
    const int right = src.w - dst.w - left;
    const float* in = src.row(top) + left * 8;
    float* out = dst;
    // floats to advance past (left margin of next row + right margin of this one)
    const int row_skip = (left + right) * 8;
    for (int y = 0; y < out_h; ++y)
    {
        for (int x = 0; x < out_w; ++x)
        {
            _mm256_storeu_ps(out, _mm256_loadu_ps(in));
            in += 8;
            out += 8;
        }
        in += row_skip;
    }
}
|
# ncnn::crop_pack8_avx(const Mat& src, Mat& dst, int top, int left)
# rdi = &src, rsi = &dst, edx = top, ecx = left
# Mat field offsets (inferred from the paired C++ above): data=+0x0,
# elemsize=+0x10, w=+0x2c, h=+0x30 -- TODO confirm against ncnn::Mat layout.
movl 0x30(%rsi), %eax          # eax = dst.h
testl %eax, %eax
jle 0x194a1e                   # h <= 0: nothing to copy, return
movl 0x2c(%rsi), %r8d          # r8d = dst.w
movslq %r8d, %r10
movq (%rsi), %rsi              # rsi = dst.data (outptr)
movslq 0x2c(%rdi), %r9         # r9 = src.w
movslq %edx, %rdx
imulq %r9, %rdx                # rdx = top * src.w
imulq 0x10(%rdi), %rdx         # ... * src.elemsize (bytes per packed element)
addq (%rdi), %rdx              # rdx = src.row(top) as byte address
shll $0x3, %ecx                # ecx = left * 8 (floats)
movslq %ecx, %rcx
leaq (%rdx,%rcx,4), %rcx       # rcx = ptr = src.row(top) + left*8 floats
subq %r10, %r9                 # r9 = src.w - dst.w  (== left + right)
shlq $0x3, %r9                 # r9 = (left+right) * 8 floats (row skip)
xorl %edx, %edx                # edx = y = 0
movl %r8d, %edi                # edi = x countdown = w   <- outer loop head
testl %r8d, %r8d
jle 0x194a14                   # w <= 0: skip inner loop
vmovups (%rcx), %ymm0          # _p = loadu 8 floats     <- inner loop head
vmovups %ymm0, (%rsi)          # storeu to outptr
addq $0x20, %rcx               # ptr += 8 floats
addq $0x20, %rsi               # outptr += 8 floats
decl %edi
jne 0x194a00                   # next x
leaq (%rcx,%r9,4), %rcx        # ptr += (left+right)*8 floats
incl %edx
cmpl %eax, %edx
jne 0x1949f8                   # next y
vzeroupper                     # clear upper YMM state before returning (AVX/SSE transition)
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx.cpp
|
virtual thunk to ncnn::Crop_x86_avx::forward(std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>>&, ncnn::Option const&) const
|
int Crop_x86_avx::forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const
{
const Mat& bottom_blob = bottom_blobs[0];
const Mat& reference_blob = bottom_blobs[1];
int w = bottom_blob.w;
int h = bottom_blob.h;
int d = bottom_blob.d;
int channels = bottom_blob.c;
int dims = bottom_blob.dims;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int ref_elempack = reference_blob.elempack;
Mat& top_blob = top_blobs[0];
#if __SSE2__
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
if (woffset == -233)
{
resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
else
{
resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
if (dims == 1)
{
int out_elempack = _outw % 16 == 0 ? 16 : _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 16 == 0 ? 16 : _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 16 == 0 && out_elempack == 16)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack16_avx512(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 16 == 0 ? 16 : _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 16)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 16 == 0 && out_elempack == 16)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack16_avx512(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX512F__
if (elempack == 8)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
if (woffset == -233)
{
resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
else
{
resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
if (dims == 1)
{
int out_elempack = _outw % 8 == 0 ? 8 : _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 8 == 0 ? 8 : _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 8 == 0 && out_elempack == 8)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack8_avx(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 8 == 0 ? 8 : _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 8)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 8 == 0 && out_elempack == 8)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack8_avx(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __AVX__
if (elempack == 4)
{
int _woffset, _hoffset, _doffset, _coffset;
int _outw, _outh, _outd, _outc;
if (woffset == -233)
{
resolve_crop_roi(bottom_blob.shape(), (const int*)reference_blob, _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
else
{
resolve_crop_roi(bottom_blob.shape(), reference_blob.shape(), _woffset, _hoffset, _doffset, _coffset, _outw, _outh, _outd, _outc);
}
if (dims == 1)
{
int out_elempack = _outw % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw / out_elempack == w && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_woffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, 0, _woffset / elempack);
return 0;
}
}
if (dims == 2)
{
int out_elempack = _outh % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh / out_elempack == h && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_hoffset % 4 == 0 && out_elempack == 4)
{
top_blob.create(_outw, _outh / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
crop_pack4_sse(bottom_blob, top_blob, _hoffset / elempack, _woffset);
return 0;
}
}
if (dims == 3)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
const Mat m = bottom_blob_sliced.channel(q);
Mat borderm = top_blob.channel(q);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
return 0;
}
}
if (dims == 4)
{
int out_elempack = _outc % 4 == 0 ? 4 : 1;
size_t out_elemsize = elemsize / elempack * out_elempack;
if (_outw == w && _outh == h && _outd == d && _outc / out_elempack == channels && out_elempack == 4)
{
top_blob = bottom_blob;
return 0;
}
if (_coffset % 4 == 0 && out_elempack == 4)
{
const Mat bottom_blob_sliced = bottom_blob.channel_range(_coffset / out_elempack, _outc / out_elempack);
if (_outw == w && _outh == h && _outd == d)
{
top_blob = bottom_blob_sliced.clone();
if (top_blob.empty())
return -100;
}
top_blob.create(_outw, _outh, _outd, _outc / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
for (int z = 0; z < _outd; z++)
{
const Mat m = bottom_blob_sliced.channel(q).depth(z + _doffset);
Mat borderm = top_blob.channel(q).depth(z);
crop_pack4_sse(m, borderm, _hoffset, _woffset);
}
}
return 0;
}
}
}
#endif // __SSE2__
Mat bottom_blob_unpacked = bottom_blob;
if (elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(bottom_blob, bottom_blob_unpacked, 1, opt_pack1);
}
Mat reference_blob_unpacked = reference_blob;
if (ref_elempack != 1)
{
Option opt_pack1 = opt;
opt_pack1.blob_allocator = opt.workspace_allocator;
convert_packing(reference_blob, reference_blob_unpacked, 1, opt_pack1);
}
std::vector<Mat> bottom_blobs_unpacked(2);
bottom_blobs_unpacked[0] = bottom_blob_unpacked;
bottom_blobs_unpacked[1] = reference_blob_unpacked;
return Crop::forward(bottom_blobs_unpacked, top_blobs, opt);
}
|
movq (%rdi), %rax
addq -0x40(%rax), %rdi
jmp 0x194a8c
|
/ysh329[P]ncnn/build_O3/src/layer/x86/crop_x86_avx.cpp
|
ncnn::Deconvolution_x86_avx512::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Forward pass of a deconvolution (transposed convolution) layer, AVX-512 build.
// Computes top_blob = deconv(bottom_blob) + bias, with optional activation,
// dispatching to a pack-specialized SIMD kernel when the input/output element
// packing matches, and falling back to a scalar gather loop otherwise.
// Returns 0 on success, -100 on allocation failure.
int Deconvolution_x86_avx512::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // deconvolv with NxN kernel
    // value = value + bias
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    // NCNN_LOGE("Deconvolution input %d x %d pad = %d %d ksize=%d %d stride=%d %d", w, h, pad_w, pad_h, kernel_w, kernel_h, stride_w, stride_h);

    // Effective kernel footprint once dilation gaps are counted.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    // Transposed-convolution output size (inverse of the conv size formula),
    // plus any explicit output padding on the right/bottom edges.
    int outw = (w - 1) * stride_w + kernel_extent_w + output_pad_right;
    int outh = (h - 1) * stride_h + kernel_extent_h + output_pad_bottom;

    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        // Choose the widest SIMD packing that divides num_output evenly.
#if __AVX512F__
        out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
        out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
        out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
    }
#endif // __SSE2__
    size_t out_elemsize = elemsize / elempack * out_elempack;

    Mat top_blob_bordered;
    if (pad_left > 0 || pad_right > 0 || pad_top > 0 || pad_bottom > 0 || (output_w > 0 && output_h > 0))
    {
        // Padding (or an explicit output size) will be trimmed afterwards by
        // cut_padding(), so accumulate into workspace memory first.
        top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.workspace_allocator);
    }
    else
    {
        // No trimming needed: write directly into the caller's output blob.
        top_blob_bordered = top_blob;
        top_blob_bordered.create(outw, outh, num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    }
    if (top_blob_bordered.empty())
        return -100;

    // Number of weights per (input channel, output channel) pair.
    const int maxk = kernel_w * kernel_h;

    // Dispatch table: each (elempack, out_elempack) combination below is
    // mutually exclusive, so at most one specialized kernel runs.
#if __SSE2__
#if __AVX__
#if __AVX512F__
    if (elempack == 16 && out_elempack == 16)
    {
        {
            deconvolution_pack16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 8 && out_elempack == 16)
    {
        {
            deconvolution_pack8to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 16 && out_elempack == 8)
    {
        {
            deconvolution_pack16to8_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 4 && out_elempack == 16)
    {
        {
            deconvolution_pack4to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 16 && out_elempack == 4)
    {
        {
            deconvolution_pack16to4_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 1 && out_elempack == 16)
    {
        {
            deconvolution_pack1to16_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 16 && out_elempack == 1)
    {
        {
            deconvolution_pack16to1_avx512(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }
#endif // __AVX512F__

    if (elempack == 8 && out_elempack == 8)
    {
        {
            deconvolution_pack8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 4 && out_elempack == 8)
    {
        {
            deconvolution_pack4to8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 8 && out_elempack == 4)
    {
        {
            deconvolution_pack8to4_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 1 && out_elempack == 8)
    {
        {
            deconvolution_pack1to8_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 8 && out_elempack == 1)
    {
        {
            deconvolution_pack8to1_avx(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }
#endif // __AVX__

    if (elempack == 4 && out_elempack == 4)
    {
        {
            deconvolution_pack4_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 1 && out_elempack == 4)
    {
        {
            deconvolution_pack1to4_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }

    if (elempack == 4 && out_elempack == 1)
    {
        {
            deconvolution_pack4to1_sse(bottom_blob, top_blob_bordered, weight_data_tm, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt);
        }
    }
#endif // __SSE2__

    if (elempack == 1 && out_elempack == 1)
    {
        // Scalar reference path: gather-style transposed convolution — for each
        // output pixel, find the input samples that contribute through each
        // kernel tap instead of scattering from the input.
        {
            // num_output
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int p = 0; p < num_output; p++)
            {
                float* outptr = top_blob_bordered.channel(p);

                // shadowed variable for less openmp task args
                const int w = bottom_blob.w;
                const int h = bottom_blob.h;
                const int channels = bottom_blob.c;
                const int outw = top_blob_bordered.w;
                const int outh = top_blob_bordered.h;

                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        float sum = 0.f;

                        if (bias_term)
                        {
                            sum = bias_data[p];
                        }

                        const float* kptr = (const float*)weight_data_tm.channel(p);

                        // channels
                        for (int q = 0; q < channels; q++)
                        {
                            const Mat m = bottom_blob.channel(q);

                            for (int y = 0; y < kernel_h; y++)
                            {
                                // Candidate input row (scaled by stride) feeding
                                // output row i through kernel tap y.
                                int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                                // Only stride-aligned, in-bounds positions map back
                                // to a real input sample.
                                if (sys < 0 || sys % stride_h != 0)
                                    continue;

                                int sy = sys / stride_h;
                                if (sy >= h)
                                    continue;

                                const float* sptr = m.row(sy);

                                for (int x = 0; x < kernel_w; x++)
                                {
                                    // Same mapping for the horizontal axis.
                                    int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                                    if (sxs < 0 || sxs % stride_w != 0)
                                        continue;

                                    int sx = sxs / stride_w;
                                    if (sx >= w)
                                        continue;

                                    float val = sptr[sx];

                                    // Weights for channel pair (p, q) are laid out
                                    // as kernel_h x kernel_w, row-major.
                                    int k = y * kernel_w + x;

                                    float w = kptr[k];

                                    sum += val * w;
                                }
                            }

                            // Advance to the next input channel's kernel block.
                            kptr += maxk;
                        }

                        sum = activation_ss(sum, activation_type, activation_params);

                        outptr[j] = sum;
                    }

                    outptr += outw;
                }
            }
        }
    }

    // Trim left/top/right/bottom padding (or crop to the explicit output size)
    // into the final output blob; a no-op copy-through otherwise.
    cut_padding(top_blob_bordered, top_blob, opt);
    if (top_blob.empty())
        return -100;

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x208, %rsp # imm = 0x208
movq %rdx, %rbp
movl 0x2c(%rsi), %ebx
movl 0x30(%rsi), %r14d
movq 0x10(%rsi), %rax
movq (%rdi), %r8
movq -0x18(%r8), %rdx
movl 0xd4(%rdi,%rdx), %r9d
decl %r9d
imull 0xdc(%rdi,%rdx), %r9d
movl %r9d, 0x12c(%rsp)
movq %rsi, 0xa8(%rsp)
movslq 0x18(%rsi), %rsi
movq %rsi, 0xd0(%rsp)
decl %ebx
imull 0xe4(%rdi,%rdx), %ebx
movl 0xd8(%rdi,%rdx), %esi
decl %esi
imull 0xe0(%rdi,%rdx), %esi
movl %esi, 0x19c(%rsp)
decl %r14d
imull 0xe8(%rdi,%rdx), %r14d
movl 0xfc(%rdi,%rdx), %r15d
movl 0x100(%rdi,%rdx), %r12d
movl $0x1, 0xc8(%rsp)
movq %rcx, 0x1f0(%rsp)
cmpb $0x1, 0x27(%rcx)
movq %rdi, 0x18(%rsp)
jne 0x19b601
movl 0xd0(%rdi,%rdx), %edx
testb $0xf, %dl
je 0x19b5e4
testb $0x7, %dl
je 0x19b5f1
xorl %esi, %esi
testb $0x3, %dl
sete %sil
leal (%rsi,%rsi,2), %ecx
incl %ecx
movl %ecx, 0xc8(%rsp)
jmp 0x19b5fc
movl $0x10, 0xc8(%rsp)
jmp 0x19b5fc
movl $0x8, 0xc8(%rsp)
movq 0x18(%rsp), %rdi
xorl %edx, %edx
divq 0xd0(%rsp)
movq %rax, %r13
movq $0x0, 0x120(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovups %xmm0, 0xec(%rsp)
vmovaps %xmm0, 0x100(%rsp)
vmovups %xmm0, 0x10c(%rsp)
movq -0x18(%r8), %rcx
leaq (%rdi,%rcx), %rax
movl $0x10, %r8d
cmpl $0x0, 0xec(%rdi,%rcx)
jg 0x19b762
cmpl $0x0, 0xf0(%rax)
jg 0x19b762
cmpl $0x0, 0xf4(%rax)
jg 0x19b762
cmpl $0x0, 0xf8(%rax)
jg 0x19b762
cmpl $0x0, 0x104(%rax)
jle 0x19b69b
cmpl $0x0, 0x108(%rax)
jg 0x19b762
leaq 0xe0(%rsp), %rax
cmpq %rbp, %rax
je 0x19b754
movq 0x8(%rbp), %rax
testq %rax, %rax
je 0x19b6f4
lock
incl (%rax)
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x19b6f4
lock
decl (%rax)
jne 0x19b6f4
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x19b6e7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x19b6f4
testq %rsi, %rsi
je 0x19b6f4
movq %rsi, %rdi
callq 0x563b0
vmovups (%rbp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movq 0x10(%rbp), %rax
movq %rax, 0xf0(%rsp)
movl 0x18(%rbp), %eax
movl %eax, 0xf8(%rsp)
movq 0x20(%rbp), %rax
movq %rax, 0x100(%rsp)
vmovups 0x28(%rbp), %xmm0
vmovups %xmm0, 0x108(%rsp)
movl 0x38(%rbp), %eax
movl %eax, 0x118(%rsp)
movq 0x40(%rbp), %rax
movq %rax, 0x120(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %rax
movq -0x18(%rax), %rcx
addq 0x18(%rsp), %rcx
movl $0x8, %r8d
movq %rcx, %rax
addl 0x12c(%rsp), %ebx
addl 0x19c(%rsp), %r14d
leal (%r15,%rbx), %esi
incl %esi
leal (%r12,%r14), %ecx
incl %ecx
movl 0xc8(%rsp), %r9d
movl %r9d, %edx
imulq %rdx, %r13
movl 0xd0(%rax), %eax
cltd
idivl %r9d
movq 0x1f0(%rsp), %rdx
movq (%rdx,%r8), %rdx
movq %rdx, (%rsp)
leaq 0xe0(%rsp), %rdi
movl %ecx, %edx
movl %eax, %ecx
movq %r13, %r8
callq 0x5a266
movl $0xffffff9c, %r13d # imm = 0xFFFFFF9C
cmpq $0x0, 0xe0(%rsp)
je 0x1a2e9c
movslq 0x118(%rsp), %rdx
movq 0x120(%rsp), %rax
movq %rdx, 0x88(%rsp)
imulq %rdx, %rax
testq %rax, %rax
je 0x1a2e9c
movq %rbp, 0x200(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0xd8(%rsp)
movq -0x18(%rcx), %rdx
movl 0xd4(%rax,%rdx), %ecx
movq %rcx, 0x148(%rsp)
movq %rdx, 0x10(%rsp)
movl 0xd8(%rax,%rdx), %eax
movq %rax, 0x130(%rsp)
cmpl $0x10, 0xd0(%rsp)
jne 0x19c062
cmpl $0x10, 0xc8(%rsp)
jne 0x19c062
cmpl $0x0, 0x88(%rsp)
jle 0x1a2e50
movq 0x18(%rsp), %rcx
movq 0x10(%rsp), %rax
movl 0xdc(%rcx,%rax), %r8d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r10d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0xb0(%rsp)
movl 0x114(%rcx,%rax), %edi
movq 0x1a8(%rcx,%rax), %r9
movq 0x148(%rsp), %rdx
movl %edx, %eax
imull 0x130(%rsp), %eax
shll $0x8, %eax
cltq
movq %rax, 0x78(%rsp)
vmovaps 0x27e578(%rip), %zmm0 # 0x419e40
vmovaps 0x27e5ae(%rip), %zmm1 # 0x419e80
vmovaps 0x27e5e4(%rip), %zmm2 # 0x419ec0
vmovaps 0x27e61a(%rip), %zmm6 # 0x419f00
vmovaps 0x27e790(%rip), %zmm4 # 0x41a080
vbroadcastss 0x27b75a(%rip), %zmm3 # 0x417054
vxorps 0x27eb3c(%rip), %zmm3, %zmm7 # 0x41a440
vxorps 0x27eaf2(%rip), %zmm3, %zmm9 # 0x41a400
vmovaps 0x27e628(%rip), %zmm10 # 0x419f40
vmovaps 0x27e65e(%rip), %zmm11 # 0x419f80
vmovaps 0x27e694(%rip), %zmm12 # 0x419fc0
vmovaps 0x27e6ca(%rip), %zmm13 # 0x41a000
vmovaps 0x27e700(%rip), %zmm14 # 0x41a040
vmovdqa64 0x27e776(%rip), %zmm15 # 0x41a0c0
vxorps %zmm3, %zmm4, %zmm3
vmovups %zmm3, 0x1a0(%rsp)
movq %rdx, %r12
shlq $0x8, %r12
movl $0x1, %eax
subl %edx, %eax
imull %r8d, %eax
movl %eax, 0x80(%rsp)
movq $0x0, 0x68(%rsp)
decl %edi
vxorps %xmm29, %xmm29, %xmm29
vbroadcastss 0x279389(%rip), %zmm30 # 0x414d18
movq %rdi, 0x38(%rsp)
movq %r9, 0x30(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x150(%rsp)
testl %eax, %eax
jle 0x19c047
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %esi
movl 0x30(%rax), %edx
movl %edx, 0x58(%rsp)
movl 0x10c(%rsp), %edx
movl %edx, 0x28(%rsp)
movq 0x120(%rsp), %r11
movq 0x68(%rsp), %rdx
imulq %rdx, %r11
imulq 0xf0(%rsp), %r11
addq 0xe0(%rsp), %r11
movl 0x38(%rax), %eax
movq %rax, 0x90(%rsp)
shlq $0x6, %rdx
movq %rdx, 0x50(%rsp)
xorl %r14d, %r14d
cmpl $0x0, 0x28(%rsp)
jle 0x19c036
xorl %edx, %edx
movl 0x80(%rsp), %eax
movl %eax, 0x24(%rsp)
testq %r9, %r9
je 0x19ba30
movq 0x50(%rsp), %rax
vmovups (%r9,%rax), %zmm5
jmp 0x19ba34
vxorps %xmm5, %xmm5, %xmm5
movl %edx, 0x70(%rsp)
movq %r11, 0x60(%rsp)
cmpl $0x0, 0x90(%rsp)
jle 0x19bc91
movq 0x48(%rcx), %r13
imulq 0x68(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x48(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0xb8(%rsp)
imulq %rax, %rdx
movq %rdx, 0x98(%rsp)
xorl %eax, %eax
movq 0x130(%rsp), %rcx
testl %ecx, %ecx
movq %rax, 0x40(%rsp)
jle 0x19bc71
movq 0xb8(%rsp), %rbp
imulq %rax, %rbp
addq 0x48(%rsp), %rbp
xorl %r9d, %r9d
xorl %r11d, %r11d
movl %r11d, %eax
subl %ecx, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %r14d, %eax
js 0x19bc5a
cltd
idivl 0xb0(%rsp)
testl %edx, %edx
jne 0x19bc5a
cmpl $0x0, 0x148(%rsp)
jle 0x19bc5a
cmpl 0x58(%rsp), %eax
jge 0x19bc5a
movslq %eax, %r15
imulq 0x98(%rsp), %r15
addq %rbp, %r15
movl 0x24(%rsp), %ecx
movq 0x148(%rsp), %rbx
movq %r9, %rdi
testl %ecx, %ecx
js 0x19bc47
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19bc47
cmpl %esi, %eax
jge 0x19bc47
shll $0x4, %eax
cltq
vbroadcastss (%r15,%rax,4), %zmm3
vbroadcastss 0x4(%r15,%rax,4), %zmm8
vbroadcastss 0x8(%r15,%rax,4), %zmm16
vbroadcastss 0xc(%r15,%rax,4), %zmm17
vbroadcastss 0x10(%r15,%rax,4), %zmm18
vbroadcastss 0x14(%r15,%rax,4), %zmm19
vbroadcastss 0x18(%r15,%rax,4), %zmm20
vbroadcastss 0x1c(%r15,%rax,4), %zmm21
vbroadcastss 0x20(%r15,%rax,4), %zmm22
vbroadcastss 0x24(%r15,%rax,4), %zmm23
vbroadcastss 0x28(%r15,%rax,4), %zmm24
vbroadcastss 0x2c(%r15,%rax,4), %zmm25
vbroadcastss 0x30(%r15,%rax,4), %zmm26
vbroadcastss 0x34(%r15,%rax,4), %zmm27
vbroadcastss 0x38(%r15,%rax,4), %zmm28
vbroadcastss 0x3c(%r15,%rax,4), %zmm31
movl %edi, %eax
andl $0xffffff00, %eax # imm = 0xFFFFFF00
vfmadd231ps (%r13,%rax,4), %zmm3, %zmm5 # zmm5 = (zmm3 * mem) + zmm5
vfmadd231ps 0x40(%r13,%rax,4), %zmm8, %zmm5 # zmm5 = (zmm8 * mem) + zmm5
vfmadd231ps 0x80(%r13,%rax,4), %zmm16, %zmm5 # zmm5 = (zmm16 * mem) + zmm5
vfmadd231ps 0xc0(%r13,%rax,4), %zmm17, %zmm5 # zmm5 = (zmm17 * mem) + zmm5
vfmadd231ps 0x100(%r13,%rax,4), %zmm18, %zmm5 # zmm5 = (zmm18 * mem) + zmm5
vfmadd231ps 0x140(%r13,%rax,4), %zmm19, %zmm5 # zmm5 = (zmm19 * mem) + zmm5
vfmadd231ps 0x180(%r13,%rax,4), %zmm20, %zmm5 # zmm5 = (zmm20 * mem) + zmm5
vfmadd231ps 0x1c0(%r13,%rax,4), %zmm21, %zmm5 # zmm5 = (zmm21 * mem) + zmm5
vfmadd231ps 0x200(%r13,%rax,4), %zmm22, %zmm5 # zmm5 = (zmm22 * mem) + zmm5
vfmadd231ps 0x240(%r13,%rax,4), %zmm23, %zmm5 # zmm5 = (zmm23 * mem) + zmm5
vfmadd231ps 0x280(%r13,%rax,4), %zmm24, %zmm5 # zmm5 = (zmm24 * mem) + zmm5
vfmadd231ps 0x2c0(%r13,%rax,4), %zmm25, %zmm5 # zmm5 = (zmm25 * mem) + zmm5
vfmadd231ps 0x300(%r13,%rax,4), %zmm26, %zmm5 # zmm5 = (zmm26 * mem) + zmm5
vfmadd231ps 0x340(%r13,%rax,4), %zmm27, %zmm5 # zmm5 = (zmm27 * mem) + zmm5
vfmadd231ps 0x380(%r13,%rax,4), %zmm28, %zmm5 # zmm5 = (zmm28 * mem) + zmm5
vfmadd231ps 0x3c0(%r13,%rax,4), %zmm31, %zmm5 # zmm5 = (zmm31 * mem) + zmm5
addq $0x100, %rdi # imm = 0x100
addl %r8d, %ecx
decq %rbx
jne 0x19bb1e
incq %r11
addq %r12, %r9
movq 0x130(%rsp), %rcx
cmpq %rcx, %r11
jne 0x19bac0
movq 0x78(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x40(%rsp), %rax
incq %rax
cmpq 0x90(%rsp), %rax
jne 0x19ba9c
movq 0x38(%rsp), %rdi
cmpl $0x5, %edi
ja 0x19c009
leaq 0x27dfda(%rip), %rcx # 0x419c80
movslq (%rcx,%rdi,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x30(%rsp), %r9
movq 0x60(%rsp), %r11
movl 0x70(%rsp), %edx
jmpq *%rax
vmaxps %zmm29, %zmm5, %zmm5
jmp 0x19c01c
vminps %zmm1, %zmm5, %zmm3
vmaxps %zmm2, %zmm3, %zmm17
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm17, %zmm8 # zmm8 = (zmm17 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm7) + zmm17
vfmadd231ps %zmm9, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm9) + zmm17
vmulps %zmm17, %zmm17, %zmm8
vmovaps %zmm17, %zmm3
vfmadd213ps %zmm11, %zmm10, %zmm3 # zmm3 = (zmm10 * zmm3) + zmm11
vfmadd213ps %zmm12, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm12
vfmadd213ps %zmm13, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm13
vfmadd213ps %zmm14, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm14
vfmadd213ps %zmm4, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm4
vfmadd213ps %zmm17, %zmm8, %zmm3 # zmm3 = (zmm8 * zmm3) + zmm17
vaddps %zmm0, %zmm3, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vmaxps 0x27e3a2(%rip), %zmm8, %zmm3 # 0x41a100
vpsrld $0x17, %zmm3, %zmm16
vpternlogd $0xec, 0x27e3d0(%rip), %zmm4, %zmm3 # 0x41a140
vcmpltps 0x27e405(%rip), %zmm3, %k1 # 0x41a180
vsubps %zmm0, %zmm3, %zmm18
vaddps %zmm3, %zmm18, %zmm18 {%k1}
vmulps %zmm18, %zmm18, %zmm3
vmovaps %zmm18, %zmm17
vmovaps 0x27e463(%rip), %zmm19 # 0x41a200
vfmadd132ps 0x27e419(%rip), %zmm19, %zmm17 # zmm17 = (zmm17 * mem) + zmm19
vfmadd213ps 0x27e48f(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e4c5(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e4fb(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e531(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e567(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e59d(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27e5d3(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vmulps %zmm18, %zmm3, %zmm19
vfmadd213ps %zmm18, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm18
vcmpleps %zmm29, %zmm8, %k2
vpsubd %zmm15, %zmm16, %zmm8
vcvtdq2ps %zmm8, %zmm8
vaddps %zmm0, %zmm8, %zmm16
vmovaps %zmm8, %zmm16 {%k1}
vfmadd231ps 0x27e5de(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vfmadd231ps 0x1a0(%rsp), %zmm3, %zmm19 # zmm19 = (zmm3 * mem) + zmm19
vfmadd231ps 0x27e609(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vbroadcastss 0x27cfa3(%rip), %zmm18 # 0x418de4
vmulps %zmm18, %zmm19, %zmm3
vbroadcastss 0x27b8f3(%rip), %zmm3 {%k2} # 0x417744
vminps %zmm1, %zmm3, %zmm3
vmaxps %zmm2, %zmm3, %zmm3
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm7) + zmm3
vfmadd231ps %zmm9, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm9) + zmm3
vmulps %zmm3, %zmm3, %zmm8
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm11, %zmm10, %zmm17 # zmm17 = (zmm10 * zmm17) + zmm11
vfmadd213ps %zmm12, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm12
vfmadd213ps %zmm13, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm13
vfmadd213ps %zmm14, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm14
vfmadd213ps %zmm4, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm4
vfmadd213ps %zmm3, %zmm8, %zmm17 # zmm17 = (zmm8 * zmm17) + zmm3
vaddps %zmm0, %zmm17, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vrcp14ps %zmm8, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm8 # zmm8 = -(zmm8 * zmm3) + zmm3
vfnmsub213ps %zmm30, %zmm18, %zmm8 # zmm8 = -(zmm18 * zmm8) - zmm30
vmulps %zmm5, %zmm8, %zmm5
jmp 0x19c01c
movq 0x10(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to16}, %zmm5, %zmm5
vminps 0x4(%rax){1to16}, %zmm5, %zmm5
jmp 0x19c01c
vxorps 0x27b130(%rip){1to16}, %zmm5, %zmm5 # 0x417054
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm16
vmovaps %zmm6, %zmm3
vfmadd213ps %zmm4, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm3) + zmm4
vrndscaleps $0x1, %zmm3, %zmm8
vcmpltps %zmm8, %zmm3, %k1
vsubps %zmm0, %zmm8, %zmm8 {%k1}
vfmadd231ps %zmm7, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm7) + zmm16
vfmadd231ps %zmm9, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm9) + zmm16
vmulps %zmm16, %zmm16, %zmm3
vmovaps %zmm16, %zmm5
vfmadd213ps %zmm11, %zmm10, %zmm5 # zmm5 = (zmm10 * zmm5) + zmm11
vfmadd213ps %zmm12, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm12
vfmadd213ps %zmm13, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm13
vfmadd213ps %zmm14, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm14
vfmadd213ps %zmm4, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm4
vfmadd213ps %zmm16, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm16
vaddps %zmm0, %zmm5, %zmm3
vcvttps2dq %zmm8, %zmm5
vpaddd %zmm15, %zmm5, %zmm5
vpslld $0x17, %zmm5, %zmm5
vfmadd213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm30
vrcp14ps %zmm5, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm5 # zmm5 = -(zmm5 * zmm3) + zmm3
jmp 0x19c01c
movq 0x10(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vcmpltps %zmm29, %zmm5, %k1
vmulps (%rax){1to16}, %zmm5, %zmm5 {%k1}
jmp 0x19c01c
movq 0x10(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %zmm3
vfmadd213ps 0x4(%rax){1to16}, %zmm5, %zmm3 # zmm3 = (zmm5 * zmm3) + mem
vmaxps %zmm29, %zmm3, %zmm3
vminps %zmm30, %zmm3, %zmm3
vmulps %zmm5, %zmm3, %zmm5
jmp 0x19c01c
movq 0x18(%rsp), %rcx
movq 0x30(%rsp), %r9
movq 0x60(%rsp), %r11
movl 0x70(%rsp), %edx
vmovups %zmm5, (%r11)
addq $0x40, %r11
incl %edx
incl 0x24(%rsp)
cmpl 0x28(%rsp), %edx
jne 0x19ba1d
incl %r14d
cmpl 0x150(%rsp), %r14d
jne 0x19ba05
movq 0x68(%rsp), %rdx
incq %rdx
movq %rdx, 0x68(%rsp)
cmpq 0x88(%rsp), %rdx
jne 0x19b999
cmpl $0x8, 0xd0(%rsp)
jne 0x19c849
cmpl $0x10, 0xc8(%rsp)
jne 0x19c849
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %r8d
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r10d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rax,%rcx), %edx
movq 0x1a8(%rax,%rcx), %rsi
movl 0xd4(%rax,%rcx), %edi
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %r12d
movl %edi, %eax
imull %r12d, %eax
shll $0x7, %eax
cltq
movq %rax, 0xb8(%rsp)
vmovaps 0x27dd2c(%rip), %zmm0 # 0x419e40
vmovaps 0x27dd62(%rip), %zmm1 # 0x419e80
vmovaps 0x27dd98(%rip), %zmm2 # 0x419ec0
vmovaps 0x27ddce(%rip), %zmm6 # 0x419f00
vmovaps 0x27df44(%rip), %zmm4 # 0x41a080
vbroadcastss 0x27af0e(%rip), %zmm3 # 0x417054
vxorps 0x27e2f0(%rip), %zmm3, %zmm7 # 0x41a440
vxorps 0x27e2a6(%rip), %zmm3, %zmm9 # 0x41a400
vmovaps 0x27dddc(%rip), %zmm10 # 0x419f40
vmovaps 0x27de12(%rip), %zmm11 # 0x419f80
vmovaps 0x27de48(%rip), %zmm12 # 0x419fc0
vmovaps 0x27de7e(%rip), %zmm13 # 0x41a000
vmovaps 0x27deb4(%rip), %zmm14 # 0x41a040
vmovdqa64 0x27df2a(%rip), %zmm15 # 0x41a0c0
vmovaps 0x27e120(%rip), %zmm23 # 0x41a2c0
vmovaps 0x27e156(%rip), %zmm24 # 0x41a300
vmovaps 0x27e18c(%rip), %zmm25 # 0x41a340
vmovaps 0x27e1c2(%rip), %zmm26 # 0x41a380
vmovaps 0x27e1f8(%rip), %zmm27 # 0x41a3c0
vmovaps %zmm3, %zmm22
vxorps %zmm3, %zmm4, %zmm28
movq %rdi, %rax
shlq $0x7, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rdi, 0x58(%rsp)
subl %edi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm29, %xmm29, %xmm29
vbroadcastss 0x278b03(%rip), %zmm30 # 0x414d18
vbroadcastss 0x27cbc5(%rip), %zmm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rsi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19c82e
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %ecx
movl %ecx, 0x98(%rsp)
movl 0x10c(%rsp), %ecx
movl %ecx, 0x30(%rsp)
movq 0x120(%rsp), %rdi
movq 0x10(%rsp), %rcx
imulq %rcx, %rdi
imulq 0xf0(%rsp), %rdi
addq 0xe0(%rsp), %rdi
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x6, %rcx
movq %rcx, 0x1a0(%rsp)
xorl %ebp, %ebp
cmpl $0x0, 0x30(%rsp)
jle 0x19c81f
xorl %r9d, %r9d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rsi, %rsi
je 0x19c2c7
movq 0x1a0(%rsp), %rax
vmovups (%rsi,%rax), %zmm5
jmp 0x19c2cb
vxorps %xmm5, %xmm5, %xmm5
movl %r9d, 0x38(%rsp)
movq %rdi, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19c48f
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19c46f
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %r15d, %r15d
xorl %esi, %esi
movl %esi, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %ebp, %eax
js 0x19c45b
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19c45b
cmpl $0x0, 0x58(%rsp)
jle 0x19c45b
cmpl 0x98(%rsp), %eax
jge 0x19c45b
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r11, %r9
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %rbx
movq %r15, %rdi
testl %ecx, %ecx
js 0x19c44b
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19c44b
cmpl %r14d, %eax
jge 0x19c44b
shll $0x3, %eax
cltq
vbroadcastss (%r9,%rax,4), %zmm3
vbroadcastss 0x4(%r9,%rax,4), %zmm8
vbroadcastss 0x8(%r9,%rax,4), %zmm16
vbroadcastss 0xc(%r9,%rax,4), %zmm17
vbroadcastss 0x10(%r9,%rax,4), %zmm18
vbroadcastss 0x14(%r9,%rax,4), %zmm19
vbroadcastss 0x18(%r9,%rax,4), %zmm20
vbroadcastss 0x1c(%r9,%rax,4), %zmm21
movl %edi, %eax
andl $-0x80, %eax
vfmadd231ps (%r13,%rax,4), %zmm3, %zmm5 # zmm5 = (zmm3 * mem) + zmm5
vfmadd231ps 0x40(%r13,%rax,4), %zmm8, %zmm5 # zmm5 = (zmm8 * mem) + zmm5
vfmadd231ps 0x80(%r13,%rax,4), %zmm16, %zmm5 # zmm5 = (zmm16 * mem) + zmm5
vfmadd231ps 0xc0(%r13,%rax,4), %zmm17, %zmm5 # zmm5 = (zmm17 * mem) + zmm5
vfmadd231ps 0x100(%r13,%rax,4), %zmm18, %zmm5 # zmm5 = (zmm18 * mem) + zmm5
vfmadd231ps 0x140(%r13,%rax,4), %zmm19, %zmm5 # zmm5 = (zmm19 * mem) + zmm5
vfmadd231ps 0x180(%r13,%rax,4), %zmm20, %zmm5 # zmm5 = (zmm20 * mem) + zmm5
vfmadd231ps 0x1c0(%r13,%rax,4), %zmm21, %zmm5 # zmm5 = (zmm21 * mem) + zmm5
subq $-0x80, %rdi
addl %r8d, %ecx
decq %rbx
jne 0x19c3a3
incq %rsi
addq 0xb0(%rsp), %r15
cmpq %r12, %rsi
jne 0x19c34c
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19c32b
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19c7f4
leaq 0x27d7f4(%rip), %rcx # 0x419c98
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x50(%rsp), %rsi
movq 0x28(%rsp), %rdi
movl 0x38(%rsp), %r9d
jmpq *%rax
vmaxps %zmm29, %zmm5, %zmm5
jmp 0x19c803
vminps %zmm1, %zmm5, %zmm3
vmaxps %zmm2, %zmm3, %zmm17
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm17, %zmm8 # zmm8 = (zmm17 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm7) + zmm17
vfmadd231ps %zmm9, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm9) + zmm17
vmulps %zmm17, %zmm17, %zmm8
vmovaps %zmm17, %zmm3
vfmadd213ps %zmm11, %zmm10, %zmm3 # zmm3 = (zmm10 * zmm3) + zmm11
vfmadd213ps %zmm12, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm12
vfmadd213ps %zmm13, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm13
vfmadd213ps %zmm14, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm14
vfmadd213ps %zmm4, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm4
vfmadd213ps %zmm17, %zmm8, %zmm3 # zmm3 = (zmm8 * zmm3) + zmm17
vaddps %zmm0, %zmm3, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vmaxps 0x27dba8(%rip), %zmm8, %zmm3 # 0x41a100
vpsrld $0x17, %zmm3, %zmm16
vpternlogd $0xec, 0x27dbd6(%rip), %zmm4, %zmm3 # 0x41a140
vcmpltps 0x27dc0b(%rip), %zmm3, %k1 # 0x41a180
vsubps %zmm0, %zmm3, %zmm18
vaddps %zmm3, %zmm18, %zmm18 {%k1}
vmulps %zmm18, %zmm18, %zmm3
vmovaps %zmm18, %zmm17
vmovaps 0x27dc69(%rip), %zmm19 # 0x41a200
vfmadd132ps 0x27dc1f(%rip), %zmm19, %zmm17 # zmm17 = (zmm17 * mem) + zmm19
vfmadd213ps 0x27dc95(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps 0x27dccb(%rip), %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + mem
vfmadd213ps %zmm23, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm23
vfmadd213ps %zmm24, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm24
vfmadd213ps %zmm25, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm25
vfmadd213ps %zmm26, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm26
vfmadd213ps %zmm27, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm27
vmulps %zmm18, %zmm3, %zmm19
vfmadd213ps %zmm18, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm18
vcmpleps %zmm29, %zmm8, %k2
vpsubd %zmm15, %zmm16, %zmm8
vcvtdq2ps %zmm8, %zmm8
vaddps %zmm0, %zmm8, %zmm16
vmovaps %zmm8, %zmm16 {%k1}
vfmadd231ps 0x27ddf8(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vfmadd231ps %zmm3, %zmm28, %zmm19 # zmm19 = (zmm28 * zmm3) + zmm19
vfmadd231ps 0x27de28(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vmulps %zmm31, %zmm19, %zmm3
vbroadcastss 0x27b11c(%rip), %zmm3 {%k2} # 0x417744
vminps %zmm1, %zmm3, %zmm3
vmaxps %zmm2, %zmm3, %zmm3
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm7) + zmm3
vfmadd231ps %zmm9, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm9) + zmm3
vmulps %zmm3, %zmm3, %zmm8
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm11, %zmm10, %zmm17 # zmm17 = (zmm10 * zmm17) + zmm11
vfmadd213ps %zmm12, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm12
vfmadd213ps %zmm13, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm13
vfmadd213ps %zmm14, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm14
vfmadd213ps %zmm4, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm4
vfmadd213ps %zmm3, %zmm8, %zmm17 # zmm17 = (zmm8 * zmm17) + zmm3
vaddps %zmm0, %zmm17, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vrcp14ps %zmm8, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm8 # zmm8 = -(zmm8 * zmm3) + zmm3
vfnmsub213ps %zmm30, %zmm31, %zmm8 # zmm8 = -(zmm31 * zmm8) - zmm30
vmulps %zmm5, %zmm8, %zmm5
jmp 0x19c803
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps (%rax){1to16}, %zmm5, %zmm5
vminps 0x4(%rax){1to16}, %zmm5, %zmm5
jmp 0x19c803
vxorps %zmm22, %zmm5, %zmm5
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm16
vmovaps %zmm6, %zmm3
vfmadd213ps %zmm4, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm3) + zmm4
vrndscaleps $0x1, %zmm3, %zmm8
vcmpltps %zmm8, %zmm3, %k1
vsubps %zmm0, %zmm8, %zmm8 {%k1}
vfmadd231ps %zmm7, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm7) + zmm16
vfmadd231ps %zmm9, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm9) + zmm16
vmulps %zmm16, %zmm16, %zmm3
vmovaps %zmm16, %zmm5
vfmadd213ps %zmm11, %zmm10, %zmm5 # zmm5 = (zmm10 * zmm5) + zmm11
vfmadd213ps %zmm12, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm12
vfmadd213ps %zmm13, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm13
vfmadd213ps %zmm14, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm14
vfmadd213ps %zmm4, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm4
vfmadd213ps %zmm16, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm16
vaddps %zmm0, %zmm5, %zmm3
vcvttps2dq %zmm8, %zmm5
vpaddd %zmm15, %zmm5, %zmm5
vpslld $0x17, %zmm5, %zmm5
vfmadd213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm30
vrcp14ps %zmm5, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm5 # zmm5 = -(zmm5 * zmm3) + zmm3
jmp 0x19c803
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vcmpltps %zmm29, %zmm5, %k1
vmulps (%rax){1to16}, %zmm5, %zmm5 {%k1}
jmp 0x19c803
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vbroadcastss (%rax), %zmm3
vfmadd213ps 0x4(%rax){1to16}, %zmm5, %zmm3 # zmm3 = (zmm5 * zmm3) + mem
vmaxps %zmm29, %zmm3, %zmm3
vminps %zmm30, %zmm3, %zmm3
vmulps %zmm5, %zmm3, %zmm5
jmp 0x19c803
movq 0x50(%rsp), %rsi
movq 0x28(%rsp), %rdi
movl 0x38(%rsp), %r9d
vmovups %zmm5, (%rdi)
addq $0x40, %rdi
incl %r9d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r9d
jne 0x19c2b1
incl %ebp
cmpl 0x80(%rsp), %ebp
jne 0x19c298
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rcx
jne 0x19c229
cmpl $0x10, 0xd0(%rsp)
jne 0x19d025
cmpl $0x8, 0xc8(%rsp)
jne 0x19d025
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x18(%rsp), %rcx
movl 0xdc(%rcx,%rax), %r8d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r10d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rcx,%rax), %edx
movq 0x1a8(%rcx,%rax), %rdi
movl 0xd4(%rcx,%rax), %esi
movq %rax, 0x150(%rsp)
movl 0xd8(%rcx,%rax), %r12d
movl %esi, %eax
imull %r12d, %eax
shll $0x7, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %rsi, %rax
shlq $0x7, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rsi, 0x58(%rsp)
subl %esi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x27a729(%rip), %ymm2 # 0x417058
vbroadcastss 0x27a724(%rip), %ymm3 # 0x41705c
vbroadcastss 0x277d4f(%rip), %ymm4 # 0x414690
vbroadcastss 0x27a716(%rip), %ymm5 # 0x417060
vbroadcastss 0x2783c5(%rip), %ymm6 # 0x414d18
vbroadcastss 0x27a708(%rip), %ymm7 # 0x417064
vbroadcastss 0x27a706(%rip), %ymm18 # 0x41706c
vbroadcastss 0x27a701(%rip), %ymm10 # 0x417070
vbroadcastss 0x27a6fc(%rip), %ymm11 # 0x417074
vbroadcastss 0x27a6f7(%rip), %ymm12 # 0x417078
vbroadcastss 0x27a6f2(%rip), %ymm13 # 0x41707c
vpbroadcastd 0x278385(%rip), %ymm14 # 0x414d18
vbroadcastss 0x27a6cc(%rip), %ymm15 # 0x417068
vbroadcastss 0x27c43e(%rip), %ymm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19d00a
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %esi
movl %esi, 0x98(%rsp)
movl 0x10c(%rsp), %esi
movl %esi, 0x30(%rsp)
movq 0x120(%rsp), %r9
movq 0x10(%rsp), %rsi
imulq %rsi, %r9
imulq 0xf0(%rsp), %r9
addq 0xe0(%rsp), %r9
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x5, %rsi
movq %rsi, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x19cffb
xorl %r11d, %r11d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19ca4c
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %ymm1
jmp 0x19ca50
vxorps %xmm1, %xmm1, %xmm1
movl %r11d, 0x38(%rsp)
movq %r9, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19cc8b
movq 0x48(%rcx), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19cc6b
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %r15d, %r15d
xorl %ebp, %ebp
movl %ebp, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x19cc57
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19cc57
cmpl $0x0, 0x58(%rsp)
jle 0x19cc57
cmpl 0x98(%rsp), %eax
jge 0x19cc57
movslq %eax, %rbx
imulq 0x90(%rsp), %rbx
addq %r11, %rbx
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r9
movq %r15, %rdi
testl %ecx, %ecx
js 0x19cc47
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19cc47
cmpl %r14d, %eax
jge 0x19cc47
shll $0x4, %eax
cltq
vbroadcastss (%rbx,%rax,4), %ymm8
vbroadcastss 0x4(%rbx,%rax,4), %ymm9
vbroadcastss 0x8(%rbx,%rax,4), %ymm16
vbroadcastss 0xc(%rbx,%rax,4), %ymm17
vbroadcastss 0x10(%rbx,%rax,4), %ymm19
vbroadcastss 0x14(%rbx,%rax,4), %ymm20
vbroadcastss 0x18(%rbx,%rax,4), %ymm23
vbroadcastss 0x1c(%rbx,%rax,4), %ymm24
vbroadcastss 0x20(%rbx,%rax,4), %ymm25
vbroadcastss 0x24(%rbx,%rax,4), %ymm26
vbroadcastss 0x28(%rbx,%rax,4), %ymm27
vbroadcastss 0x2c(%rbx,%rax,4), %ymm22
vbroadcastss 0x30(%rbx,%rax,4), %ymm28
vbroadcastss 0x34(%rbx,%rax,4), %ymm29
vbroadcastss 0x38(%rbx,%rax,4), %ymm30
vbroadcastss 0x3c(%rbx,%rax,4), %ymm21
movl %edi, %eax
andl $-0x80, %eax
vfmadd231ps (%r13,%rax,4), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rax,4), %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
vfmadd231ps 0x40(%r13,%rax,4), %ymm16, %ymm1 # ymm1 = (ymm16 * mem) + ymm1
vfmadd231ps 0x60(%r13,%rax,4), %ymm17, %ymm1 # ymm1 = (ymm17 * mem) + ymm1
vfmadd231ps 0x80(%r13,%rax,4), %ymm19, %ymm1 # ymm1 = (ymm19 * mem) + ymm1
vfmadd231ps 0xa0(%r13,%rax,4), %ymm20, %ymm1 # ymm1 = (ymm20 * mem) + ymm1
vfmadd231ps 0xc0(%r13,%rax,4), %ymm23, %ymm1 # ymm1 = (ymm23 * mem) + ymm1
vfmadd231ps 0xe0(%r13,%rax,4), %ymm24, %ymm1 # ymm1 = (ymm24 * mem) + ymm1
vfmadd231ps 0x100(%r13,%rax,4), %ymm25, %ymm1 # ymm1 = (ymm25 * mem) + ymm1
vfmadd231ps 0x120(%r13,%rax,4), %ymm26, %ymm1 # ymm1 = (ymm26 * mem) + ymm1
vfmadd231ps 0x140(%r13,%rax,4), %ymm27, %ymm1 # ymm1 = (ymm27 * mem) + ymm1
vfmadd231ps 0x160(%r13,%rax,4), %ymm22, %ymm1 # ymm1 = (ymm22 * mem) + ymm1
vfmadd231ps 0x180(%r13,%rax,4), %ymm28, %ymm1 # ymm1 = (ymm28 * mem) + ymm1
vfmadd231ps 0x1a0(%r13,%rax,4), %ymm29, %ymm1 # ymm1 = (ymm29 * mem) + ymm1
vfmadd231ps 0x1c0(%r13,%rax,4), %ymm30, %ymm1 # ymm1 = (ymm30 * mem) + ymm1
vfmadd231ps 0x1e0(%r13,%rax,4), %ymm21, %ymm1 # ymm1 = (ymm21 * mem) + ymm1
subq $-0x80, %rdi
addl %r8d, %ecx
decq %r9
jne 0x19cb23
incq %rbp
addq 0xb0(%rsp), %r15
cmpq %r12, %rbp
jne 0x19cacc
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19caab
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19cfcc
leaq 0x27d010(%rip), %rcx # 0x419cb0
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
jmpq *%rax
vmaxps %ymm0, %ymm1, %ymm1
jmp 0x19cfe0
vminps %ymm2, %ymm1, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vcmpleps %ymm0, %ymm9, %k1
vmaxps 0x27a32d(%rip){1to8}, %ymm9, %ymm8 # 0x417080
vpsrld $0x17, %ymm8, %ymm9
vpbroadcastd 0x27a321(%rip), %ymm16 # 0x417084
vpternlogd $0xea, 0x277922(%rip){1to8}, %ymm16, %ymm8 # 0x414690
vcmpltps 0x27a313(%rip){1to8}, %ymm8, %k2 # 0x41708c
vbroadcastss 0x27a30d(%rip), %ymm20 # 0x417090
vaddps %ymm20, %ymm8, %ymm16
vaddps %ymm8, %ymm16, %ymm16 {%k2}
vmulps %ymm16, %ymm16, %ymm8
vbroadcastss 0x27a2f5(%rip), %ymm17 # 0x417094
vfmadd213ps 0x27a2ef(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2e9(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2e3(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2dd(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2d7(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2d1(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2cb(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x27a2c5(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vmulps %ymm16, %ymm8, %ymm19
vmulps %ymm17, %ymm19, %ymm17
vpaddd 0x27a283(%rip){1to8}, %ymm9, %ymm9 # 0x417088
vcvtdq2ps %ymm9, %ymm9
vsubps %ymm6, %ymm9, %ymm9 {%k2}
vfmadd231ps %ymm15, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm15) + ymm17
vfmsub231ps %ymm8, %ymm4, %ymm17 # ymm17 = (ymm4 * ymm8) - ymm17
vsubps %ymm16, %ymm17, %ymm8
vfmsub231ps %ymm9, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm9) - ymm8
vmulps %ymm31, %ymm8, %ymm8
vbroadcastss 0x27a90d(%rip), %ymm8 {%k1} # 0x417744
vminps %ymm2, %ymm8, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vrcpps %ymm9, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm9 # ymm9 = -(ymm9 * ymm8) + ymm8
vfnmadd213ps %ymm20, %ymm31, %ymm9 # ymm9 = -(ymm31 * ymm9) + ymm20
vmulps %ymm1, %ymm9, %ymm1
jmp 0x19cfe0
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to8}, %ymm1, %ymm1
vminps 0x4(%rax){1to8}, %ymm1, %ymm1
jmp 0x19cfe0
vxorps 0x27a157(%rip){1to8}, %ymm1, %ymm1 # 0x417054
vminps %ymm2, %ymm1, %ymm1
vmaxps %ymm3, %ymm1, %ymm1
vmovaps %ymm4, %ymm8
vfmadd231ps %ymm5, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm5) + ymm8
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm1 # ymm1 = (ymm16 * ymm7) - ymm1
vfmsub231ps 0x27beb6(%rip){1to8}, %ymm16, %ymm1 # ymm1 = (ymm16 * mem) - ymm1
vmulps %ymm1, %ymm1, %ymm8
vmovaps %ymm18, %ymm9
vfmadd213ps %ymm10, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm10
vfmadd213ps %ymm11, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm11
vfmadd213ps %ymm12, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm12
vfmadd213ps %ymm13, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm13
vfmadd213ps %ymm4, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm4
vfmadd213ps %ymm1, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm1
vaddps %ymm6, %ymm9, %ymm8
vcvttps2dq %ymm16, %ymm1
vpslld $0x17, %ymm1, %ymm1
vpaddd %ymm1, %ymm14, %ymm1
vfmadd213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm6
vrcpps %ymm1, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm1 # ymm1 = -(ymm1 * ymm8) + ymm8
jmp 0x19cfe0
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps %ymm0, %ymm1, %ymm8
vminps %ymm0, %ymm1, %ymm1
vfmadd132ps (%rax){1to8}, %ymm8, %ymm1 # ymm1 = (ymm1 * mem) + ymm8
jmp 0x19cfe0
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %ymm8
vfmadd213ps 0x4(%rax){1to8}, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm8) + mem
vmaxps %ymm0, %ymm8, %ymm8
vminps %ymm6, %ymm8, %ymm8
vmulps %ymm1, %ymm8, %ymm1
jmp 0x19cfe0
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
vmovups %ymm1, (%r9)
addq $0x20, %r9
incl %r11d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r11d
jne 0x19ca38
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x19ca1f
movq 0x10(%rsp), %rsi
incq %rsi
movq %rsi, 0x10(%rsp)
cmpq 0xc0(%rsp), %rsi
jne 0x19c9b0
cmpl $0x4, 0xd0(%rsp)
jne 0x19d7bd
cmpl $0x10, 0xc8(%rsp)
jne 0x19d7bd
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x18(%rsp), %rcx
movl 0xdc(%rcx,%rax), %r8d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r10d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rcx,%rax), %edx
movq 0x1a8(%rcx,%rax), %rdi
movl 0xd4(%rcx,%rax), %esi
movq %rax, 0x150(%rsp)
movl 0xd8(%rcx,%rax), %r12d
movl %esi, %eax
imull %r12d, %eax
shll $0x6, %eax
cltq
movq %rax, 0xb8(%rsp)
vmovaps 0x27cd69(%rip), %zmm0 # 0x419e40
vmovaps 0x27cd9f(%rip), %zmm1 # 0x419e80
vmovaps 0x27cdd5(%rip), %zmm2 # 0x419ec0
vmovaps 0x27ce0b(%rip), %zmm6 # 0x419f00
vmovaps 0x27cf81(%rip), %zmm4 # 0x41a080
vbroadcastss 0x279f4b(%rip), %zmm3 # 0x417054
vxorps 0x27d32d(%rip), %zmm3, %zmm7 # 0x41a440
vxorps 0x27d2e3(%rip), %zmm3, %zmm9 # 0x41a400
vmovaps 0x27ce19(%rip), %zmm10 # 0x419f40
vmovaps 0x27ce4f(%rip), %zmm11 # 0x419f80
vmovaps 0x27ce85(%rip), %zmm12 # 0x419fc0
vmovaps 0x27cebb(%rip), %zmm13 # 0x41a000
vmovaps 0x27cef1(%rip), %zmm14 # 0x41a040
vmovdqa64 0x27cf67(%rip), %zmm15 # 0x41a0c0
vmovaps 0x27d0dd(%rip), %zmm21 # 0x41a240
vmovaps 0x27d113(%rip), %zmm22 # 0x41a280
vmovaps 0x27d149(%rip), %zmm23 # 0x41a2c0
vmovaps 0x27d17f(%rip), %zmm24 # 0x41a300
vmovaps 0x27d1b5(%rip), %zmm25 # 0x41a340
vmovaps 0x27d1eb(%rip), %zmm26 # 0x41a380
vmovaps 0x27d221(%rip), %zmm27 # 0x41a3c0
vmovaps %zmm3, %zmm20
vxorps %zmm3, %zmm4, %zmm28
movq %rsi, %rax
shlq $0x6, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rsi, 0x58(%rsp)
subl %esi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm29, %xmm29, %xmm29
vbroadcastss 0x277b2c(%rip), %zmm30 # 0x414d18
vbroadcastss 0x27bbee(%rip), %zmm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19d7a2
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %esi
movl %esi, 0x98(%rsp)
movl 0x10c(%rsp), %esi
movl %esi, 0x30(%rsp)
movq 0x120(%rsp), %r9
movq 0x10(%rsp), %rsi
imulq %rsi, %r9
imulq 0xf0(%rsp), %r9
addq 0xe0(%rsp), %r9
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x6, %rsi
movq %rsi, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x19d793
xorl %r11d, %r11d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19d29e
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %zmm5
jmp 0x19d2a2
vxorps %xmm5, %xmm5, %xmm5
movl %r11d, 0x38(%rsp)
movq %r9, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19d410
movq 0x48(%rcx), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19d3f0
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %ebp, %ebp
xorl %ebx, %ebx
movl %ebx, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x19d3dc
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19d3dc
cmpl $0x0, 0x58(%rsp)
jle 0x19d3dc
cmpl 0x98(%rsp), %eax
jge 0x19d3dc
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r11, %r9
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r15
movq %rbp, %rdi
testl %ecx, %ecx
js 0x19d3d0
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19d3d0
cmpl %r14d, %eax
jge 0x19d3d0
shll $0x2, %eax
cltq
vbroadcastss (%r9,%rax,4), %zmm3
vbroadcastss 0x4(%r9,%rax,4), %zmm8
vbroadcastss 0x8(%r9,%rax,4), %zmm16
vbroadcastss 0xc(%r9,%rax,4), %zmm17
movl %edi, %eax
andl $-0x40, %eax
vfmadd231ps (%r13,%rax,4), %zmm3, %zmm5 # zmm5 = (zmm3 * mem) + zmm5
vfmadd231ps 0x40(%r13,%rax,4), %zmm8, %zmm5 # zmm5 = (zmm8 * mem) + zmm5
vfmadd231ps 0x80(%r13,%rax,4), %zmm16, %zmm5 # zmm5 = (zmm16 * mem) + zmm5
vfmadd231ps 0xc0(%r13,%rax,4), %zmm17, %zmm5 # zmm5 = (zmm17 * mem) + zmm5
addq $0x40, %rdi
addl %r8d, %ecx
decq %r15
jne 0x19d374
incq %rbx
addq 0xb0(%rsp), %rbp
cmpq %r12, %rbx
jne 0x19d31d
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19d2fd
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19d763
leaq 0x27c8a3(%rip), %rcx # 0x419cc8
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
jmpq *%rax
vmaxps %zmm29, %zmm5, %zmm5
jmp 0x19d777
vminps %zmm1, %zmm5, %zmm3
vmaxps %zmm2, %zmm3, %zmm17
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm17, %zmm8 # zmm8 = (zmm17 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm7) + zmm17
vfmadd231ps %zmm9, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm9) + zmm17
vmulps %zmm17, %zmm17, %zmm8
vmovaps %zmm17, %zmm3
vfmadd213ps %zmm11, %zmm10, %zmm3 # zmm3 = (zmm10 * zmm3) + zmm11
vfmadd213ps %zmm12, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm12
vfmadd213ps %zmm13, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm13
vfmadd213ps %zmm14, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm14
vfmadd213ps %zmm4, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm4
vfmadd213ps %zmm17, %zmm8, %zmm3 # zmm3 = (zmm8 * zmm3) + zmm17
vaddps %zmm0, %zmm3, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vmaxps 0x27cc22(%rip), %zmm8, %zmm3 # 0x41a100
vpsrld $0x17, %zmm3, %zmm16
vpternlogd $0xec, 0x27cc50(%rip), %zmm4, %zmm3 # 0x41a140
vcmpltps 0x27cc85(%rip), %zmm3, %k1 # 0x41a180
vsubps %zmm0, %zmm3, %zmm18
vaddps %zmm3, %zmm18, %zmm18 {%k1}
vmulps %zmm18, %zmm18, %zmm3
vmovaps %zmm18, %zmm17
vmovaps 0x27cce3(%rip), %zmm19 # 0x41a200
vfmadd132ps 0x27cc99(%rip), %zmm19, %zmm17 # zmm17 = (zmm17 * mem) + zmm19
vfmadd213ps %zmm21, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm21
vfmadd213ps %zmm22, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm22
vfmadd213ps %zmm23, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm23
vfmadd213ps %zmm24, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm24
vfmadd213ps %zmm25, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm25
vfmadd213ps %zmm26, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm26
vfmadd213ps %zmm27, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm27
vmulps %zmm18, %zmm3, %zmm19
vfmadd213ps %zmm18, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm18
vcmpleps %zmm29, %zmm8, %k2
vpsubd %zmm15, %zmm16, %zmm8
vcvtdq2ps %zmm8, %zmm8
vaddps %zmm0, %zmm8, %zmm16
vmovaps %zmm8, %zmm16 {%k1}
vfmadd231ps 0x27ce7a(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vfmadd231ps %zmm3, %zmm28, %zmm19 # zmm19 = (zmm28 * zmm3) + zmm19
vfmadd231ps 0x27ceaa(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vmulps %zmm31, %zmm19, %zmm3
vbroadcastss 0x27a19e(%rip), %zmm3 {%k2} # 0x417744
vminps %zmm1, %zmm3, %zmm3
vmaxps %zmm2, %zmm3, %zmm3
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm7) + zmm3
vfmadd231ps %zmm9, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm9) + zmm3
vmulps %zmm3, %zmm3, %zmm8
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm11, %zmm10, %zmm17 # zmm17 = (zmm10 * zmm17) + zmm11
vfmadd213ps %zmm12, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm12
vfmadd213ps %zmm13, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm13
vfmadd213ps %zmm14, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm14
vfmadd213ps %zmm4, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm4
vfmadd213ps %zmm3, %zmm8, %zmm17 # zmm17 = (zmm8 * zmm17) + zmm3
vaddps %zmm0, %zmm17, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vrcp14ps %zmm8, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm8 # zmm8 = -(zmm8 * zmm3) + zmm3
vfnmsub213ps %zmm30, %zmm31, %zmm8 # zmm8 = -(zmm31 * zmm8) - zmm30
vmulps %zmm5, %zmm8, %zmm5
jmp 0x19d777
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to16}, %zmm5, %zmm5
vminps 0x4(%rax){1to16}, %zmm5, %zmm5
jmp 0x19d777
vxorps %zmm20, %zmm5, %zmm5
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm16
vmovaps %zmm6, %zmm3
vfmadd213ps %zmm4, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm3) + zmm4
vrndscaleps $0x1, %zmm3, %zmm8
vcmpltps %zmm8, %zmm3, %k1
vsubps %zmm0, %zmm8, %zmm8 {%k1}
vfmadd231ps %zmm7, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm7) + zmm16
vfmadd231ps %zmm9, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm9) + zmm16
vmulps %zmm16, %zmm16, %zmm3
vmovaps %zmm16, %zmm5
vfmadd213ps %zmm11, %zmm10, %zmm5 # zmm5 = (zmm10 * zmm5) + zmm11
vfmadd213ps %zmm12, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm12
vfmadd213ps %zmm13, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm13
vfmadd213ps %zmm14, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm14
vfmadd213ps %zmm4, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm4
vfmadd213ps %zmm16, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm16
vaddps %zmm0, %zmm5, %zmm3
vcvttps2dq %zmm8, %zmm5
vpaddd %zmm15, %zmm5, %zmm5
vpslld $0x17, %zmm5, %zmm5
vfmadd213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm30
vrcp14ps %zmm5, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm5 # zmm5 = -(zmm5 * zmm3) + zmm3
jmp 0x19d777
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vcmpltps %zmm29, %zmm5, %k1
vmulps (%rax){1to16}, %zmm5, %zmm5 {%k1}
jmp 0x19d777
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %zmm3
vfmadd213ps 0x4(%rax){1to16}, %zmm5, %zmm3 # zmm3 = (zmm5 * zmm3) + mem
vmaxps %zmm29, %zmm3, %zmm3
vminps %zmm30, %zmm3, %zmm3
vmulps %zmm5, %zmm3, %zmm5
jmp 0x19d777
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
vmovups %zmm5, (%r9)
addq $0x40, %r9
incl %r11d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r11d
jne 0x19d288
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x19d26f
movq 0x10(%rsp), %rsi
incq %rsi
movq %rsi, 0x10(%rsp)
cmpq 0xc0(%rsp), %rsi
jne 0x19d200
cmpl $0x10, 0xd0(%rsp)
jne 0x19dfa4
cmpl $0x4, 0xc8(%rsp)
jne 0x19dfa4
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x18(%rsp), %rcx
movl 0xdc(%rcx,%rax), %r8d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r10d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rcx,%rax), %edx
movq 0x1a8(%rcx,%rax), %rdi
movl 0xd4(%rcx,%rax), %esi
movq %rax, 0x150(%rsp)
movl 0xd8(%rcx,%rax), %r12d
movl %esi, %eax
imull %r12d, %eax
shll $0x6, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %rsi, %rax
shlq $0x6, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rsi, 0x58(%rsp)
subl %esi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x2797b5(%rip), %xmm2 # 0x417058
vbroadcastss 0x2797b0(%rip), %xmm3 # 0x41705c
vbroadcastss 0x276ddb(%rip), %xmm4 # 0x414690
vbroadcastss 0x2797a2(%rip), %xmm5 # 0x417060
vbroadcastss 0x277451(%rip), %xmm6 # 0x414d18
vbroadcastss 0x279794(%rip), %xmm7 # 0x417064
vbroadcastss 0x279792(%rip), %xmm18 # 0x41706c
vbroadcastss 0x27978d(%rip), %xmm10 # 0x417070
vbroadcastss 0x279788(%rip), %xmm11 # 0x417074
vbroadcastss 0x279783(%rip), %xmm12 # 0x417078
vbroadcastss 0x27977e(%rip), %xmm13 # 0x41707c
vpbroadcastd 0x277411(%rip), %xmm14 # 0x414d18
vbroadcastss 0x279758(%rip), %xmm15 # 0x417068
vbroadcastss 0x27b4d2(%rip), %xmm31 # 0x418dec
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19df89
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %esi
movl %esi, 0x98(%rsp)
movl 0x10c(%rsp), %esi
movl %esi, 0x30(%rsp)
movq 0x120(%rsp), %r9
movq 0x10(%rsp), %rsi
imulq %rsi, %r9
imulq 0xf0(%rsp), %r9
addq 0xe0(%rsp), %r9
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x4, %rsi
movq %rsi, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x19df7a
xorl %r11d, %r11d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19d9c0
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %xmm1
jmp 0x19d9c4
vxorps %xmm1, %xmm1, %xmm1
movl %r11d, 0x38(%rsp)
movq %r9, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19dbff
movq 0x48(%rcx), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19dbdf
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %r15d, %r15d
xorl %ebp, %ebp
movl %ebp, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x19dbcb
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19dbcb
cmpl $0x0, 0x58(%rsp)
jle 0x19dbcb
cmpl 0x98(%rsp), %eax
jge 0x19dbcb
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r11, %r9
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %rbx
movq %r15, %rdi
testl %ecx, %ecx
js 0x19dbbb
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19dbbb
cmpl %r14d, %eax
jge 0x19dbbb
shll $0x4, %eax
cltq
vbroadcastss (%r9,%rax,4), %xmm8
vbroadcastss 0x4(%r9,%rax,4), %xmm9
vbroadcastss 0x8(%r9,%rax,4), %xmm16
vbroadcastss 0xc(%r9,%rax,4), %xmm17
vbroadcastss 0x10(%r9,%rax,4), %xmm19
vbroadcastss 0x14(%r9,%rax,4), %xmm20
vbroadcastss 0x18(%r9,%rax,4), %xmm21
vbroadcastss 0x1c(%r9,%rax,4), %xmm23
vbroadcastss 0x20(%r9,%rax,4), %xmm24
vbroadcastss 0x24(%r9,%rax,4), %xmm25
vbroadcastss 0x28(%r9,%rax,4), %xmm26
vbroadcastss 0x2c(%r9,%rax,4), %xmm27
vbroadcastss 0x30(%r9,%rax,4), %xmm22
vbroadcastss 0x34(%r9,%rax,4), %xmm28
vbroadcastss 0x38(%r9,%rax,4), %xmm29
vbroadcastss 0x3c(%r9,%rax,4), %xmm30
movl %edi, %eax
andl $-0x40, %eax
vfmadd231ps (%r13,%rax,4), %xmm8, %xmm1 # xmm1 = (xmm8 * mem) + xmm1
vfmadd231ps 0x10(%r13,%rax,4), %xmm9, %xmm1 # xmm1 = (xmm9 * mem) + xmm1
vfmadd231ps 0x20(%r13,%rax,4), %xmm16, %xmm1 # xmm1 = (xmm16 * mem) + xmm1
vfmadd231ps 0x30(%r13,%rax,4), %xmm17, %xmm1 # xmm1 = (xmm17 * mem) + xmm1
vfmadd231ps 0x40(%r13,%rax,4), %xmm19, %xmm1 # xmm1 = (xmm19 * mem) + xmm1
vfmadd231ps 0x50(%r13,%rax,4), %xmm20, %xmm1 # xmm1 = (xmm20 * mem) + xmm1
vfmadd231ps 0x60(%r13,%rax,4), %xmm21, %xmm1 # xmm1 = (xmm21 * mem) + xmm1
vfmadd231ps 0x70(%r13,%rax,4), %xmm23, %xmm1 # xmm1 = (xmm23 * mem) + xmm1
vfmadd231ps 0x80(%r13,%rax,4), %xmm24, %xmm1 # xmm1 = (xmm24 * mem) + xmm1
vfmadd231ps 0x90(%r13,%rax,4), %xmm25, %xmm1 # xmm1 = (xmm25 * mem) + xmm1
vfmadd231ps 0xa0(%r13,%rax,4), %xmm26, %xmm1 # xmm1 = (xmm26 * mem) + xmm1
vfmadd231ps 0xb0(%r13,%rax,4), %xmm27, %xmm1 # xmm1 = (xmm27 * mem) + xmm1
vfmadd231ps 0xc0(%r13,%rax,4), %xmm22, %xmm1 # xmm1 = (xmm22 * mem) + xmm1
vfmadd231ps 0xd0(%r13,%rax,4), %xmm28, %xmm1 # xmm1 = (xmm28 * mem) + xmm1
vfmadd231ps 0xe0(%r13,%rax,4), %xmm29, %xmm1 # xmm1 = (xmm29 * mem) + xmm1
vfmadd231ps 0xf0(%r13,%rax,4), %xmm30, %xmm1 # xmm1 = (xmm30 * mem) + xmm1
addq $0x40, %rdi
addl %r8d, %ecx
decq %rbx
jne 0x19da97
incq %rbp
addq 0xb0(%rsp), %r15
cmpq %r12, %rbp
jne 0x19da40
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19da1f
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19df4b
leaq 0x27c0cc(%rip), %rcx # 0x419ce0
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
jmpq *%rax
vmaxps %xmm0, %xmm1, %xmm1
jmp 0x19df5f
vminps %xmm2, %xmm1, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vcmpleps %xmm0, %xmm9, %k1
vmaxps 0x2793b4(%rip){1to4}, %xmm9, %xmm8 # 0x417080
vpsrld $0x17, %xmm8, %xmm9
vpbroadcastd 0x2793a8(%rip), %xmm16 # 0x417084
vpternlogd $0xea, 0x2769a9(%rip){1to4}, %xmm16, %xmm8 # 0x414690
vcmpltps 0x27939a(%rip){1to4}, %xmm8, %k2 # 0x41708c
vaddps 0x279394(%rip){1to4}, %xmm8, %xmm16 # 0x417090
vaddps %xmm8, %xmm16, %xmm16 {%k2}
vmulps %xmm16, %xmm16, %xmm8
vbroadcastss 0x279382(%rip), %xmm17 # 0x417094
vfmadd213ps 0x27937c(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x279376(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x279370(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x27936a(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x279364(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x27935e(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x279358(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps 0x279352(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vmulps %xmm16, %xmm8, %xmm19
vmulps %xmm17, %xmm19, %xmm17
vpaddd 0x279310(%rip){1to4}, %xmm9, %xmm9 # 0x417088
vcvtdq2ps %xmm9, %xmm9
vsubps %xmm6, %xmm9, %xmm9 {%k2}
vfmadd231ps %xmm15, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm15) + xmm17
vfmsub231ps %xmm8, %xmm4, %xmm17 # xmm17 = (xmm4 * xmm8) - xmm17
vsubps %xmm16, %xmm17, %xmm8
vfnmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = -(xmm7 * xmm9) + xmm8
vaddps %xmm8, %xmm8, %xmm8
vbroadcastss 0x27999b(%rip), %xmm8 {%k1} # 0x417744
vminps %xmm2, %xmm8, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vrcpps %xmm9, %xmm8
vaddps %xmm8, %xmm8, %xmm16
vfmsub213ps %xmm31, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) - xmm31
vfnmadd213ps %xmm16, %xmm8, %xmm9 # xmm9 = -(xmm8 * xmm9) + xmm16
vfmsub231ps %xmm9, %xmm1, %xmm1 # xmm1 = (xmm1 * xmm9) - xmm1
jmp 0x19df5f
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to4}, %xmm1, %xmm1
vminps 0x4(%rax){1to4}, %xmm1, %xmm1
jmp 0x19df5f
vxorps 0x2791dd(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm1, %xmm1
vmovaps %xmm4, %xmm8
vfmadd231ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm5) + xmm8
vcvttps2dq %xmm8, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm8, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm1 # xmm1 = (xmm16 * xmm7) - xmm1
vfmsub231ps 0x27af37(%rip){1to4}, %xmm16, %xmm1 # xmm1 = (xmm16 * mem) - xmm1
vmulps %xmm1, %xmm1, %xmm8
vmovaps %xmm18, %xmm9
vfmadd213ps %xmm10, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm10
vfmadd213ps %xmm11, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm11
vfmadd213ps %xmm12, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm12
vfmadd213ps %xmm13, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm13
vfmadd213ps %xmm4, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm4
vfmadd213ps %xmm1, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm1
vaddps %xmm6, %xmm9, %xmm8
vcvttps2dq %xmm16, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm6
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm6
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x19df5f
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps %xmm0, %xmm1, %xmm8
vminps %xmm0, %xmm1, %xmm1
vfmadd132ps (%rax){1to4}, %xmm8, %xmm1 # xmm1 = (xmm1 * mem) + xmm8
jmp 0x19df5f
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %xmm8
vfmadd213ps 0x4(%rax){1to4}, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + mem
vmaxps %xmm0, %xmm8, %xmm8
vminps %xmm6, %xmm8, %xmm8
vmulps %xmm1, %xmm8, %xmm1
jmp 0x19df5f
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
vmovups %xmm1, (%r9)
addq $0x10, %r9
incl %r11d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r11d
jne 0x19d9ac
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x19d993
movq 0x10(%rsp), %rsi
incq %rsi
movq %rsi, 0x10(%rsp)
cmpq 0xc0(%rsp), %rsi
jne 0x19d924
cmpl $0x1, 0xd0(%rsp)
jne 0x19e6fe
cmpl $0x10, 0xc8(%rsp)
jne 0x19e6fe
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %r8d
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r10d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x58(%rsp)
movl 0x114(%rax,%rcx), %edx
movq 0x1a8(%rax,%rcx), %rdi
movl 0xd4(%rax,%rcx), %r9d
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %r12d
movl %r9d, %eax
imull %r12d, %eax
shll $0x4, %eax
cltq
movq %rax, 0xb8(%rsp)
vmovaps 0x27bde8(%rip), %zmm0 # 0x419e40
vmovaps 0x27be1e(%rip), %zmm1 # 0x419e80
vmovaps 0x27be54(%rip), %zmm2 # 0x419ec0
vmovaps 0x27be8a(%rip), %zmm6 # 0x419f00
vmovaps 0x27c000(%rip), %zmm4 # 0x41a080
vbroadcastss 0x278fca(%rip), %zmm3 # 0x417054
vxorps 0x27c3ac(%rip), %zmm3, %zmm7 # 0x41a440
vxorps 0x27c362(%rip), %zmm3, %zmm9 # 0x41a400
vmovaps 0x27be98(%rip), %zmm10 # 0x419f40
vmovaps 0x27bece(%rip), %zmm11 # 0x419f80
vmovaps 0x27bf04(%rip), %zmm12 # 0x419fc0
vmovaps 0x27bf3a(%rip), %zmm13 # 0x41a000
vmovaps 0x27bf70(%rip), %zmm14 # 0x41a040
vmovdqa64 0x27bfe6(%rip), %zmm15 # 0x41a0c0
vmovaps 0x27c15c(%rip), %zmm21 # 0x41a240
vmovaps 0x27c192(%rip), %zmm22 # 0x41a280
vmovaps 0x27c1c8(%rip), %zmm23 # 0x41a2c0
vmovaps 0x27c1fe(%rip), %zmm24 # 0x41a300
vmovaps 0x27c234(%rip), %zmm25 # 0x41a340
vmovaps 0x27c26a(%rip), %zmm26 # 0x41a380
vmovaps 0x27c2a0(%rip), %zmm27 # 0x41a3c0
vmovaps %zmm3, %zmm20
vxorps %zmm3, %zmm4, %zmm28
movq %r9, %rax
shlq $0x4, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
subl %r9d, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm29, %xmm29, %xmm29
vbroadcastss 0x276baf(%rip), %zmm30 # 0x414d18
vbroadcastss 0x27ac71(%rip), %zmm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movq %r9, 0x98(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19e6e3
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %ecx
movl %ecx, 0x24(%rsp)
movl 0x10c(%rsp), %ecx
movl %ecx, 0x30(%rsp)
movq 0x120(%rsp), %r11
movq 0x10(%rsp), %rcx
imulq %rcx, %r11
imulq 0xf0(%rsp), %r11
addq 0xe0(%rsp), %r11
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x6, %rcx
movq %rcx, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x19e6d4
xorl %ebx, %ebx
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19e21e
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %zmm5
jmp 0x19e222
vxorps %xmm5, %xmm5, %xmm5
movl %ebx, 0x38(%rsp)
movq %r11, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19e350
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19e330
movq 0x70(%rsp), %r14
imulq %rax, %r14
addq 0x60(%rsp), %r14
xorl %edi, %edi
xorl %r11d, %r11d
movl %r11d, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x19e31c
cltd
idivl 0x58(%rsp)
testl %edx, %edx
jne 0x19e31c
cmpl 0x24(%rsp), %eax
jge 0x19e31c
movq 0x98(%rsp), %r15
testl %r15d, %r15d
jle 0x19e31c
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r14, %r9
movl 0x40(%rsp), %ecx
movq %rdi, %rbx
testl %ecx, %ecx
js 0x19e310
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19e310
cmpl %ebp, %eax
jge 0x19e310
cltq
vbroadcastss (%r9,%rax,4), %zmm3
movl %ebx, %eax
andl $-0x10, %eax
vfmadd231ps (%r13,%rax,4), %zmm3, %zmm5 # zmm5 = (zmm3 * mem) + zmm5
addq $0x10, %rbx
addl %r8d, %ecx
decq %r15
jne 0x19e2e8
incq %r11
addq 0xb0(%rsp), %rdi
cmpq %r12, %r11
jne 0x19e2a2
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19e281
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19e6ac
leaq 0x27b993(%rip), %rcx # 0x419cf8
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r11
movl 0x38(%rsp), %ebx
jmpq *%rax
vmaxps %zmm29, %zmm5, %zmm5
jmp 0x19e6ba
vminps %zmm1, %zmm5, %zmm3
vmaxps %zmm2, %zmm3, %zmm17
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm17, %zmm8 # zmm8 = (zmm17 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm7) + zmm17
vfmadd231ps %zmm9, %zmm16, %zmm17 # zmm17 = (zmm16 * zmm9) + zmm17
vmulps %zmm17, %zmm17, %zmm8
vmovaps %zmm17, %zmm3
vfmadd213ps %zmm11, %zmm10, %zmm3 # zmm3 = (zmm10 * zmm3) + zmm11
vfmadd213ps %zmm12, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm12
vfmadd213ps %zmm13, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm13
vfmadd213ps %zmm14, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm14
vfmadd213ps %zmm4, %zmm17, %zmm3 # zmm3 = (zmm17 * zmm3) + zmm4
vfmadd213ps %zmm17, %zmm8, %zmm3 # zmm3 = (zmm8 * zmm3) + zmm17
vaddps %zmm0, %zmm3, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vmaxps 0x27bce8(%rip), %zmm8, %zmm3 # 0x41a100
vpsrld $0x17, %zmm3, %zmm16
vpternlogd $0xec, 0x27bd16(%rip), %zmm4, %zmm3 # 0x41a140
vcmpltps 0x27bd4b(%rip), %zmm3, %k1 # 0x41a180
vsubps %zmm0, %zmm3, %zmm18
vaddps %zmm3, %zmm18, %zmm18 {%k1}
vmulps %zmm18, %zmm18, %zmm3
vmovaps %zmm18, %zmm17
vmovaps 0x27bda9(%rip), %zmm19 # 0x41a200
vfmadd132ps 0x27bd5f(%rip), %zmm19, %zmm17 # zmm17 = (zmm17 * mem) + zmm19
vfmadd213ps %zmm21, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm21
vfmadd213ps %zmm22, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm22
vfmadd213ps %zmm23, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm23
vfmadd213ps %zmm24, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm24
vfmadd213ps %zmm25, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm25
vfmadd213ps %zmm26, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm26
vfmadd213ps %zmm27, %zmm18, %zmm17 # zmm17 = (zmm18 * zmm17) + zmm27
vmulps %zmm18, %zmm3, %zmm19
vfmadd213ps %zmm18, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm18
vcmpleps %zmm29, %zmm8, %k2
vpsubd %zmm15, %zmm16, %zmm8
vcvtdq2ps %zmm8, %zmm8
vaddps %zmm0, %zmm8, %zmm16
vmovaps %zmm8, %zmm16 {%k1}
vfmadd231ps 0x27bf40(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vfmadd231ps %zmm3, %zmm28, %zmm19 # zmm19 = (zmm28 * zmm3) + zmm19
vfmadd231ps 0x27bf70(%rip), %zmm16, %zmm19 # zmm19 = (zmm16 * mem) + zmm19
vmulps %zmm31, %zmm19, %zmm3
vbroadcastss 0x279264(%rip), %zmm3 {%k2} # 0x417744
vminps %zmm1, %zmm3, %zmm3
vmaxps %zmm2, %zmm3, %zmm3
vmovaps %zmm6, %zmm8
vfmadd213ps %zmm4, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm4
vrndscaleps $0x1, %zmm8, %zmm16
vcmpltps %zmm16, %zmm8, %k1
vsubps %zmm0, %zmm16, %zmm16 {%k1}
vfmadd231ps %zmm7, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm7) + zmm3
vfmadd231ps %zmm9, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm9) + zmm3
vmulps %zmm3, %zmm3, %zmm8
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm11, %zmm10, %zmm17 # zmm17 = (zmm10 * zmm17) + zmm11
vfmadd213ps %zmm12, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm12
vfmadd213ps %zmm13, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm13
vfmadd213ps %zmm14, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm14
vfmadd213ps %zmm4, %zmm3, %zmm17 # zmm17 = (zmm3 * zmm17) + zmm4
vfmadd213ps %zmm3, %zmm8, %zmm17 # zmm17 = (zmm8 * zmm17) + zmm3
vaddps %zmm0, %zmm17, %zmm3
vcvttps2dq %zmm16, %zmm8
vpaddd %zmm15, %zmm8, %zmm8
vpslld $0x17, %zmm8, %zmm8
vfmadd213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) + zmm30
vrcp14ps %zmm8, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm8 # zmm8 = (zmm3 * zmm8) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm8 # zmm8 = -(zmm8 * zmm3) + zmm3
vfnmsub213ps %zmm30, %zmm31, %zmm8 # zmm8 = -(zmm31 * zmm8) - zmm30
vmulps %zmm5, %zmm8, %zmm5
jmp 0x19e6ba
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps (%rax){1to16}, %zmm5, %zmm5
vminps 0x4(%rax){1to16}, %zmm5, %zmm5
jmp 0x19e6ba
vxorps %zmm20, %zmm5, %zmm5
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm16
vmovaps %zmm6, %zmm3
vfmadd213ps %zmm4, %zmm16, %zmm3 # zmm3 = (zmm16 * zmm3) + zmm4
vrndscaleps $0x1, %zmm3, %zmm8
vcmpltps %zmm8, %zmm3, %k1
vsubps %zmm0, %zmm8, %zmm8 {%k1}
vfmadd231ps %zmm7, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm7) + zmm16
vfmadd231ps %zmm9, %zmm8, %zmm16 # zmm16 = (zmm8 * zmm9) + zmm16
vmulps %zmm16, %zmm16, %zmm3
vmovaps %zmm16, %zmm5
vfmadd213ps %zmm11, %zmm10, %zmm5 # zmm5 = (zmm10 * zmm5) + zmm11
vfmadd213ps %zmm12, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm12
vfmadd213ps %zmm13, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm13
vfmadd213ps %zmm14, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm14
vfmadd213ps %zmm4, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + zmm4
vfmadd213ps %zmm16, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm16
vaddps %zmm0, %zmm5, %zmm3
vcvttps2dq %zmm8, %zmm5
vpaddd %zmm15, %zmm5, %zmm5
vpslld $0x17, %zmm5, %zmm5
vfmadd213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) + zmm30
vrcp14ps %zmm5, %zmm3
vfmsub213ps %zmm30, %zmm3, %zmm5 # zmm5 = (zmm3 * zmm5) - zmm30
vfnmadd132ps %zmm3, %zmm3, %zmm5 # zmm5 = -(zmm5 * zmm3) + zmm3
jmp 0x19e6ba
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vcmpltps %zmm29, %zmm5, %k1
vmulps (%rax){1to16}, %zmm5, %zmm5 {%k1}
jmp 0x19e6ba
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vbroadcastss (%rax), %zmm3
vfmadd213ps 0x4(%rax){1to16}, %zmm5, %zmm3 # zmm3 = (zmm5 * zmm3) + mem
vmaxps %zmm29, %zmm3, %zmm3
vminps %zmm30, %zmm3, %zmm3
vmulps %zmm5, %zmm3, %zmm5
jmp 0x19e6ba
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r11
movl 0x38(%rsp), %ebx
vmovups %zmm5, (%r11)
addq $0x40, %r11
incl %ebx
incl 0x40(%rsp)
cmpl 0x30(%rsp), %ebx
jne 0x19e208
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x19e1f0
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rcx
jne 0x19e185
cmpl $0x10, 0xd0(%rsp)
jne 0x19ebd7
cmpl $0x1, 0xc8(%rsp)
jne 0x19ebd7
movslq 0x118(%rsp), %rax
movq %rax, 0x80(%rsp)
testq %rax, %rax
jle 0x1a2e50
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %ebp
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0x130(%rsp)
movl 0xe4(%rax,%rcx), %r12d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x148(%rsp)
movl 0x114(%rax,%rcx), %r14d
movq 0x1a8(%rax,%rcx), %rdx
movl 0xd4(%rax,%rcx), %esi
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %ebx
movq 0xf0(%rsp), %rax
imulq 0x120(%rsp), %rax
movq %rax, 0x88(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0xc0(%rsp)
movq 0xa8(%rsp), %rcx
movl 0x2c(%rcx), %r13d
movl %esi, %eax
imull %ebx, %eax
shll $0x4, %eax
cltq
movq %rax, 0x78(%rsp)
movq %rsi, %rax
shlq $0x4, %rax
movq %rax, 0xa0(%rsp)
movl $0x1, %eax
movq %rsi, 0xb0(%rsp)
subl %esi, %eax
imull %ebp, %eax
movl %eax, 0x12c(%rsp)
decl %r14d
movl 0x30(%rcx), %eax
movl %eax, 0x58(%rsp)
movl 0x38(%rcx), %eax
movq %rax, 0x90(%rsp)
movl 0x10c(%rsp), %eax
movl %eax, 0x38(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x1a0(%rsp)
movq $0x0, 0x68(%rsp)
movq %r14, 0x30(%rsp)
movq %rdx, 0x28(%rsp)
cmpl $0x0, 0x1a0(%rsp)
jle 0x19ebb7
movq 0x88(%rsp), %rsi
movq 0x68(%rsp), %rcx
imulq %rcx, %rsi
movq 0x18(%rsp), %rax
movq 0x48(%rax), %rdi
imulq %rcx, %rdi
imulq 0x18(%rax), %rdi
addq 0xc0(%rsp), %rsi
movq %rsi, 0x70(%rsp)
addq 0x8(%rax), %rdi
movq %rdi, 0x50(%rsp)
xorl %r15d, %r15d
cmpl $0x0, 0x38(%rsp)
jle 0x19eba6
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movq (%rcx), %rax
movq %rax, 0x48(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0xb8(%rsp)
imulq %rax, %rsi
movq %rsi, 0x98(%rsp)
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
movq %rax, 0x10(%rsp)
xorl %ecx, %ecx
movl 0x12c(%rsp), %eax
movl %eax, 0x24(%rsp)
movl %ecx, 0x60(%rsp)
testq %rdx, %rdx
je 0x19e907
movq 0x68(%rsp), %rax
vmovss (%rdx,%rax,4), %xmm0
jmp 0x19e90b
vxorps %xmm0, %xmm0, %xmm0
cmpl $0x0, 0x90(%rsp)
jle 0x19ea02
vxorps %xmm1, %xmm1, %xmm1
xorl %eax, %eax
movq 0x50(%rsp), %rdi
testl %ebx, %ebx
movq %rax, 0x40(%rsp)
jle 0x19e9e1
movq 0xb8(%rsp), %r8
imulq %rax, %r8
addq 0x48(%rsp), %r8
xorl %r14d, %r14d
xorl %r10d, %r10d
movl %r10d, %eax
subl %ebx, %eax
incl %eax
imull 0x130(%rsp), %eax
addl %r15d, %eax
js 0x19e9cd
cltd
idivl 0x148(%rsp)
testl %edx, %edx
jne 0x19e9cd
cmpl $0x0, 0xb0(%rsp)
jle 0x19e9cd
cmpl 0x58(%rsp), %eax
jge 0x19e9cd
movslq %eax, %r11
imulq 0x98(%rsp), %r11
addq %r8, %r11
movl 0x24(%rsp), %ecx
movq 0xb0(%rsp), %rsi
movq %r14, %r9
testl %ecx, %ecx
js 0x19e9c2
movl %ecx, %eax
cltd
idivl %r12d
testl %edx, %edx
jne 0x19e9c2
cmpl %r13d, %eax
jge 0x19e9c2
shll $0x4, %eax
cltq
vmovaps (%r11,%rax,4), %zmm2
movl %r9d, %eax
andl $-0x10, %eax
vfmadd231ps (%rdi,%rax,4), %zmm2, %zmm1 # zmm1 = (zmm2 * mem) + zmm1
addq $0x10, %r9
addl %ebp, %ecx
decq %rsi
jne 0x19e996
incq %r10
addq 0xa0(%rsp), %r14
cmpq %rbx, %r10
jne 0x19e948
movq 0x78(%rsp), %rax
leaq (%rdi,%rax,4), %rdi
movq 0x40(%rsp), %rax
incq %rax
cmpq 0x90(%rsp), %rax
jne 0x19e924
jmp 0x19ea06
vxorps %xmm1, %xmm1, %xmm1
vextractf64x4 $0x1, %zmm1, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm1, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vaddss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm4
movq 0x30(%rsp), %r14
cmpl $0x5, %r14d
ja 0x19eb6a
leaq 0x27b3a2(%rip), %rcx # 0x419de8
movslq (%rcx,%r14,4), %rax
addq %rcx, %rax
movq 0x28(%rsp), %rdx
jmpq *%rax
vmaxss 0x27842c(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x19eb73
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x40(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x27629d(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x28(%rsp), %rdx
vmulss 0x40(%rsp), %xmm0, %xmm0
jmp 0x19eb73
movq 0x10(%rsp), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
movl 0x60(%rsp), %ecx
jbe 0x19eb77
vmovaps %xmm1, %xmm0
jmp 0x19eb77
vmovss 0x278596(%rip), %xmm2 # 0x417058
vminss %xmm2, %xmm4, %xmm1
vxorps 0x278584(%rip){1to4}, %xmm1, %xmm0 # 0x417054
vcmpltss 0x278581(%rip), %xmm1, %k1 # 0x41705c
vmovss %xmm2, %xmm0, %xmm0 {%k1}
vzeroupper
callq 0x563e0
movq 0x28(%rsp), %rdx
vmovss 0x276222(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x19eb73
movq 0x10(%rsp), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x278374(%rip), %xmm4, %k1 # 0x416e88
vmovss 0x2761fa(%rip), %xmm0 {%k1} # 0x414d18
vmulss %xmm4, %xmm0, %xmm0
jmp 0x19eb73
movq 0x10(%rsp), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps 0x278518(%rip){1to4}, %xmm2, %xmm0 # 0x417054
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
movl 0x60(%rsp), %ecx
jb 0x19eb77
vmovss 0x2761c2(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x19eb9b
vmovaps %xmm4, %xmm0
jmp 0x19eb73
vmovaps %xmm4, %xmm0
movq 0x28(%rsp), %rdx
movl 0x60(%rsp), %ecx
movq 0x70(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x70(%rsp)
incl %ecx
incl 0x24(%rsp)
cmpl 0x38(%rsp), %ecx
jne 0x19e8f2
jmp 0x19eba6
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x19eb73
incl %r15d
cmpl 0x1a0(%rsp), %r15d
jne 0x19e88c
movq 0x68(%rsp), %rcx
incq %rcx
movq %rcx, 0x68(%rsp)
cmpq 0x80(%rsp), %rcx
jne 0x19e842
jmp 0x1a2e50
movl 0xc8(%rsp), %ecx
xorl $0x8, %ecx
movq 0xd0(%rsp), %rax
xorl $0x8, %eax
movl %ecx, 0x190(%rsp)
orl %ecx, %eax
movq 0x18(%rsp), %rcx
jne 0x19f365
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x19f365
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rax
movl 0xdc(%rcx,%rax), %r9d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r11d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rcx,%rax), %edx
movq 0x1a8(%rcx,%rax), %rsi
movl 0xd4(%rcx,%rax), %edi
movq %rax, 0x150(%rsp)
movl 0xd8(%rcx,%rax), %r12d
movl %edi, %eax
imull %r12d, %eax
shll $0x6, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %rdi, %rax
shlq $0x6, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rdi, 0x58(%rsp)
subl %edi, %eax
imull %r9d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x278393(%rip), %ymm2 # 0x417058
vbroadcastss 0x27838e(%rip), %ymm3 # 0x41705c
vbroadcastss 0x2759b9(%rip), %ymm4 # 0x414690
vbroadcastss 0x278380(%rip), %ymm5 # 0x417060
vbroadcastss 0x27602f(%rip), %ymm6 # 0x414d18
vbroadcastss 0x278372(%rip), %ymm7 # 0x417064
vbroadcastss 0x278370(%rip), %ymm18 # 0x41706c
vbroadcastss 0x27836b(%rip), %ymm10 # 0x417070
vbroadcastss 0x278366(%rip), %ymm11 # 0x417074
vbroadcastss 0x278361(%rip), %ymm12 # 0x417078
vbroadcastss 0x27835c(%rip), %ymm13 # 0x41707c
vpbroadcastd 0x275fef(%rip), %ymm14 # 0x414d18
vbroadcastss 0x278336(%rip), %ymm15 # 0x417068
vbroadcastss 0x278354(%rip), %ymm21 # 0x417090
vbroadcastss 0x27834e(%rip), %ymm22 # 0x417094
vbroadcastss 0x278350(%rip), %ymm25 # 0x4170a0
vbroadcastss 0x27834a(%rip), %ymm26 # 0x4170a4
vbroadcastss 0x278344(%rip), %ymm27 # 0x4170a8
vbroadcastss 0x27833e(%rip), %ymm28 # 0x4170ac
vbroadcastss 0x278338(%rip), %ymm29 # 0x4170b0
vbroadcastss 0x278332(%rip), %ymm30 # 0x4170b4
vbroadcastss 0x27a058(%rip), %ymm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rsi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19f34a
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %edi
movl %edi, 0x98(%rsp)
movl 0x10c(%rsp), %edi
movl %edi, 0x30(%rsp)
movq 0x120(%rsp), %r8
movq 0x10(%rsp), %rdi
imulq %rdi, %r8
imulq 0xf0(%rsp), %r8
addq 0xe0(%rsp), %r8
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x5, %rdi
movq %rdi, 0x1a0(%rsp)
xorl %edi, %edi
cmpl $0x0, 0x30(%rsp)
jle 0x19f33b
xorl %r10d, %r10d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rsi, %rsi
je 0x19ee32
movq 0x1a0(%rsp), %rax
vmovups (%rsi,%rax), %ymm1
jmp 0x19ee36
vxorps %xmm1, %xmm1, %xmm1
movl %r10d, 0x38(%rsp)
movq %r8, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19eff1
movq 0x48(%rcx), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19efd1
movq 0x70(%rsp), %rbx
imulq %rax, %rbx
addq 0x60(%rsp), %rbx
xorl %esi, %esi
xorl %ebp, %ebp
movl %ebp, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %edi, %eax
js 0x19efbd
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19efbd
cmpl $0x0, 0x58(%rsp)
jle 0x19efbd
cmpl 0x98(%rsp), %eax
jge 0x19efbd
movslq %eax, %r10
imulq 0x90(%rsp), %r10
addq %rbx, %r10
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r15
movq %rsi, %r8
testl %ecx, %ecx
js 0x19efad
movl %ecx, %eax
cltd
idivl %r11d
testl %edx, %edx
jne 0x19efad
cmpl %r14d, %eax
jge 0x19efad
shll $0x3, %eax
cltq
vbroadcastss (%r10,%rax,4), %ymm8
vbroadcastss 0x4(%r10,%rax,4), %ymm9
vbroadcastss 0x8(%r10,%rax,4), %ymm16
vbroadcastss 0xc(%r10,%rax,4), %ymm17
vbroadcastss 0x10(%r10,%rax,4), %ymm19
vbroadcastss 0x14(%r10,%rax,4), %ymm20
vbroadcastss 0x18(%r10,%rax,4), %ymm23
vbroadcastss 0x1c(%r10,%rax,4), %ymm24
movl %r8d, %eax
andl $-0x40, %eax
vfmadd231ps (%r13,%rax,4), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rax,4), %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
vfmadd231ps 0x40(%r13,%rax,4), %ymm16, %ymm1 # ymm1 = (ymm16 * mem) + ymm1
vfmadd231ps 0x60(%r13,%rax,4), %ymm17, %ymm1 # ymm1 = (ymm17 * mem) + ymm1
vfmadd231ps 0x80(%r13,%rax,4), %ymm19, %ymm1 # ymm1 = (ymm19 * mem) + ymm1
vfmadd231ps 0xa0(%r13,%rax,4), %ymm20, %ymm1 # ymm1 = (ymm20 * mem) + ymm1
vfmadd231ps 0xc0(%r13,%rax,4), %ymm23, %ymm1 # ymm1 = (ymm23 * mem) + ymm1
vfmadd231ps 0xe0(%r13,%rax,4), %ymm24, %ymm1 # ymm1 = (ymm24 * mem) + ymm1
addq $0x40, %r8
addl %r9d, %ecx
decq %r15
jne 0x19ef08
incq %rbp
addq 0xb0(%rsp), %rsi
cmpq %r12, %rbp
jne 0x19eeb1
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19ee91
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19f30c
leaq 0x27ad0a(%rip), %rcx # 0x419d10
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rsi
movq 0x28(%rsp), %r8
movl 0x38(%rsp), %r10d
jmpq *%rax
vmaxps %ymm0, %ymm1, %ymm1
jmp 0x19f320
vminps %ymm2, %ymm1, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vcmpleps %ymm0, %ymm9, %k1
vmaxps 0x277fc7(%rip){1to8}, %ymm9, %ymm8 # 0x417080
vpsrld $0x17, %ymm8, %ymm9
vpbroadcastd 0x277fbb(%rip), %ymm16 # 0x417084
vpternlogd $0xea, 0x2755bc(%rip){1to8}, %ymm16, %ymm8 # 0x414690
vcmpltps 0x277fad(%rip){1to8}, %ymm8, %k2 # 0x41708c
vaddps %ymm21, %ymm8, %ymm16
vaddps %ymm8, %ymm16, %ymm16 {%k2}
vmulps %ymm16, %ymm16, %ymm8
vmovaps %ymm22, %ymm17
vfmadd213ps 0x277f97(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x277f91(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps %ymm25, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm25
vfmadd213ps %ymm26, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm26
vfmadd213ps %ymm27, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm27
vfmadd213ps %ymm28, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm28
vfmadd213ps %ymm29, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm29
vfmadd213ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm30
vmulps %ymm16, %ymm8, %ymm19
vmulps %ymm17, %ymm19, %ymm17
vpaddd 0x277f43(%rip){1to8}, %ymm9, %ymm9 # 0x417088
vcvtdq2ps %ymm9, %ymm9
vsubps %ymm6, %ymm9, %ymm9 {%k2}
vfmadd231ps %ymm15, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm15) + ymm17
vfmsub231ps %ymm8, %ymm4, %ymm17 # ymm17 = (ymm4 * ymm8) - ymm17
vsubps %ymm16, %ymm17, %ymm8
vfmsub231ps %ymm9, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm9) - ymm8
vmulps %ymm31, %ymm8, %ymm8
vbroadcastss 0x2785cd(%rip), %ymm8 {%k1} # 0x417744
vminps %ymm2, %ymm8, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vrcpps %ymm9, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm9 # ymm9 = -(ymm9 * ymm8) + ymm8
vfnmadd213ps %ymm21, %ymm31, %ymm9 # ymm9 = -(ymm31 * ymm9) + ymm21
vmulps %ymm1, %ymm9, %ymm1
jmp 0x19f320
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to8}, %ymm1, %ymm1
vminps 0x4(%rax){1to8}, %ymm1, %ymm1
jmp 0x19f320
vxorps 0x277e17(%rip){1to8}, %ymm1, %ymm1 # 0x417054
vminps %ymm2, %ymm1, %ymm1
vmaxps %ymm3, %ymm1, %ymm1
vmovaps %ymm4, %ymm8
vfmadd231ps %ymm5, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm5) + ymm8
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm1 # ymm1 = (ymm16 * ymm7) - ymm1
vfmsub231ps 0x279b76(%rip){1to8}, %ymm16, %ymm1 # ymm1 = (ymm16 * mem) - ymm1
vmulps %ymm1, %ymm1, %ymm8
vmovaps %ymm18, %ymm9
vfmadd213ps %ymm10, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm10
vfmadd213ps %ymm11, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm11
vfmadd213ps %ymm12, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm12
vfmadd213ps %ymm13, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm13
vfmadd213ps %ymm4, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm4
vfmadd213ps %ymm1, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm1
vaddps %ymm6, %ymm9, %ymm8
vcvttps2dq %ymm16, %ymm1
vpslld $0x17, %ymm1, %ymm1
vpaddd %ymm1, %ymm14, %ymm1
vfmadd213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm6
vrcpps %ymm1, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm1 # ymm1 = -(ymm1 * ymm8) + ymm8
jmp 0x19f320
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps %ymm0, %ymm1, %ymm8
vminps %ymm0, %ymm1, %ymm1
vfmadd132ps (%rax){1to8}, %ymm8, %ymm1 # ymm1 = (ymm1 * mem) + ymm8
jmp 0x19f320
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %ymm8
vfmadd213ps 0x4(%rax){1to8}, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm8) + mem
vmaxps %ymm0, %ymm8, %ymm8
vminps %ymm6, %ymm8, %ymm8
vmulps %ymm1, %ymm8, %ymm1
jmp 0x19f320
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rsi
movq 0x28(%rsp), %r8
movl 0x38(%rsp), %r10d
vmovups %ymm1, (%r8)
addq $0x20, %r8
incl %r10d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r10d
jne 0x19ee1e
incl %edi
cmpl 0x80(%rsp), %edi
jne 0x19ee05
movq 0x10(%rsp), %rdi
incq %rdi
movq %rdi, 0x10(%rsp)
cmpq 0xc0(%rsp), %rdi
jne 0x19ed96
movq 0xd0(%rsp), %rax
xorl $0x4, %eax
orl 0x190(%rsp), %eax
jne 0x19faad
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x19faad
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %r8d
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r10d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rax,%rcx), %edx
movq 0x1a8(%rax,%rcx), %rdi
movl 0xd4(%rax,%rcx), %esi
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %r12d
movl %esi, %eax
imull %r12d, %eax
shll $0x5, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %rsi, %rax
shlq $0x5, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rsi, 0x58(%rsp)
subl %esi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x277c11(%rip), %ymm2 # 0x417058
vbroadcastss 0x277c0c(%rip), %ymm3 # 0x41705c
vbroadcastss 0x275237(%rip), %ymm4 # 0x414690
vbroadcastss 0x277bfe(%rip), %ymm5 # 0x417060
vbroadcastss 0x2758ad(%rip), %ymm6 # 0x414d18
vbroadcastss 0x277bf0(%rip), %ymm7 # 0x417064
vbroadcastss 0x277bee(%rip), %ymm18 # 0x41706c
vbroadcastss 0x277be9(%rip), %ymm10 # 0x417070
vbroadcastss 0x277be4(%rip), %ymm11 # 0x417074
vbroadcastss 0x277bdf(%rip), %ymm12 # 0x417078
vbroadcastss 0x277bda(%rip), %ymm13 # 0x41707c
vpbroadcastd 0x27586d(%rip), %ymm14 # 0x414d18
vbroadcastss 0x277bb4(%rip), %ymm15 # 0x417068
vbroadcastss 0x277bce(%rip), %ymm20 # 0x41708c
vbroadcastss 0x277bc8(%rip), %ymm21 # 0x417090
vbroadcastss 0x277bc2(%rip), %ymm22 # 0x417094
vbroadcastss 0x277bbc(%rip), %ymm23 # 0x417098
vbroadcastss 0x277bb6(%rip), %ymm24 # 0x41709c
vbroadcastss 0x277bb0(%rip), %ymm25 # 0x4170a0
vbroadcastss 0x277baa(%rip), %ymm26 # 0x4170a4
vbroadcastss 0x277ba4(%rip), %ymm27 # 0x4170a8
vbroadcastss 0x277b9e(%rip), %ymm28 # 0x4170ac
vbroadcastss 0x277b98(%rip), %ymm29 # 0x4170b0
vbroadcastss 0x277b92(%rip), %ymm30 # 0x4170b4
vbroadcastss 0x2798b8(%rip), %ymm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x19fa92
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %ecx
movl %ecx, 0x98(%rsp)
movl 0x10c(%rsp), %ecx
movl %ecx, 0x30(%rsp)
movq 0x120(%rsp), %r9
movq 0x10(%rsp), %rcx
imulq %rcx, %r9
imulq 0xf0(%rsp), %r9
addq 0xe0(%rsp), %r9
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x5, %rcx
movq %rcx, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x19fa83
xorl %r11d, %r11d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19f5d1
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %ymm1
jmp 0x19f5d5
vxorps %xmm1, %xmm1, %xmm1
movl %r11d, 0x38(%rsp)
movq %r9, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19f740
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x19f720
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %r14d, %r14d
xorl %ebx, %ebx
movl %ebx, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x19f70c
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19f70c
cmpl $0x0, 0x58(%rsp)
jle 0x19f70c
cmpl 0x98(%rsp), %eax
jge 0x19f70c
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r11, %r9
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r15
movq %r14, %rdi
testl %ecx, %ecx
js 0x19f700
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x19f700
cmpl %ebp, %eax
jge 0x19f700
shll $0x2, %eax
cltq
vbroadcastss (%r9,%rax,4), %ymm8
vbroadcastss 0x4(%r9,%rax,4), %ymm9
vbroadcastss 0x8(%r9,%rax,4), %ymm16
vbroadcastss 0xc(%r9,%rax,4), %ymm17
movl %edi, %eax
andl $-0x20, %eax
vfmadd231ps (%r13,%rax,4), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rax,4), %ymm9, %ymm1 # ymm1 = (ymm9 * mem) + ymm1
vfmadd231ps 0x40(%r13,%rax,4), %ymm16, %ymm1 # ymm1 = (ymm16 * mem) + ymm1
vfmadd231ps 0x60(%r13,%rax,4), %ymm17, %ymm1 # ymm1 = (ymm17 * mem) + ymm1
addq $0x20, %rdi
addl %r8d, %ecx
decq %r15
jne 0x19f6a9
incq %rbx
addq 0xb0(%rsp), %r14
cmpq %r12, %rbx
jne 0x19f656
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19f635
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x19fa59
leaq 0x27a5d3(%rip), %rcx # 0x419d28
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
jmpq *%rax
vmaxps %ymm0, %ymm1, %ymm1
jmp 0x19fa68
vminps %ymm2, %ymm1, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vcmpleps %ymm0, %ymm9, %k1
vmaxps 0x27787d(%rip){1to8}, %ymm9, %ymm8 # 0x417080
vpsrld $0x17, %ymm8, %ymm9
vpbroadcastd 0x277871(%rip), %ymm16 # 0x417084
vpternlogd $0xea, 0x274e72(%rip){1to8}, %ymm16, %ymm8 # 0x414690
vcmpltps %ymm20, %ymm8, %k2
vaddps %ymm21, %ymm8, %ymm16
vaddps %ymm8, %ymm16, %ymm16 {%k2}
vmulps %ymm16, %ymm16, %ymm8
vmovaps %ymm22, %ymm17
vfmadd213ps %ymm23, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm23
vfmadd213ps %ymm24, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm24
vfmadd213ps %ymm25, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm25
vfmadd213ps %ymm26, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm26
vfmadd213ps %ymm27, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm27
vfmadd213ps %ymm28, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm28
vfmadd213ps %ymm29, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm29
vfmadd213ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm30
vmulps %ymm16, %ymm8, %ymm19
vmulps %ymm17, %ymm19, %ymm17
vpaddd 0x277805(%rip){1to8}, %ymm9, %ymm9 # 0x417088
vcvtdq2ps %ymm9, %ymm9
vsubps %ymm6, %ymm9, %ymm9 {%k2}
vfmadd231ps %ymm15, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm15) + ymm17
vfmsub231ps %ymm8, %ymm4, %ymm17 # ymm17 = (ymm4 * ymm8) - ymm17
vsubps %ymm16, %ymm17, %ymm8
vfmsub231ps %ymm9, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm9) - ymm8
vmulps %ymm31, %ymm8, %ymm8
vbroadcastss 0x277e8f(%rip), %ymm8 {%k1} # 0x417744
vminps %ymm2, %ymm8, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vrcpps %ymm9, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm9 # ymm9 = -(ymm9 * ymm8) + ymm8
vfnmadd213ps %ymm21, %ymm31, %ymm9 # ymm9 = -(ymm31 * ymm9) + ymm21
vmulps %ymm1, %ymm9, %ymm1
jmp 0x19fa68
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps (%rax){1to8}, %ymm1, %ymm1
vminps 0x4(%rax){1to8}, %ymm1, %ymm1
jmp 0x19fa68
vxorps 0x2776d4(%rip){1to8}, %ymm1, %ymm1 # 0x417054
vminps %ymm2, %ymm1, %ymm1
vmaxps %ymm3, %ymm1, %ymm1
vmovaps %ymm4, %ymm8
vfmadd231ps %ymm5, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm5) + ymm8
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm1 # ymm1 = (ymm16 * ymm7) - ymm1
vfmsub231ps 0x279433(%rip){1to8}, %ymm16, %ymm1 # ymm1 = (ymm16 * mem) - ymm1
vmulps %ymm1, %ymm1, %ymm8
vmovaps %ymm18, %ymm9
vfmadd213ps %ymm10, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm10
vfmadd213ps %ymm11, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm11
vfmadd213ps %ymm12, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm12
vfmadd213ps %ymm13, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm13
vfmadd213ps %ymm4, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm4
vfmadd213ps %ymm1, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm1
vaddps %ymm6, %ymm9, %ymm8
vcvttps2dq %ymm16, %ymm1
vpslld $0x17, %ymm1, %ymm1
vpaddd %ymm1, %ymm14, %ymm1
vfmadd213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm6
vrcpps %ymm1, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm1 # ymm1 = -(ymm1 * ymm8) + ymm8
jmp 0x19fa68
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps %ymm0, %ymm1, %ymm8
vminps %ymm0, %ymm1, %ymm1
vfmadd132ps (%rax){1to8}, %ymm8, %ymm1 # ymm1 = (ymm1 * mem) + ymm8
jmp 0x19fa68
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vbroadcastss (%rax), %ymm8
vfmadd213ps 0x4(%rax){1to8}, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm8) + mem
vmaxps %ymm0, %ymm8, %ymm8
vminps %ymm6, %ymm8, %ymm8
vmulps %ymm1, %ymm8, %ymm1
jmp 0x19fa68
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
vmovups %ymm1, (%r9)
addq $0x20, %r9
incl %r11d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r11d
jne 0x19f5bd
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x19f5a4
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rcx
jne 0x19f536
movl 0xc8(%rsp), %ecx
xorl $0x4, %ecx
movq 0xd0(%rsp), %rax
xorl $0x8, %eax
movl %ecx, 0x198(%rsp)
orl %ecx, %eax
jne 0x1a0256
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a0256
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %r9d
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r11d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rax,%rcx), %edx
movq 0x1a8(%rax,%rcx), %rdi
movl 0xd4(%rax,%rcx), %r8d
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %esi
movl %r8d, %eax
imull %esi, %eax
shll $0x5, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %r8, %rax
shlq $0x5, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %r8, 0x58(%rsp)
subl %r8d, %eax
imull %r9d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x2774bc(%rip), %xmm2 # 0x417058
vbroadcastss 0x2774b7(%rip), %xmm3 # 0x41705c
vbroadcastss 0x274ae2(%rip), %xmm4 # 0x414690
vbroadcastss 0x2774a9(%rip), %xmm5 # 0x417060
vbroadcastss 0x275158(%rip), %xmm6 # 0x414d18
vbroadcastss 0x27749b(%rip), %xmm7 # 0x417064
vbroadcastss 0x277499(%rip), %xmm18 # 0x41706c
vbroadcastss 0x277494(%rip), %xmm10 # 0x417070
vbroadcastss 0x27748f(%rip), %xmm11 # 0x417074
vbroadcastss 0x27748a(%rip), %xmm12 # 0x417078
vbroadcastss 0x277485(%rip), %xmm13 # 0x41707c
vpbroadcastd 0x275118(%rip), %xmm14 # 0x414d18
vbroadcastss 0x27745f(%rip), %xmm15 # 0x417068
vbroadcastss 0x277481(%rip), %xmm22 # 0x417094
vbroadcastss 0x27747f(%rip), %xmm24 # 0x41709c
vbroadcastss 0x277479(%rip), %xmm25 # 0x4170a0
vbroadcastss 0x277473(%rip), %xmm26 # 0x4170a4
vbroadcastss 0x27746d(%rip), %xmm27 # 0x4170a8
vbroadcastss 0x277467(%rip), %xmm28 # 0x4170ac
vbroadcastss 0x277461(%rip), %xmm29 # 0x4170b0
vbroadcastss 0x27745b(%rip), %xmm30 # 0x4170b4
vbroadcastss 0x279189(%rip), %xmm31 # 0x418dec
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x1a023b
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r15d
movl 0x30(%rax), %ecx
movl %ecx, 0x98(%rsp)
movl 0x10c(%rsp), %ecx
movl %ecx, 0x30(%rsp)
movq 0x120(%rsp), %r8
movq 0x10(%rsp), %rcx
imulq %rcx, %r8
imulq 0xf0(%rsp), %r8
addq 0xe0(%rsp), %r8
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x4, %rcx
movq %rcx, 0x1a0(%rsp)
xorl %ebp, %ebp
cmpl $0x0, 0x30(%rsp)
jle 0x1a022c
xorl %r10d, %r10d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x19fd09
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %xmm1
jmp 0x19fd0d
vxorps %xmm1, %xmm1, %xmm1
movl %r10d, 0x38(%rsp)
movq %r8, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x19fecc
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %esi, %esi
movq %rax, 0x78(%rsp)
jle 0x19feac
movq 0x70(%rsp), %rbx
imulq %rax, %rbx
addq 0x60(%rsp), %rbx
xorl %r14d, %r14d
xorl %edi, %edi
movl %edi, %eax
subl %esi, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %ebp, %eax
js 0x19fe98
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x19fe98
cmpl $0x0, 0x58(%rsp)
jle 0x19fe98
cmpl 0x98(%rsp), %eax
jge 0x19fe98
movslq %eax, %r10
imulq 0x90(%rsp), %r10
addq %rbx, %r10
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r12
movq %r14, %r8
testl %ecx, %ecx
js 0x19fe88
movl %ecx, %eax
cltd
idivl %r11d
testl %edx, %edx
jne 0x19fe88
cmpl %r15d, %eax
jge 0x19fe88
shll $0x3, %eax
cltq
vbroadcastss (%r10,%rax,4), %xmm8
vbroadcastss 0x4(%r10,%rax,4), %xmm9
vbroadcastss 0x8(%r10,%rax,4), %xmm16
vbroadcastss 0xc(%r10,%rax,4), %xmm17
vbroadcastss 0x10(%r10,%rax,4), %xmm19
vbroadcastss 0x14(%r10,%rax,4), %xmm20
vbroadcastss 0x18(%r10,%rax,4), %xmm21
vbroadcastss 0x1c(%r10,%rax,4), %xmm23
movl %r8d, %eax
andl $-0x20, %eax
vfmadd231ps (%r13,%rax,4), %xmm8, %xmm1 # xmm1 = (xmm8 * mem) + xmm1
vfmadd231ps 0x10(%r13,%rax,4), %xmm9, %xmm1 # xmm1 = (xmm9 * mem) + xmm1
vfmadd231ps 0x20(%r13,%rax,4), %xmm16, %xmm1 # xmm1 = (xmm16 * mem) + xmm1
vfmadd231ps 0x30(%r13,%rax,4), %xmm17, %xmm1 # xmm1 = (xmm17 * mem) + xmm1
vfmadd231ps 0x40(%r13,%rax,4), %xmm19, %xmm1 # xmm1 = (xmm19 * mem) + xmm1
vfmadd231ps 0x50(%r13,%rax,4), %xmm20, %xmm1 # xmm1 = (xmm20 * mem) + xmm1
vfmadd231ps 0x60(%r13,%rax,4), %xmm21, %xmm1 # xmm1 = (xmm21 * mem) + xmm1
vfmadd231ps 0x70(%r13,%rax,4), %xmm23, %xmm1 # xmm1 = (xmm23 * mem) + xmm1
addq $0x20, %r8
addl %r9d, %ecx
decq %r12
jne 0x19fde3
incq %rdi
addq 0xb0(%rsp), %r14
cmpq %rsi, %rdi
jne 0x19fd8d
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x19fd6d
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x1a0202
leaq 0x279e5f(%rip), %rcx # 0x419d40
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r8
movl 0x38(%rsp), %r10d
jmpq *%rax
vmaxps %xmm0, %xmm1, %xmm1
jmp 0x1a0211
vminps %xmm2, %xmm1, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vcmpleps %xmm0, %xmm9, %k1
vmaxps 0x2770ec(%rip){1to4}, %xmm9, %xmm8 # 0x417080
vpsrld $0x17, %xmm8, %xmm9
vpbroadcastd 0x2770e0(%rip), %xmm16 # 0x417084
vpternlogd $0xea, 0x2746e1(%rip){1to4}, %xmm16, %xmm8 # 0x414690
vcmpltps 0x2770d2(%rip){1to4}, %xmm8, %k2 # 0x41708c
vaddps 0x2770cc(%rip){1to4}, %xmm8, %xmm16 # 0x417090
vaddps %xmm8, %xmm16, %xmm16 {%k2}
vmulps %xmm16, %xmm16, %xmm8
vmovaps %xmm22, %xmm17
vfmadd213ps 0x2770b8(%rip){1to4}, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + mem
vfmadd213ps %xmm24, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm24
vfmadd213ps %xmm25, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm25
vfmadd213ps %xmm26, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm26
vfmadd213ps %xmm27, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm27
vfmadd213ps %xmm28, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm28
vfmadd213ps %xmm29, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm29
vfmadd213ps %xmm30, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm30
vmulps %xmm16, %xmm8, %xmm19
vmulps %xmm17, %xmm19, %xmm17
vpaddd 0x277068(%rip){1to4}, %xmm9, %xmm9 # 0x417088
vcvtdq2ps %xmm9, %xmm9
vsubps %xmm6, %xmm9, %xmm9 {%k2}
vfmadd231ps %xmm15, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm15) + xmm17
vfmsub231ps %xmm8, %xmm4, %xmm17 # xmm17 = (xmm4 * xmm8) - xmm17
vsubps %xmm16, %xmm17, %xmm8
vfnmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = -(xmm7 * xmm9) + xmm8
vaddps %xmm8, %xmm8, %xmm8
vbroadcastss 0x2776f3(%rip), %xmm8 {%k1} # 0x417744
vminps %xmm2, %xmm8, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vrcpps %xmm9, %xmm8
vaddps %xmm8, %xmm8, %xmm16
vfmsub213ps %xmm31, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) - xmm31
vfnmadd213ps %xmm16, %xmm8, %xmm9 # xmm9 = -(xmm8 * xmm9) + xmm16
vfmsub231ps %xmm9, %xmm1, %xmm1 # xmm1 = (xmm1 * xmm9) - xmm1
jmp 0x1a0211
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps (%rax){1to4}, %xmm1, %xmm1
vminps 0x4(%rax){1to4}, %xmm1, %xmm1
jmp 0x1a0211
vxorps 0x276f30(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm1, %xmm1
vmovaps %xmm4, %xmm8
vfmadd231ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm5) + xmm8
vcvttps2dq %xmm8, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm8, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm1 # xmm1 = (xmm16 * xmm7) - xmm1
vfmsub231ps 0x278c8a(%rip){1to4}, %xmm16, %xmm1 # xmm1 = (xmm16 * mem) - xmm1
vmulps %xmm1, %xmm1, %xmm8
vmovaps %xmm18, %xmm9
vfmadd213ps %xmm10, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm10
vfmadd213ps %xmm11, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm11
vfmadd213ps %xmm12, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm12
vfmadd213ps %xmm13, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm13
vfmadd213ps %xmm4, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm4
vfmadd213ps %xmm1, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm1
vaddps %xmm6, %xmm9, %xmm8
vcvttps2dq %xmm16, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm6
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm6
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x1a0211
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps %xmm0, %xmm1, %xmm8
vminps %xmm0, %xmm1, %xmm1
vfmadd132ps (%rax){1to4}, %xmm8, %xmm1 # xmm1 = (xmm1 * mem) + xmm8
jmp 0x1a0211
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vbroadcastss (%rax), %xmm8
vfmadd213ps 0x4(%rax){1to4}, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + mem
vmaxps %xmm0, %xmm8, %xmm8
vminps %xmm6, %xmm8, %xmm8
vmulps %xmm1, %xmm8, %xmm1
jmp 0x1a0211
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r8
movl 0x38(%rsp), %r10d
vmovups %xmm1, (%r8)
addq $0x10, %r8
incl %r10d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r10d
jne 0x19fcf5
incl %ebp
cmpl 0x80(%rsp), %ebp
jne 0x19fcdc
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rcx
jne 0x19fc6d
movq 0xd0(%rsp), %rax
xorl $0x1, %eax
orl %eax, 0x190(%rsp)
jne 0x1a095e
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a095e
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %r8d
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r10d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x58(%rsp)
movl 0x114(%rax,%rcx), %edx
movq 0x1a8(%rax,%rcx), %rdi
movl 0xd4(%rax,%rcx), %r9d
movq %rcx, 0x150(%rsp)
movl 0xd8(%rax,%rcx), %r12d
movl %r9d, %eax
imull %r12d, %eax
shll $0x3, %eax
cltq
movq %rax, 0xb8(%rsp)
leaq (,%r9,8), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
subl %r9d, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x276d21(%rip), %ymm2 # 0x417058
vbroadcastss 0x276d1c(%rip), %ymm3 # 0x41705c
vbroadcastss 0x274347(%rip), %ymm4 # 0x414690
vbroadcastss 0x276d0e(%rip), %ymm5 # 0x417060
vbroadcastss 0x2749bd(%rip), %ymm6 # 0x414d18
vbroadcastss 0x276d00(%rip), %ymm7 # 0x417064
vbroadcastss 0x276cfe(%rip), %ymm18 # 0x41706c
vbroadcastss 0x276cf9(%rip), %ymm10 # 0x417070
vbroadcastss 0x276cf4(%rip), %ymm11 # 0x417074
vbroadcastss 0x276cef(%rip), %ymm12 # 0x417078
vbroadcastss 0x276cea(%rip), %ymm13 # 0x41707c
vpbroadcastd 0x27497d(%rip), %ymm14 # 0x414d18
vbroadcastss 0x276cc4(%rip), %ymm15 # 0x417068
vbroadcastss 0x276cde(%rip), %ymm20 # 0x41708c
vbroadcastss 0x276cd8(%rip), %ymm21 # 0x417090
vbroadcastss 0x276cd2(%rip), %ymm22 # 0x417094
vbroadcastss 0x276ccc(%rip), %ymm23 # 0x417098
vbroadcastss 0x276cc6(%rip), %ymm24 # 0x41709c
vbroadcastss 0x276cc0(%rip), %ymm25 # 0x4170a0
vbroadcastss 0x276cba(%rip), %ymm26 # 0x4170a4
vbroadcastss 0x276cb4(%rip), %ymm27 # 0x4170a8
vbroadcastss 0x276cae(%rip), %ymm28 # 0x4170ac
vbroadcastss 0x276ca8(%rip), %ymm29 # 0x4170b0
vbroadcastss 0x276ca2(%rip), %ymm30 # 0x4170b4
vbroadcastss 0x2789c8(%rip), %ymm31 # 0x418de4
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movq %r9, 0x98(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x1a0943
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %ecx
movl %ecx, 0x24(%rsp)
movl 0x10c(%rsp), %ecx
movl %ecx, 0x30(%rsp)
movq 0x120(%rsp), %r11
movq 0x10(%rsp), %rcx
imulq %rcx, %r11
imulq 0xf0(%rsp), %r11
addq 0xe0(%rsp), %r11
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x5, %rcx
movq %rcx, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x1a0934
xorl %ebx, %ebx
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x1a04c5
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %ymm1
jmp 0x1a04c9
vxorps %xmm1, %xmm1, %xmm1
movl %ebx, 0x38(%rsp)
movq %r11, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x1a05f5
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x1a05d5
movq 0x70(%rsp), %r14
imulq %rax, %r14
addq 0x60(%rsp), %r14
xorl %r15d, %r15d
xorl %r11d, %r11d
movl %r11d, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x1a05c1
cltd
idivl 0x58(%rsp)
testl %edx, %edx
jne 0x1a05c1
cmpl 0x24(%rsp), %eax
jge 0x1a05c1
movq 0x98(%rsp), %rdi
testl %edi, %edi
jle 0x1a05c1
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r14, %r9
movl 0x40(%rsp), %ecx
movq %r15, %rbx
testl %ecx, %ecx
js 0x1a05b5
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x1a05b5
cmpl %ebp, %eax
jge 0x1a05b5
cltq
vbroadcastss (%r9,%rax,4), %ymm8
movl %ebx, %eax
andl $-0x8, %eax
vfmadd231ps (%r13,%rax,4), %ymm8, %ymm1 # ymm1 = (ymm8 * mem) + ymm1
addq $0x8, %rbx
addl %r8d, %ecx
decq %rdi
jne 0x1a058f
incq %r11
addq 0xb0(%rsp), %r15
cmpq %r12, %r11
jne 0x1a054a
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x1a0528
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x1a090d
leaq 0x27974e(%rip), %rcx # 0x419d58
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r11
movl 0x38(%rsp), %ebx
jmpq *%rax
vmaxps %ymm0, %ymm1, %ymm1
jmp 0x1a091b
vminps %ymm2, %ymm1, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vcmpleps %ymm0, %ymm9, %k1
vmaxps 0x2769c9(%rip){1to8}, %ymm9, %ymm8 # 0x417080
vpsrld $0x17, %ymm8, %ymm9
vpbroadcastd 0x2769bd(%rip), %ymm16 # 0x417084
vpternlogd $0xea, 0x273fbe(%rip){1to8}, %ymm16, %ymm8 # 0x414690
vcmpltps %ymm20, %ymm8, %k2
vaddps %ymm21, %ymm8, %ymm16
vaddps %ymm8, %ymm16, %ymm16 {%k2}
vmulps %ymm16, %ymm16, %ymm8
vmovaps %ymm22, %ymm17
vfmadd213ps %ymm23, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm23
vfmadd213ps %ymm24, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm24
vfmadd213ps %ymm25, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm25
vfmadd213ps %ymm26, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm26
vfmadd213ps %ymm27, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm27
vfmadd213ps %ymm28, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm28
vfmadd213ps %ymm29, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm29
vfmadd213ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm30
vmulps %ymm16, %ymm8, %ymm19
vmulps %ymm17, %ymm19, %ymm17
vpaddd 0x276951(%rip){1to8}, %ymm9, %ymm9 # 0x417088
vcvtdq2ps %ymm9, %ymm9
vsubps %ymm6, %ymm9, %ymm9 {%k2}
vfmadd231ps %ymm15, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm15) + ymm17
vfmsub231ps %ymm8, %ymm4, %ymm17 # ymm17 = (ymm4 * ymm8) - ymm17
vsubps %ymm16, %ymm17, %ymm8
vfmsub231ps %ymm9, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm9) - ymm8
vmulps %ymm31, %ymm8, %ymm8
vbroadcastss 0x276fdb(%rip), %ymm8 {%k1} # 0x417744
vminps %ymm2, %ymm8, %ymm8
vmaxps %ymm3, %ymm8, %ymm8
vmovaps %ymm5, %ymm9
vfmadd213ps %ymm4, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm4
vrndscaleps $0x1, %ymm9, %ymm16
vcmpltps %ymm16, %ymm9, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm8 # ymm8 = (ymm16 * ymm7) - ymm8
vfnmsub231ps %ymm15, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm15) - ymm8
vmulps %ymm8, %ymm8, %ymm9
vmovaps %ymm18, %ymm17
vfmadd213ps %ymm10, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm4
vfmadd213ps %ymm8, %ymm9, %ymm17 # ymm17 = (ymm9 * ymm17) + ymm8
vaddps %ymm6, %ymm17, %ymm8
vcvttps2dq %ymm16, %ymm9
vpslld $0x17, %ymm9, %ymm9
vpaddd %ymm14, %ymm9, %ymm9
vfmadd213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm6
vrcpps %ymm9, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm9 # ymm9 = -(ymm9 * ymm8) + ymm8
vfnmadd213ps %ymm21, %ymm31, %ymm9 # ymm9 = -(ymm31 * ymm9) + ymm21
vmulps %ymm1, %ymm9, %ymm1
jmp 0x1a091b
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps (%rax){1to8}, %ymm1, %ymm1
vminps 0x4(%rax){1to8}, %ymm1, %ymm1
jmp 0x1a091b
vxorps 0x276820(%rip){1to8}, %ymm1, %ymm1 # 0x417054
vminps %ymm2, %ymm1, %ymm1
vmaxps %ymm3, %ymm1, %ymm1
vmovaps %ymm4, %ymm8
vfmadd231ps %ymm5, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm5) + ymm8
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm1 # ymm1 = (ymm16 * ymm7) - ymm1
vfmsub231ps 0x27857f(%rip){1to8}, %ymm16, %ymm1 # ymm1 = (ymm16 * mem) - ymm1
vmulps %ymm1, %ymm1, %ymm8
vmovaps %ymm18, %ymm9
vfmadd213ps %ymm10, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm10
vfmadd213ps %ymm11, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm11
vfmadd213ps %ymm12, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm12
vfmadd213ps %ymm13, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm13
vfmadd213ps %ymm4, %ymm1, %ymm9 # ymm9 = (ymm1 * ymm9) + ymm4
vfmadd213ps %ymm1, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm9) + ymm1
vaddps %ymm6, %ymm9, %ymm8
vcvttps2dq %ymm16, %ymm1
vpslld $0x17, %ymm1, %ymm1
vpaddd %ymm1, %ymm14, %ymm1
vfmadd213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm6
vrcpps %ymm1, %ymm8
vfmsub213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) - ymm6
vfnmadd132ps %ymm8, %ymm8, %ymm1 # ymm1 = -(ymm1 * ymm8) + ymm8
jmp 0x1a091b
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vmaxps %ymm0, %ymm1, %ymm8
vminps %ymm0, %ymm1, %ymm1
vfmadd132ps (%rax){1to8}, %ymm8, %ymm1 # ymm1 = (ymm1 * mem) + ymm8
jmp 0x1a091b
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
vbroadcastss (%rax), %ymm8
vfmadd213ps 0x4(%rax){1to8}, %ymm1, %ymm8 # ymm8 = (ymm1 * ymm8) + mem
vmaxps %ymm0, %ymm8, %ymm8
vminps %ymm6, %ymm8, %ymm8
vmulps %ymm1, %ymm8, %ymm1
jmp 0x1a091b
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r11
movl 0x38(%rsp), %ebx
vmovups %ymm1, (%r11)
addq $0x20, %r11
incl %ebx
incl 0x40(%rsp)
cmpl 0x30(%rsp), %ebx
jne 0x1a04b1
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x1a0499
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rcx
jne 0x1a042e
movl 0xc8(%rsp), %ecx
xorl $0x1, %ecx
movq 0xd0(%rsp), %rax
xorl $0x8, %eax
movl %ecx, 0xc8(%rsp)
orl %ecx, %eax
jne 0x1a0e2e
movslq 0x118(%rsp), %rax
movq %rax, 0x190(%rsp)
testq %rax, %rax
jle 0x1a0e2e
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %ebp
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r12d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rax,%rcx), %r13d
movq 0x1a8(%rax,%rcx), %rdx
movl 0xd4(%rax,%rcx), %esi
movq %rcx, 0x88(%rsp)
movl 0xd8(%rax,%rcx), %ebx
movq 0xe0(%rsp), %rax
movq %rax, 0x1e8(%rsp)
movq 0xf0(%rsp), %rax
imulq 0x120(%rsp), %rax
movq %rax, 0x1f8(%rsp)
movq 0xa8(%rsp), %rcx
movl 0x2c(%rcx), %r14d
movl 0x30(%rcx), %eax
movl %eax, 0x98(%rsp)
movl %esi, %eax
imull %ebx, %eax
shll $0x3, %eax
cltq
movq %rax, 0xb8(%rsp)
movl $0x1, %eax
subl %esi, %eax
imull %ebp, %eax
movl %eax, 0xc0(%rsp)
decl %r13d
movl 0x38(%rcx), %eax
movq %rax, 0x48(%rsp)
movl 0x10c(%rsp), %eax
movl %eax, 0x68(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
movq %rsi, 0x58(%rsp)
leaq (,%rsi,8), %rax
movq %rax, 0xb0(%rsp)
movq $0x0, 0x10(%rsp)
movq %r13, 0x50(%rsp)
movq %rdx, 0x30(%rsp)
cmpl $0x0, 0x80(%rsp)
jle 0x1a0e13
movq 0x1f8(%rsp), %rsi
movq 0x10(%rsp), %rcx
imulq %rcx, %rsi
movq 0x18(%rsp), %rax
movq 0x48(%rax), %rdi
imulq %rcx, %rdi
imulq 0x18(%rax), %rdi
addq 0x1e8(%rsp), %rsi
movq %rsi, 0x38(%rsp)
addq 0x8(%rax), %rdi
movq %rdi, 0x1a0(%rsp)
xorl %r15d, %r15d
cmpl $0x0, 0x68(%rsp)
jle 0x1a0e02
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rsi
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rsi
movq %rsi, 0x90(%rsp)
movq 0x18(%rsp), %rax
movq 0x88(%rsp), %rcx
movq 0x118(%rax,%rcx), %rax
movq %rax, 0x150(%rsp)
xorl %ecx, %ecx
movl 0xc0(%rsp), %eax
movl %eax, 0x40(%rsp)
movl %ecx, 0x28(%rsp)
testq %rdx, %rdx
je 0x1a0b70
movq 0x10(%rsp), %rax
vmovss (%rdx,%rax,4), %xmm0
jmp 0x1a0b74
vxorps %xmm0, %xmm0, %xmm0
cmpl $0x0, 0x48(%rsp)
jle 0x1a0c60
vxorps %xmm1, %xmm1, %xmm1
xorl %eax, %eax
movq 0x1a0(%rsp), %rdi
testl %ebx, %ebx
movq %rax, 0x78(%rsp)
jle 0x1a0c3f
movq 0x70(%rsp), %r8
imulq %rax, %r8
addq 0x60(%rsp), %r8
xorl %r13d, %r13d
xorl %r10d, %r10d
movl %r10d, %eax
subl %ebx, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %r15d, %eax
js 0x1a0c2b
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x1a0c2b
cmpl $0x0, 0x58(%rsp)
jle 0x1a0c2b
cmpl 0x98(%rsp), %eax
jge 0x1a0c2b
movslq %eax, %r11
imulq 0x90(%rsp), %r11
addq %r8, %r11
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %rsi
movq %r13, %r9
testl %ecx, %ecx
js 0x1a0c20
movl %ecx, %eax
cltd
idivl %r12d
testl %edx, %edx
jne 0x1a0c20
cmpl %r14d, %eax
jge 0x1a0c20
shll $0x3, %eax
cltq
vmovaps (%r11,%rax,4), %ymm2
movl %r9d, %eax
andl $-0x8, %eax
vfmadd231ps (%rdi,%rax,4), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x8, %r9
addl %ebp, %ecx
decq %rsi
jne 0x1a0bf6
incq %r10
addq 0xb0(%rsp), %r13
cmpq %rbx, %r10
jne 0x1a0bae
movq 0xb8(%rsp), %rax
leaq (%rdi,%rax,4), %rdi
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x1a0b8d
jmp 0x1a0c64
vxorps %xmm1, %xmm1, %xmm1
vextractf128 $0x1, %ymm1, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm1, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vaddss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm4
movq 0x50(%rsp), %r13
cmpl $0x5, %r13d
ja 0x1a0dc6
leaq 0x2790d7(%rip), %rcx # 0x419d70
movslq (%rcx,%r13,4), %rax
addq %rcx, %rax
movq 0x30(%rsp), %rdx
jmpq *%rax
vmaxss 0x2761d9(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x1a0dcf
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x78(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x27404a(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x30(%rsp), %rdx
vmulss 0x78(%rsp), %xmm0, %xmm0
jmp 0x1a0dcf
movq 0x150(%rsp), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
movl 0x28(%rsp), %ecx
jbe 0x1a0dd3
vmovaps %xmm1, %xmm0
jmp 0x1a0dd3
vmovss 0x276340(%rip), %xmm2 # 0x417058
vminss %xmm2, %xmm4, %xmm1
vxorps 0x27632e(%rip){1to4}, %xmm1, %xmm0 # 0x417054
vcmpltss 0x27632b(%rip), %xmm1, %k1 # 0x41705c
vmovss %xmm2, %xmm0, %xmm0 {%k1}
vzeroupper
callq 0x563e0
movq 0x30(%rsp), %rdx
vmovss 0x273fcc(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x1a0dcf
movq 0x150(%rsp), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x27611b(%rip), %xmm4, %k1 # 0x416e88
vmovss 0x273fa1(%rip), %xmm0 {%k1} # 0x414d18
vmulss %xmm4, %xmm0, %xmm0
jmp 0x1a0dcf
movq 0x150(%rsp), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps 0x2762bc(%rip){1to4}, %xmm2, %xmm0 # 0x417054
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
movl 0x28(%rsp), %ecx
jb 0x1a0dd3
vmovss 0x273f66(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x1a0df7
vmovaps %xmm4, %xmm0
jmp 0x1a0dcf
vmovaps %xmm4, %xmm0
movq 0x30(%rsp), %rdx
movl 0x28(%rsp), %ecx
movq 0x38(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x38(%rsp)
incl %ecx
incl 0x40(%rsp)
cmpl 0x68(%rsp), %ecx
jne 0x1a0b5b
jmp 0x1a0e02
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x1a0dcf
incl %r15d
cmpl 0x80(%rsp), %r15d
jne 0x1a0af5
movq 0x10(%rsp), %rcx
incq %rcx
movq %rcx, 0x10(%rsp)
cmpq 0x190(%rsp), %rcx
jne 0x1a0aa8
movq 0xd0(%rsp), %rax
xorl $0x4, %eax
orl 0x198(%rsp), %eax
movq 0x18(%rsp), %rcx
jne 0x1a157e
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a157e
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rax
movl 0xdc(%rcx,%rax), %r8d
movl 0xe0(%rcx,%rax), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rcx,%rax), %r10d
movl 0xe8(%rcx,%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rcx,%rax), %edx
movq 0x1a8(%rcx,%rax), %rdi
movl 0xd4(%rcx,%rax), %esi
movq %rax, 0x150(%rsp)
movl 0xd8(%rcx,%rax), %r12d
movl %esi, %eax
imull %r12d, %eax
shll $0x4, %eax
cltq
movq %rax, 0xb8(%rsp)
movq %rsi, %rax
shlq $0x4, %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
movq %rsi, 0x58(%rsp)
subl %esi, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edx
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x276148(%rip), %xmm2 # 0x417058
vbroadcastss 0x276143(%rip), %xmm3 # 0x41705c
vbroadcastss 0x27376e(%rip), %xmm4 # 0x414690
vbroadcastss 0x276135(%rip), %xmm5 # 0x417060
vbroadcastss 0x273de4(%rip), %xmm6 # 0x414d18
vbroadcastss 0x276127(%rip), %xmm7 # 0x417064
vbroadcastss 0x276125(%rip), %xmm18 # 0x41706c
vbroadcastss 0x276120(%rip), %xmm10 # 0x417070
vbroadcastss 0x27611b(%rip), %xmm11 # 0x417074
vbroadcastss 0x276116(%rip), %xmm12 # 0x417078
vbroadcastss 0x276111(%rip), %xmm13 # 0x41707c
vpbroadcastd 0x273da4(%rip), %xmm14 # 0x414d18
vbroadcastss 0x2760eb(%rip), %xmm15 # 0x417068
vbroadcastss 0x276105(%rip), %xmm20 # 0x41708c
vbroadcastss 0x2760ff(%rip), %xmm21 # 0x417090
vbroadcastss 0x2760f9(%rip), %xmm22 # 0x417094
vbroadcastss 0x2760f3(%rip), %xmm23 # 0x417098
vbroadcastss 0x2760ed(%rip), %xmm24 # 0x41709c
vbroadcastss 0x2760e7(%rip), %xmm25 # 0x4170a0
vbroadcastss 0x2760e1(%rip), %xmm26 # 0x4170a4
vbroadcastss 0x2760db(%rip), %xmm27 # 0x4170a8
vbroadcastss 0x2760d5(%rip), %xmm28 # 0x4170ac
vbroadcastss 0x2760cf(%rip), %xmm29 # 0x4170b0
vbroadcastss 0x2760c9(%rip), %xmm30 # 0x4170b4
vbroadcastss 0x277df7(%rip), %xmm31 # 0x418dec
movq %rdx, 0x68(%rsp)
movq %rdi, 0x50(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x1a1563
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %r14d
movl 0x30(%rax), %esi
movl %esi, 0x98(%rsp)
movl 0x10c(%rsp), %esi
movl %esi, 0x30(%rsp)
movq 0x120(%rsp), %r9
movq 0x10(%rsp), %rsi
imulq %rsi, %r9
imulq 0xf0(%rsp), %r9
addq 0xe0(%rsp), %r9
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x4, %rsi
movq %rsi, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x1a1554
xorl %r11d, %r11d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %rdi, %rdi
je 0x1a109b
movq 0x1a0(%rsp), %rax
vmovups (%rdi,%rax), %xmm1
jmp 0x1a109f
vxorps %xmm1, %xmm1, %xmm1
movl %r11d, 0x38(%rsp)
movq %r9, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x1a1205
movq 0x48(%rcx), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rcx), %r13
addq 0x8(%rcx), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x1a11e5
movq 0x70(%rsp), %r11
imulq %rax, %r11
addq 0x60(%rsp), %r11
xorl %ebx, %ebx
xorl %ebp, %ebp
movl %ebp, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x1a11d1
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x1a11d1
cmpl $0x0, 0x58(%rsp)
jle 0x1a11d1
cmpl 0x98(%rsp), %eax
jge 0x1a11d1
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r11, %r9
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %r15
movq %rbx, %rdi
testl %ecx, %ecx
js 0x1a11c5
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x1a11c5
cmpl %r14d, %eax
jge 0x1a11c5
shll $0x2, %eax
cltq
vbroadcastss (%r9,%rax,4), %xmm8
vbroadcastss 0x4(%r9,%rax,4), %xmm9
vbroadcastss 0x8(%r9,%rax,4), %xmm16
vbroadcastss 0xc(%r9,%rax,4), %xmm17
movl %edi, %eax
andl $-0x10, %eax
vfmadd231ps (%r13,%rax,4), %xmm8, %xmm1 # xmm1 = (xmm8 * mem) + xmm1
vfmadd231ps 0x10(%r13,%rax,4), %xmm9, %xmm1 # xmm1 = (xmm9 * mem) + xmm1
vfmadd231ps 0x20(%r13,%rax,4), %xmm16, %xmm1 # xmm1 = (xmm16 * mem) + xmm1
vfmadd231ps 0x30(%r13,%rax,4), %xmm17, %xmm1 # xmm1 = (xmm17 * mem) + xmm1
addq $0x10, %rdi
addl %r8d, %ecx
decq %r15
jne 0x1a116d
incq %rbp
addq 0xb0(%rsp), %rbx
cmpq %r12, %rbp
jne 0x1a111a
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x1a10fa
movq 0x68(%rsp), %rdx
cmpl $0x5, %edx
ja 0x1a1525
leaq 0x278b6e(%rip), %rcx # 0x419d88
movslq (%rcx,%rdx,4), %rax
addq %rcx, %rax
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
jmpq *%rax
vmaxps %xmm0, %xmm1, %xmm1
jmp 0x1a1539
vminps %xmm2, %xmm1, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vcmpleps %xmm0, %xmm9, %k1
vmaxps 0x275dae(%rip){1to4}, %xmm9, %xmm8 # 0x417080
vpsrld $0x17, %xmm8, %xmm9
vpbroadcastd 0x275da2(%rip), %xmm16 # 0x417084
vpternlogd $0xea, 0x2733a3(%rip){1to4}, %xmm16, %xmm8 # 0x414690
vcmpltps %xmm20, %xmm8, %k2
vaddps %xmm21, %xmm8, %xmm16
vaddps %xmm8, %xmm16, %xmm16 {%k2}
vmulps %xmm16, %xmm16, %xmm8
vmovaps %xmm22, %xmm17
vfmadd213ps %xmm23, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm23
vfmadd213ps %xmm24, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm24
vfmadd213ps %xmm25, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm25
vfmadd213ps %xmm26, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm26
vfmadd213ps %xmm27, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm27
vfmadd213ps %xmm28, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm28
vfmadd213ps %xmm29, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm29
vfmadd213ps %xmm30, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm30
vmulps %xmm16, %xmm8, %xmm19
vmulps %xmm17, %xmm19, %xmm17
vpaddd 0x275d36(%rip){1to4}, %xmm9, %xmm9 # 0x417088
vcvtdq2ps %xmm9, %xmm9
vsubps %xmm6, %xmm9, %xmm9 {%k2}
vfmadd231ps %xmm15, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm15) + xmm17
vfmsub231ps %xmm8, %xmm4, %xmm17 # xmm17 = (xmm4 * xmm8) - xmm17
vsubps %xmm16, %xmm17, %xmm8
vfnmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = -(xmm7 * xmm9) + xmm8
vaddps %xmm8, %xmm8, %xmm8
vbroadcastss 0x2763c1(%rip), %xmm8 {%k1} # 0x417744
vminps %xmm2, %xmm8, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vrcpps %xmm9, %xmm8
vaddps %xmm8, %xmm8, %xmm16
vfmsub213ps %xmm31, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) - xmm31
vfnmadd213ps %xmm16, %xmm8, %xmm9 # xmm9 = -(xmm8 * xmm9) + xmm16
vfmsub231ps %xmm9, %xmm1, %xmm1 # xmm1 = (xmm1 * xmm9) - xmm1
jmp 0x1a1539
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps (%rax){1to4}, %xmm1, %xmm1
vminps 0x4(%rax){1to4}, %xmm1, %xmm1
jmp 0x1a1539
vxorps 0x275c03(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm1, %xmm1
vmovaps %xmm4, %xmm8
vfmadd231ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm5) + xmm8
vcvttps2dq %xmm8, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm8, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm1 # xmm1 = (xmm16 * xmm7) - xmm1
vfmsub231ps 0x27795d(%rip){1to4}, %xmm16, %xmm1 # xmm1 = (xmm16 * mem) - xmm1
vmulps %xmm1, %xmm1, %xmm8
vmovaps %xmm18, %xmm9
vfmadd213ps %xmm10, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm10
vfmadd213ps %xmm11, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm11
vfmadd213ps %xmm12, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm12
vfmadd213ps %xmm13, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm13
vfmadd213ps %xmm4, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm4
vfmadd213ps %xmm1, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm1
vaddps %xmm6, %xmm9, %xmm8
vcvttps2dq %xmm16, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm6
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm6
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x1a1539
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vmaxps %xmm0, %xmm1, %xmm8
vminps %xmm0, %xmm1, %xmm1
vfmadd132ps (%rax){1to4}, %xmm8, %xmm1 # xmm1 = (xmm1 * mem) + xmm8
jmp 0x1a1539
movq 0x150(%rsp), %rax
movq 0x118(%rcx,%rax), %rax
vbroadcastss (%rax), %xmm8
vfmadd213ps 0x4(%rax){1to4}, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + mem
vmaxps %xmm0, %xmm8, %xmm8
vminps %xmm6, %xmm8, %xmm8
vmulps %xmm1, %xmm8, %xmm1
jmp 0x1a1539
movq 0x18(%rsp), %rcx
movq 0x50(%rsp), %rdi
movq 0x28(%rsp), %r9
movl 0x38(%rsp), %r11d
vmovups %xmm1, (%r9)
addq $0x10, %r9
incl %r11d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r11d
jne 0x1a1087
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x1a106e
movq 0x10(%rsp), %rsi
incq %rsi
movq %rsi, 0x10(%rsp)
cmpq 0xc0(%rsp), %rsi
jne 0x1a0fff
movq 0xd0(%rsp), %rax
xorl $0x1, %eax
orl %eax, 0x198(%rsp)
movq 0x130(%rsp), %rcx
jne 0x1a1cb5
movslq 0x118(%rsp), %rax
movq %rax, 0xc0(%rsp)
testq %rax, %rax
jle 0x1a1cb5
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rdx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rdx), %r8d
movl 0xe0(%rax,%rdx), %esi
movl %esi, 0xa0(%rsp)
movl 0xe4(%rax,%rdx), %r10d
movl 0xe8(%rax,%rdx), %esi
movl %esi, 0x58(%rsp)
movl 0x114(%rax,%rdx), %edi
movq 0x1a8(%rax,%rdx), %r9
movl 0xd4(%rax,%rdx), %r11d
movq %rdx, 0x150(%rsp)
movl 0xd8(%rax,%rdx), %r12d
movl %r11d, %eax
imull %r12d, %eax
shll $0x2, %eax
cltq
movq %rax, 0xb8(%rsp)
leaq (,%r11,4), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, %eax
subl %r11d, %eax
imull %r8d, %eax
movl %eax, 0x88(%rsp)
movq $0x0, 0x10(%rsp)
decl %edi
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x2759f1(%rip), %xmm2 # 0x417058
vbroadcastss 0x2759ec(%rip), %xmm3 # 0x41705c
vbroadcastss 0x273017(%rip), %xmm4 # 0x414690
vbroadcastss 0x2759de(%rip), %xmm5 # 0x417060
vbroadcastss 0x27368d(%rip), %xmm6 # 0x414d18
vbroadcastss 0x2759d0(%rip), %xmm7 # 0x417064
vbroadcastss 0x2759ce(%rip), %xmm18 # 0x41706c
vbroadcastss 0x2759c9(%rip), %xmm10 # 0x417070
vbroadcastss 0x2759c4(%rip), %xmm11 # 0x417074
vbroadcastss 0x2759bf(%rip), %xmm12 # 0x417078
vbroadcastss 0x2759ba(%rip), %xmm13 # 0x41707c
vpbroadcastd 0x27364d(%rip), %xmm14 # 0x414d18
vbroadcastss 0x275994(%rip), %xmm15 # 0x417068
vbroadcastss 0x2759ae(%rip), %xmm20 # 0x41708c
vbroadcastss 0x2759a8(%rip), %xmm21 # 0x417090
vbroadcastss 0x2759a2(%rip), %xmm22 # 0x417094
vbroadcastss 0x27599c(%rip), %xmm23 # 0x417098
vbroadcastss 0x275996(%rip), %xmm24 # 0x41709c
vbroadcastss 0x275990(%rip), %xmm25 # 0x4170a0
vbroadcastss 0x27598a(%rip), %xmm26 # 0x4170a4
vbroadcastss 0x275984(%rip), %xmm27 # 0x4170a8
vbroadcastss 0x27597e(%rip), %xmm28 # 0x4170ac
vbroadcastss 0x275978(%rip), %xmm29 # 0x4170b0
vbroadcastss 0x275972(%rip), %xmm30 # 0x4170b4
vbroadcastss 0x2776a0(%rip), %xmm31 # 0x418dec
movq %rdi, 0x68(%rsp)
movq %r9, 0x50(%rsp)
movq %r11, 0x98(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
testl %eax, %eax
jle 0x1a1c9a
movq 0xa8(%rsp), %rax
movl 0x2c(%rax), %ebp
movl 0x30(%rax), %edx
movl %edx, 0x24(%rsp)
movl 0x10c(%rsp), %edx
movl %edx, 0x30(%rsp)
movq 0x120(%rsp), %rbx
movq 0x10(%rsp), %rdx
imulq %rdx, %rbx
imulq 0xf0(%rsp), %rbx
addq 0xe0(%rsp), %rbx
movl 0x38(%rax), %eax
movq %rax, 0x48(%rsp)
shlq $0x4, %rdx
movq %rdx, 0x1a0(%rsp)
xorl %esi, %esi
cmpl $0x0, 0x30(%rsp)
jle 0x1a1c8b
xorl %r14d, %r14d
movl 0x88(%rsp), %eax
movl %eax, 0x40(%rsp)
testq %r9, %r9
je 0x1a17f7
movq 0x1a0(%rsp), %rax
vmovups (%r9,%rax), %xmm1
jmp 0x1a17fb
vxorps %xmm1, %xmm1, %xmm1
movl %r14d, 0x38(%rsp)
movq %rbx, 0x28(%rsp)
cmpl $0x0, 0x48(%rsp)
jle 0x1a1938
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r13
imulq 0x10(%rsp), %r13
imulq 0x18(%rax), %r13
addq 0x8(%rax), %r13
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rdi
movq 0x130(%rsp), %rcx
imulq %rax, %rdi
movq %rdi, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
xorl %eax, %eax
testl %r12d, %r12d
movq %rax, 0x78(%rsp)
jle 0x1a1918
movq 0x70(%rsp), %r14
imulq %rax, %r14
addq 0x60(%rsp), %r14
xorl %r15d, %r15d
xorl %r11d, %r11d
movl %r11d, %eax
subl %r12d, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %esi, %eax
js 0x1a18fc
cltd
idivl 0x58(%rsp)
testl %edx, %edx
jne 0x1a18fc
cmpl 0x24(%rsp), %eax
jge 0x1a18fc
movq 0x98(%rsp), %rdi
testl %edi, %edi
jle 0x1a18fc
movslq %eax, %r9
imulq 0x90(%rsp), %r9
addq %r14, %r9
movl 0x40(%rsp), %ecx
movq %r15, %rbx
testl %ecx, %ecx
js 0x1a18f0
movl %ecx, %eax
cltd
idivl %r10d
testl %edx, %edx
jne 0x1a18f0
cmpl %ebp, %eax
jge 0x1a18f0
cltq
vbroadcastss (%r9,%rax,4), %xmm8
movl %ebx, %eax
andl $-0x4, %eax
vfmadd231ps (%r13,%rax,4), %xmm8, %xmm1 # xmm1 = (xmm8 * mem) + xmm1
addq $0x4, %rbx
addl %r8d, %ecx
decq %rdi
jne 0x1a18ca
incq %r11
addq 0xb0(%rsp), %r15
cmpq %r12, %r11
movq 0x130(%rsp), %rcx
jne 0x1a1885
movq 0xb8(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x1a1863
movq 0x68(%rsp), %rdi
cmpl $0x5, %edi
ja 0x1a1c62
leaq 0x278453(%rip), %rdx # 0x419da0
movslq (%rdx,%rdi,4), %rax
addq %rdx, %rax
movq 0x50(%rsp), %r9
movq 0x28(%rsp), %rbx
movl 0x38(%rsp), %r14d
jmpq *%rax
vmaxps %xmm0, %xmm1, %xmm1
jmp 0x1a1c71
vminps %xmm2, %xmm1, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vcmpleps %xmm0, %xmm9, %k1
vmaxps 0x275680(%rip){1to4}, %xmm9, %xmm8 # 0x417080
vpsrld $0x17, %xmm8, %xmm9
vpbroadcastd 0x275674(%rip), %xmm16 # 0x417084
vpternlogd $0xea, 0x272c75(%rip){1to4}, %xmm16, %xmm8 # 0x414690
vcmpltps %xmm20, %xmm8, %k2
vaddps %xmm21, %xmm8, %xmm16
vaddps %xmm8, %xmm16, %xmm16 {%k2}
vmulps %xmm16, %xmm16, %xmm8
vmovaps %xmm22, %xmm17
vfmadd213ps %xmm23, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm23
vfmadd213ps %xmm24, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm24
vfmadd213ps %xmm25, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm25
vfmadd213ps %xmm26, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm26
vfmadd213ps %xmm27, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm27
vfmadd213ps %xmm28, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm28
vfmadd213ps %xmm29, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm29
vfmadd213ps %xmm30, %xmm16, %xmm17 # xmm17 = (xmm16 * xmm17) + xmm30
vmulps %xmm16, %xmm8, %xmm19
vmulps %xmm17, %xmm19, %xmm17
vpaddd 0x275608(%rip){1to4}, %xmm9, %xmm9 # 0x417088
vcvtdq2ps %xmm9, %xmm9
vsubps %xmm6, %xmm9, %xmm9 {%k2}
vfmadd231ps %xmm15, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm15) + xmm17
vfmsub231ps %xmm8, %xmm4, %xmm17 # xmm17 = (xmm4 * xmm8) - xmm17
vsubps %xmm16, %xmm17, %xmm8
vfnmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = -(xmm7 * xmm9) + xmm8
vaddps %xmm8, %xmm8, %xmm8
vbroadcastss 0x275c93(%rip), %xmm8 {%k1} # 0x417744
vminps %xmm2, %xmm8, %xmm8
vmaxps %xmm3, %xmm8, %xmm8
vmovaps %xmm5, %xmm9
vfmadd213ps %xmm4, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm4
vcvttps2dq %xmm9, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm9, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm8 # xmm8 = (xmm16 * xmm7) - xmm8
vfnmsub231ps %xmm15, %xmm16, %xmm8 # xmm8 = -(xmm16 * xmm15) - xmm8
vmulps %xmm8, %xmm8, %xmm9
vmovaps %xmm18, %xmm17
vfmadd213ps %xmm10, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm10
vfmadd213ps %xmm11, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm11
vfmadd213ps %xmm12, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm12
vfmadd213ps %xmm13, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm13
vfmadd213ps %xmm4, %xmm8, %xmm17 # xmm17 = (xmm8 * xmm17) + xmm4
vfmadd213ps %xmm8, %xmm9, %xmm17 # xmm17 = (xmm9 * xmm17) + xmm8
vaddps %xmm6, %xmm17, %xmm8
vcvttps2dq %xmm16, %xmm9
vpslld $0x17, %xmm9, %xmm9
vpaddd %xmm14, %xmm9, %xmm9
vfmadd213ps %xmm6, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm6
vrcpps %xmm9, %xmm8
vaddps %xmm8, %xmm8, %xmm16
vfmsub213ps %xmm31, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) - xmm31
vfnmadd213ps %xmm16, %xmm8, %xmm9 # xmm9 = -(xmm8 * xmm9) + xmm16
vfmsub231ps %xmm9, %xmm1, %xmm1 # xmm1 = (xmm1 * xmm9) - xmm1
jmp 0x1a1c71
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rdx
movq 0x118(%rax,%rdx), %rax
vmaxps (%rax){1to4}, %xmm1, %xmm1
vminps 0x4(%rax){1to4}, %xmm1, %xmm1
jmp 0x1a1c71
vxorps 0x2754d0(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm1, %xmm1
vmovaps %xmm4, %xmm8
vfmadd231ps %xmm5, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm5) + xmm8
vcvttps2dq %xmm8, %xmm16
vcvtdq2ps %xmm16, %xmm16
vcmpltps %xmm16, %xmm8, %k1
vsubps %xmm6, %xmm16, %xmm16 {%k1}
vfmsub231ps %xmm7, %xmm16, %xmm1 # xmm1 = (xmm16 * xmm7) - xmm1
vfmsub231ps 0x27722a(%rip){1to4}, %xmm16, %xmm1 # xmm1 = (xmm16 * mem) - xmm1
vmulps %xmm1, %xmm1, %xmm8
vmovaps %xmm18, %xmm9
vfmadd213ps %xmm10, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm10
vfmadd213ps %xmm11, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm11
vfmadd213ps %xmm12, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm12
vfmadd213ps %xmm13, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm13
vfmadd213ps %xmm4, %xmm1, %xmm9 # xmm9 = (xmm1 * xmm9) + xmm4
vfmadd213ps %xmm1, %xmm8, %xmm9 # xmm9 = (xmm8 * xmm9) + xmm1
vaddps %xmm6, %xmm9, %xmm8
vcvttps2dq %xmm16, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm6
vrcpps %xmm1, %xmm8
vfmsub213ps %xmm6, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) - xmm6
vfnmadd132ps %xmm8, %xmm8, %xmm1 # xmm1 = -(xmm1 * xmm8) + xmm8
jmp 0x1a1c71
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rdx
movq 0x118(%rax,%rdx), %rax
vmaxps %xmm0, %xmm1, %xmm8
vminps %xmm0, %xmm1, %xmm1
vfmadd132ps (%rax){1to4}, %xmm8, %xmm1 # xmm1 = (xmm1 * mem) + xmm8
jmp 0x1a1c71
movq 0x18(%rsp), %rax
movq 0x150(%rsp), %rdx
movq 0x118(%rax,%rdx), %rax
vbroadcastss (%rax), %xmm8
vfmadd213ps 0x4(%rax){1to4}, %xmm1, %xmm8 # xmm8 = (xmm1 * xmm8) + mem
vmaxps %xmm0, %xmm8, %xmm8
vminps %xmm6, %xmm8, %xmm8
vmulps %xmm1, %xmm8, %xmm1
jmp 0x1a1c71
movq 0x50(%rsp), %r9
movq 0x28(%rsp), %rbx
movl 0x38(%rsp), %r14d
vmovups %xmm1, (%rbx)
addq $0x10, %rbx
incl %r14d
incl 0x40(%rsp)
cmpl 0x30(%rsp), %r14d
jne 0x1a17e2
incl %esi
cmpl 0x80(%rsp), %esi
jne 0x1a17c9
movq 0x10(%rsp), %rdx
incq %rdx
movq %rdx, 0x10(%rsp)
cmpq 0xc0(%rsp), %rdx
jne 0x1a175e
movq 0xd0(%rsp), %rax
xorl $0x4, %eax
orl 0xc8(%rsp), %eax
jne 0x1a2197
movslq 0x118(%rsp), %rax
movq %rax, 0x190(%rsp)
testq %rax, %rax
jle 0x1a2197
movq 0xd8(%rsp), %rax
movq -0x18(%rax), %rcx
movq 0x18(%rsp), %rax
movl 0xdc(%rax,%rcx), %ebp
movl 0xe0(%rax,%rcx), %edx
movl %edx, 0xa0(%rsp)
movl 0xe4(%rax,%rcx), %r12d
movl 0xe8(%rax,%rcx), %edx
movl %edx, 0x24(%rsp)
movl 0x114(%rax,%rcx), %r13d
movq 0x1a8(%rax,%rcx), %rsi
movl 0xd4(%rax,%rcx), %edx
movq %rcx, 0x88(%rsp)
movl 0xd8(%rax,%rcx), %ebx
movq 0xe0(%rsp), %rax
movq %rax, 0xd8(%rsp)
movq 0xf0(%rsp), %rax
imulq 0x120(%rsp), %rax
movq %rax, 0x1e8(%rsp)
movq 0xa8(%rsp), %rcx
movl 0x2c(%rcx), %r14d
movl 0x30(%rcx), %eax
movl %eax, 0x98(%rsp)
movl %edx, %eax
imull %ebx, %eax
shll $0x2, %eax
cltq
movq %rax, 0xb8(%rsp)
movl $0x1, %eax
subl %edx, %eax
imull %ebp, %eax
movl %eax, 0xc0(%rsp)
decl %r13d
movl 0x38(%rcx), %eax
movq %rax, 0x48(%rsp)
movq 0x130(%rsp), %rcx
movl 0x10c(%rsp), %eax
movl %eax, 0x68(%rsp)
movl 0x110(%rsp), %eax
movl %eax, 0x80(%rsp)
movq %rdx, 0x58(%rsp)
leaq (,%rdx,4), %rax
movq %rax, 0xb0(%rsp)
movq $0x0, 0x10(%rsp)
movq %r13, 0x50(%rsp)
movq %rsi, 0x30(%rsp)
cmpl $0x0, 0x80(%rsp)
jle 0x1a217c
movq 0x1e8(%rsp), %rdi
movq 0x10(%rsp), %rdx
imulq %rdx, %rdi
movq 0x18(%rsp), %rax
movq 0x48(%rax), %r8
imulq %rdx, %r8
imulq 0x18(%rax), %r8
addq 0xd8(%rsp), %rdi
movq %rdi, 0x38(%rsp)
addq 0x8(%rax), %r8
movq %r8, 0x1a0(%rsp)
xorl %r15d, %r15d
cmpl $0x0, 0x68(%rsp)
jle 0x1a216b
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0x60(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rdi
movq 0x130(%rsp), %rcx
imulq %rax, %rdi
movq %rdi, 0x70(%rsp)
imulq %rax, %rdx
movq %rdx, 0x90(%rsp)
movq 0x18(%rsp), %rax
movq 0x88(%rsp), %rdx
movq 0x118(%rax,%rdx), %rax
movq %rax, 0x150(%rsp)
xorl %edx, %edx
movl 0xc0(%rsp), %eax
movl %eax, 0x40(%rsp)
movl %edx, 0x28(%rsp)
testq %rsi, %rsi
je 0x1a1ecb
movq 0x10(%rsp), %rax
vmovss (%rsi,%rax,4), %xmm0
jmp 0x1a1ecf
vxorps %xmm0, %xmm0, %xmm0
cmpl $0x0, 0x48(%rsp)
jle 0x1a1fc3
vxorps %xmm1, %xmm1, %xmm1
xorl %eax, %eax
movq 0x1a0(%rsp), %rdi
testl %ebx, %ebx
movq %rax, 0x78(%rsp)
jle 0x1a1fa2
movq 0x70(%rsp), %r8
imulq %rax, %r8
addq 0x60(%rsp), %r8
xorl %r13d, %r13d
xorl %r10d, %r10d
movl %r10d, %eax
subl %ebx, %eax
incl %eax
imull 0xa0(%rsp), %eax
addl %r15d, %eax
js 0x1a1f86
cltd
idivl 0x24(%rsp)
testl %edx, %edx
jne 0x1a1f86
cmpl $0x0, 0x58(%rsp)
jle 0x1a1f86
cmpl 0x98(%rsp), %eax
jge 0x1a1f86
movslq %eax, %r11
imulq 0x90(%rsp), %r11
addq %r8, %r11
movl 0x40(%rsp), %ecx
movq 0x58(%rsp), %rsi
movq %r13, %r9
testl %ecx, %ecx
js 0x1a1f7b
movl %ecx, %eax
cltd
idivl %r12d
testl %edx, %edx
jne 0x1a1f7b
cmpl %r14d, %eax
jge 0x1a1f7b
shll $0x2, %eax
cltq
vmovaps (%r11,%rax,4), %xmm2
movl %r9d, %eax
andl $-0x4, %eax
vfmadd231ps (%rdi,%rax,4), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
addq $0x4, %r9
addl %ebp, %ecx
decq %rsi
jne 0x1a1f51
incq %r10
addq 0xb0(%rsp), %r13
cmpq %rbx, %r10
movq 0x130(%rsp), %rcx
jne 0x1a1f09
movq 0xb8(%rsp), %rax
leaq (%rdi,%rax,4), %rdi
movq 0x78(%rsp), %rax
incq %rax
cmpq 0x48(%rsp), %rax
jne 0x1a1ee8
jmp 0x1a1fc7
vxorps %xmm1, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm1, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vaddss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm4
movq 0x50(%rsp), %r13
cmpl $0x5, %r13d
ja 0x1a212f
leaq 0x277dc6(%rip), %rdx # 0x419db8
movslq (%rdx,%r13,4), %rax
addq %rdx, %rax
movq 0x30(%rsp), %rsi
jmpq *%rax
vmaxss 0x274e80(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x1a2138
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x78(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x272cf1(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x30(%rsp), %rsi
movq 0x130(%rsp), %rcx
vmulss 0x78(%rsp), %xmm0, %xmm0
jmp 0x1a2138
movq 0x150(%rsp), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
movl 0x28(%rsp), %edx
jbe 0x1a213c
vmovaps %xmm1, %xmm0
jmp 0x1a213c
vmovss 0x274fdf(%rip), %xmm2 # 0x417058
vminss %xmm2, %xmm4, %xmm1
vxorps 0x274fcd(%rip){1to4}, %xmm1, %xmm0 # 0x417054
vcmpltss 0x274fca(%rip), %xmm1, %k1 # 0x41705c
vmovss %xmm2, %xmm0, %xmm0 {%k1}
vzeroupper
callq 0x563e0
movq 0x30(%rsp), %rsi
movq 0x130(%rsp), %rcx
vmovss 0x272c63(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x1a2138
movq 0x150(%rsp), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x274db2(%rip), %xmm4, %k1 # 0x416e88
vmovss 0x272c38(%rip), %xmm0 {%k1} # 0x414d18
vmulss %xmm4, %xmm0, %xmm0
jmp 0x1a2138
movq 0x150(%rsp), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps 0x274f53(%rip){1to4}, %xmm2, %xmm0 # 0x417054
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
movl 0x28(%rsp), %edx
jb 0x1a213c
vmovss 0x272bfd(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x1a2160
vmovaps %xmm4, %xmm0
jmp 0x1a2138
vmovaps %xmm4, %xmm0
movq 0x30(%rsp), %rsi
movl 0x28(%rsp), %edx
movq 0x38(%rsp), %rax
vmovss %xmm0, (%rax)
addq $0x4, %rax
movq %rax, 0x38(%rsp)
incl %edx
incl 0x40(%rsp)
cmpl 0x68(%rsp), %edx
jne 0x1a1eb6
jmp 0x1a216b
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x1a2138
incl %r15d
cmpl 0x80(%rsp), %r15d
jne 0x1a1e48
movq 0x10(%rsp), %rdx
incq %rdx
movq %rdx, 0x10(%rsp)
cmpq 0x190(%rsp), %rdx
jne 0x1a1dfb
movq 0xd0(%rsp), %rax
xorl $0x1, %eax
movq 0x18(%rsp), %rdi
movq (%rdi), %rdx
orl %eax, 0xc8(%rsp)
jne 0x1a2e58
movq -0x18(%rdx), %rax
cmpl $0x0, 0xd0(%rdi,%rax)
jle 0x1a2e58
movq %rdx, 0xd0(%rsp)
imull 0x148(%rsp), %ecx
movq 0x120(%rsp), %rdx
imulq 0xf0(%rsp), %rdx
movq %rdx, 0xc8(%rsp)
movq 0xe0(%rsp), %rdx
movq %rdx, 0xd8(%rsp)
movq %rcx, %rdx
movq 0xa8(%rsp), %rcx
movl 0x30(%rcx), %esi
movl 0x38(%rcx), %edi
movq %rdi, 0x78(%rsp)
movl 0x10c(%rsp), %edi
movl 0x110(%rsp), %r8d
movl %r8d, 0x88(%rsp)
movslq %edx, %rdx
vpbroadcastd 0x2c(%rcx), %zmm11
movq %rdi, 0x30(%rsp)
movslq %edi, %rcx
movq %rcx, 0xc0(%rsp)
shlq $0x2, %rdx
movq %rdx, 0x48(%rsp)
movq $0x0, 0x10(%rsp)
vxorps %xmm12, %xmm12, %xmm12
vpmovsxbq 0x27560e(%rip), %zmm13 # 0x417878
vpmovsxbq 0x27560c(%rip), %zmm14 # 0x417880
vpmovsxbd 0x278202(%rip), %zmm15 # 0x41a480
vpternlogd $0xff, %zmm16, %zmm16, %zmm16
vxorps %xmm17, %xmm17, %xmm17
vpbroadcastq 0x2755d3(%rip), %zmm18 # 0x417868
vpbroadcastd 0x2778ed(%rip), %zmm19 # 0x419b8c
movw $0x400, %bx # imm = 0x400
movw $0x800, %r15w # imm = 0x800
movw $0x1000, %r12w # imm = 0x1000
movw $0x2000, %r13w # imm = 0x2000
movw $0x4000, %r14w # imm = 0x4000
movw $0x8000, %bp # imm = 0x8000
movl %esi, 0x98(%rsp)
vmovdqu64 %zmm11, 0x150(%rsp)
cmpl $0x0, 0x88(%rsp)
jle 0x1a2e2d
movq 0xc8(%rsp), %rdx
movq 0x10(%rsp), %rcx
imulq %rcx, %rdx
addq 0xd8(%rsp), %rdx
movq %rdx, 0x38(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %rdx
movq %rdx, 0x68(%rsp)
movq 0x48(%rax), %rdx
imulq 0x18(%rax), %rdx
imulq %rcx, %rdx
addq 0x8(%rax), %rdx
movq %rdx, 0x1a0(%rsp)
movl $0x0, 0x80(%rsp)
cmpl $0x0, 0x30(%rsp)
jle 0x1a2dee
movq 0xa8(%rsp), %rcx
movslq 0x2c(%rcx), %rdx
movq (%rcx), %rax
movq %rax, 0xb8(%rsp)
movq 0x10(%rcx), %rax
movq 0x40(%rcx), %rcx
imulq %rax, %rcx
movq %rcx, 0x60(%rsp)
movq 0x18(%rsp), %rcx
movq (%rcx), %rcx
movq %rcx, 0x50(%rsp)
movl 0x80(%rsp), %r8d
subl 0x19c(%rsp), %r8d
imulq %rax, %rdx
movq %rdx, 0x40(%rsp)
xorl %edx, %edx
movl %r8d, 0x24(%rsp)
movq 0x68(%rsp), %rax
movq -0x18(%rax), %rdi
movq 0x18(%rsp), %rax
cmpl $0x0, 0x10c(%rax,%rdi)
je 0x1a23b6
movq 0x1a8(%rax,%rdi), %rax
movq 0x10(%rsp), %rcx
vmovss (%rax,%rcx,4), %xmm4
jmp 0x1a23ba
vxorps %xmm4, %xmm4, %xmm4
movq %rdi, 0x28(%rsp)
cmpl $0x0, 0x78(%rsp)
movq %rdx, 0x70(%rsp)
jle 0x1a2bad
movq 0x50(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x18(%rsp), %rcx
leaq (%rcx,%rax), %r9
leaq (%rcx,%rax), %rdi
addq $0xe4, %rdi
movl -0xc(%rdi), %r10d
movl %edx, %eax
subl 0x12c(%rsp), %eax
vpbroadcastd %eax, %zmm0
movq 0x1a0(%rsp), %rax
movq %rax, 0xa0(%rsp)
xorl %ecx, %ecx
movq %r9, 0x148(%rsp)
movl %r10d, 0x58(%rsp)
testl %r10d, %r10d
movq %rcx, 0x90(%rsp)
jle 0x1a2b82
movq 0x60(%rsp), %rax
imulq %rcx, %rax
addq 0xb8(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl 0xe0(%r9), %ecx
xorl %r11d, %r11d
movl %ecx, 0x130(%rsp)
movl %ecx, %eax
imull %r11d, %eax
addl %r8d, %eax
js 0x1a2b76
cltd
idivl 0xe8(%r9)
testl %edx, %edx
jne 0x1a2b76
cmpl %esi, %eax
jge 0x1a2b76
movslq 0xd4(%r9), %rcx
testq %rcx, %rcx
jle 0x1a2b6f
movslq %eax, %r10
imulq 0x40(%rsp), %r10
addq 0xb0(%rsp), %r10
movq %r9, %rdx
leal 0xf(%rcx), %r9d
andl $-0x10, %r9d
movq %rcx, %rax
decq %rax
vmovss %xmm4, %xmm12, %xmm7 # xmm7 = xmm4[0],xmm12[1,2,3]
vpbroadcastq %rax, %zmm1
vpbroadcastd 0xdc(%rdx), %zmm2
imull %r11d, %ecx
movq 0xa0(%rsp), %rax
leaq (%rax,%rcx,4), %r8
xorl %ecx, %ecx
vmovdqa64 %zmm14, %zmm3
vmovdqa64 %zmm13, %zmm5
vmovdqa64 %zmm15, %zmm6
vmovaps %zmm7, %zmm4
vpcmpleuq %zmm1, %zmm3, %k0
vpcmpleuq %zmm1, %zmm5, %k1
kunpckbw %k0, %k1, %k1
vpmulld %zmm6, %zmm2, %zmm7
vpaddd %zmm7, %zmm0, %zmm7
vpcmpgtd %zmm16, %zmm7, %k2 {%k1}
kmovd %k2, %eax
kmovq %k2, %k3
vxorps %xmm8, %xmm8, %xmm8
vpgatherdd (%rdi,%zmm17,4), %zmm8 {%k3}
testb $0x1, %al
je 0x1a2539
vmovd %xmm7, %eax
vmovd %xmm8, %esi
cltd
idivl %esi
vmovd %edx, %xmm9
kmovd %k2, %eax
testb $0x2, %al
je 0x1a255d
vpextrd $0x1, %xmm7, %eax
vpextrd $0x1, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x1, %edx, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x4, %al
je 0x1a2581
vpextrd $0x2, %xmm7, %eax
vpextrd $0x2, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x2, %edx, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x8, %al
je 0x1a25a5
vpextrd $0x3, %xmm7, %eax
vpextrd $0x3, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x3, %edx, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x10, %al
je 0x1a25d2
vextracti128 $0x1, %ymm7, %xmm10
vmovd %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
movw $0x10, %ax
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testb $0x20, %al
je 0x1a2603
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
movw $0x20, %ax
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testb $0x40, %al
je 0x1a2634
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
movw $0x40, %ax
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testb $-0x80, %al
je 0x1a2665
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x3, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x3, %xmm10, %esi
cltd
idivl %esi
movw $0x80, %ax
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x100, %eax # imm = 0x100
je 0x1a2697
vextracti32x4 $0x2, %zmm7, %xmm10
vmovd %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
movw $0x100, %ax # imm = 0x100
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x200, %eax # imm = 0x200
je 0x1a26cd
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
movw $0x200, %ax # imm = 0x200
kmovd %eax, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x400, %eax # imm = 0x400
je 0x1a26ff
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
kmovd %ebx, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x800, %eax # imm = 0x800
je 0x1a2732
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x3, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x3, %xmm10, %esi
cltd
idivl %esi
kmovd %r15d, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x1000, %eax # imm = 0x1000
je 0x1a2761
vextracti32x4 $0x3, %zmm7, %xmm10
vmovd %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
kmovd %r12d, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x2000, %eax # imm = 0x2000
je 0x1a2794
vextracti32x4 $0x3, %zmm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
kmovd %r13d, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x4000, %eax # imm = 0x4000
je 0x1a27c7
vextracti32x4 $0x3, %zmm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
kmovd %r14d, %k3
vpbroadcastd %edx, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x8000, %eax # imm = 0x8000
je 0x1a27f9
vextracti32x4 $0x3, %zmm7, %xmm10
vpextrd $0x3, %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vpextrd $0x3, %xmm10, %esi
cltd
idivl %esi
kmovd %ebp, %k3
vpbroadcastd %edx, %zmm9 {%k3}
vptestnmd %zmm9, %zmm9, %k2 {%k2}
kmovd %k2, %eax
testb $0x1, %al
je 0x1a2816
vmovd %xmm7, %eax
vmovd %xmm8, %esi
cltd
idivl %esi
vmovd %eax, %xmm9
kmovd %k2, %eax
testb $0x2, %al
je 0x1a283a
vpextrd $0x1, %xmm7, %eax
vpextrd $0x1, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x1, %eax, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x4, %al
je 0x1a285e
vpextrd $0x2, %xmm7, %eax
vpextrd $0x2, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x2, %eax, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x8, %al
je 0x1a2882
vpextrd $0x3, %xmm7, %eax
vpextrd $0x3, %xmm8, %esi
cltd
idivl %esi
vpinsrd $0x3, %eax, %xmm9, %xmm10
vinserti32x4 $0x0, %xmm10, %zmm9, %zmm9
kmovd %k2, %eax
testb $0x10, %al
je 0x1a28af
vextracti128 $0x1, %ymm7, %xmm10
vmovd %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
movw $0x10, %dx
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testb $0x20, %al
je 0x1a28e0
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
movw $0x20, %dx
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testb $0x40, %al
je 0x1a2911
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
movw $0x40, %dx
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testb $-0x80, %al
je 0x1a2942
vextracti128 $0x1, %ymm7, %xmm10
vpextrd $0x3, %xmm10, %eax
vextracti128 $0x1, %ymm8, %xmm10
vpextrd $0x3, %xmm10, %esi
cltd
idivl %esi
movw $0x80, %dx
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x100, %eax # imm = 0x100
je 0x1a2974
vextracti32x4 $0x2, %zmm7, %xmm10
vmovd %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
movw $0x100, %dx # imm = 0x100
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x200, %eax # imm = 0x200
je 0x1a29aa
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
movw $0x200, %dx # imm = 0x200
kmovd %edx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x400, %eax # imm = 0x400
je 0x1a29dc
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
kmovd %ebx, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x800, %eax # imm = 0x800
je 0x1a2a0f
vextracti32x4 $0x2, %zmm7, %xmm10
vpextrd $0x3, %xmm10, %eax
vextracti32x4 $0x2, %zmm8, %xmm10
vpextrd $0x3, %xmm10, %esi
cltd
idivl %esi
kmovd %r15d, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x1000, %eax # imm = 0x1000
je 0x1a2a3e
vextracti32x4 $0x3, %zmm7, %xmm10
vmovd %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vmovd %xmm10, %esi
cltd
idivl %esi
kmovd %r12d, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x2000, %eax # imm = 0x2000
je 0x1a2a71
vextracti32x4 $0x3, %zmm7, %xmm10
vpextrd $0x1, %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vpextrd $0x1, %xmm10, %esi
cltd
idivl %esi
kmovd %r13d, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x4000, %eax # imm = 0x4000
je 0x1a2aa4
vextracti32x4 $0x3, %zmm7, %xmm10
vpextrd $0x2, %xmm10, %eax
vextracti32x4 $0x3, %zmm8, %xmm10
vpextrd $0x2, %xmm10, %esi
cltd
idivl %esi
kmovd %r14d, %k3
vpbroadcastd %eax, %zmm9 {%k3}
kmovd %k2, %eax
testl $0x8000, %eax # imm = 0x8000
je 0x1a2ad6
vextracti32x4 $0x3, %zmm7, %xmm7
vpextrd $0x3, %xmm7, %eax
vextracti32x4 $0x3, %zmm8, %xmm7
vpextrd $0x3, %xmm7, %esi
cltd
idivl %esi
kmovd %ebp, %k3
vpbroadcastd %eax, %zmm9 {%k3}
vpcmpgtd %zmm9, %zmm11, %k3 {%k2}
kmovq %k3, %k4
vpxor %xmm8, %xmm8, %xmm8
vgatherdps (%r10,%zmm9,4), %zmm8 {%k4}
vmovups (%r8,%rcx,4), %zmm10 {%k3} {z}
vpcmpnltd %zmm11, %zmm9, %k2 {%k2}
vmovaps %zmm4, %zmm7
vfmadd231ps %zmm8, %zmm10, %zmm7 {%k3} # zmm7 {%k3} = (zmm10 * zmm8) + zmm7
vmovaps %zmm4, %zmm7 {%k2}
addq $0x10, %rcx
vpaddq %zmm18, %zmm3, %zmm3
vpaddq %zmm18, %zmm5, %zmm5
vpaddd %zmm19, %zmm6, %zmm6
cmpq %rcx, %r9
jne 0x1a24e7
vmovaps %zmm7, %zmm4 {%k1}
vextractf64x4 $0x1, %zmm4, %ymm1
vaddps %zmm1, %zmm4, %zmm1
vextractf128 $0x1, %ymm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm2, %xmm1, %xmm1
vhaddps %xmm1, %xmm1, %xmm4
movl 0x98(%rsp), %esi
movl 0x24(%rsp), %r8d
movq 0x148(%rsp), %r9
movl 0x58(%rsp), %r10d
movl 0x130(%rsp), %ecx
incl %r11d
cmpl %r10d, %r11d
jne 0x1a2459
movq 0x90(%rsp), %rcx
incq %rcx
movq 0xa0(%rsp), %rax
addq 0x48(%rsp), %rax
movq %rax, 0xa0(%rsp)
cmpq 0x78(%rsp), %rcx
jne 0x1a241e
movq 0x18(%rsp), %rax
movq 0x28(%rsp), %rdi
movl 0x114(%rax,%rdi), %eax
decl %eax
cmpl $0x5, %eax
ja 0x1a2dc0
leaq 0x277200(%rip), %rcx # 0x419dd0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq 0x70(%rsp), %rdx
jmpq *%rax
vmaxss 0x2742a2(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x1a2dc9
vmovaps %xmm4, %xmm0
vmovaps %xmm4, 0x130(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x272110(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x70(%rsp), %rdx
movl 0x24(%rsp), %r8d
vpbroadcastd 0x276f66(%rip), %zmm19 # 0x419b8c
vpbroadcastq 0x274c38(%rip), %zmm18 # 0x417868
vxorps %xmm17, %xmm17, %xmm17
vpternlogd $0xff, %zmm16, %zmm16, %zmm16
vpmovsxbd 0x277839(%rip), %zmm15 # 0x41a480
vpmovsxbq 0x274c2f(%rip), %zmm14 # 0x417880
vpmovsxbq 0x274c1d(%rip), %zmm13 # 0x417878
vxorps %xmm12, %xmm12, %xmm12
vmovdqu64 0x150(%rsp), %zmm11
movl 0x98(%rsp), %esi
vmulss 0x130(%rsp), %xmm0, %xmm0
jmp 0x1a2dc9
movq 0x18(%rsp), %rax
movq 0x118(%rax,%rdi), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x1a2dc9
vmovaps %xmm1, %xmm0
jmp 0x1a2dc9
vmovss 0x2743a7(%rip), %xmm2 # 0x417058
vminss %xmm2, %xmm4, %xmm1
vxorps 0x274395(%rip){1to4}, %xmm1, %xmm0 # 0x417054
vcmpltss 0x274392(%rip), %xmm1, %k1 # 0x41705c
vmovss %xmm2, %xmm0, %xmm0 {%k1}
vzeroupper
callq 0x563e0
movq 0x70(%rsp), %rdx
movl 0x24(%rsp), %r8d
vpbroadcastd 0x276ea0(%rip), %zmm19 # 0x419b8c
vpbroadcastq 0x274b72(%rip), %zmm18 # 0x417868
vxorps %xmm17, %xmm17, %xmm17
vpternlogd $0xff, %zmm16, %zmm16, %zmm16
vpmovsxbd 0x277773(%rip), %zmm15 # 0x41a480
vpmovsxbq 0x274b69(%rip), %zmm14 # 0x417880
vpmovsxbq 0x274b57(%rip), %zmm13 # 0x417878
vxorps %xmm12, %xmm12, %xmm12
vmovdqu64 0x150(%rsp), %zmm11
movl 0x98(%rsp), %esi
vmovss 0x271fd8(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x1a2dc9
movq 0x18(%rsp), %rax
movq 0x118(%rax,%rdi), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x274122(%rip), %xmm4, %k1 # 0x416e88
vmovss 0x271fa8(%rip), %xmm0 {%k1} # 0x414d18
vmulss %xmm4, %xmm0, %xmm0
jmp 0x1a2dc9
movq 0x18(%rsp), %rax
movq 0x118(%rax,%rdi), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps 0x2742be(%rip){1to4}, %xmm2, %xmm0 # 0x417054
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
jb 0x1a2dc9
vmovss 0x271f6c(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x1a2de3
vmovaps %xmm4, %xmm0
jmp 0x1a2dc9
vmovaps %xmm4, %xmm0
movq 0x70(%rsp), %rdx
movq 0x38(%rsp), %rax
vmovss %xmm0, (%rax,%rdx,4)
incq %rdx
cmpq 0x30(%rsp), %rdx
jne 0x1a238a
jmp 0x1a2dee
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x1a2dc9
movq 0xc0(%rsp), %rax
movq 0x38(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x38(%rsp)
movl 0x80(%rsp), %eax
incl %eax
movl %eax, 0x80(%rsp)
cmpl 0x88(%rsp), %eax
jne 0x1a232a
movq 0xd0(%rsp), %rax
movq -0x18(%rax), %rax
movq 0x10(%rsp), %rdi
incq %rdi
movq 0x18(%rsp), %rcx
movslq 0xd0(%rcx,%rax), %rcx
movq %rdi, 0x10(%rsp)
cmpq %rcx, %rdi
jl 0x1a22cd
movq 0x18(%rsp), %rdi
movq (%rdi), %rdx
addq -0x18(%rdx), %rdi
leaq 0xe0(%rsp), %rsi
movq 0x200(%rsp), %rbx
movq %rbx, %rdx
movq 0x1f0(%rsp), %rcx
vzeroupper
callq 0x198c74
cmpq $0x0, (%rbx)
je 0x1a2e96
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %r13d, %r13d
testq %rax, %rax
jne 0x1a2e9c
movl $0xffffff9c, %r13d # imm = 0xFFFFFF9C
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x1a2ed8
lock
decl (%rax)
jne 0x1a2ed8
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
je 0x1a2ecb
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1a2ed8
testq %rsi, %rsi
je 0x1a2ed8
movq %rsi, %rdi
callq 0x563b0
movl %r13d, %eax
addq $0x208, %rsp # imm = 0x208
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1a2f38
jmp 0x1a2ef1
movq %rax, %rbx
movq 0xe8(%rsp), %rax
testq %rax, %rax
je 0x1a2f30
lock
decl (%rax)
jne 0x1a2f30
movq 0xe0(%rsp), %rsi
movq 0x100(%rsp), %rdi
testq %rdi, %rdi
jne 0x1a2f2a
testq %rsi, %rsi
je 0x1a2f30
movq %rsi, %rdi
callq 0x563b0
jmp 0x1a2f30
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/deconvolution_x86_avx512.cpp
|
virtual thunk to ncnn::Deconvolution_x86_fma::create_pipeline(ncnn::Option const&)
|
// Prepares the packed weight tensor (weight_data_tm) used by the forward pass.
// Deconvolution applies the kernel in reverse spatial order, so the kw*kh taps
// of every (input, output) channel pair are flipped first, then repacked into
// a SIMD-friendly interleaved layout chosen from the build's ISA level.
int Deconvolution_x86_fma::create_pipeline(const Option& opt)
{
    // Number of spatial taps per (input, output) channel pair.
    const int maxk = kernel_w * kernel_h;
    // weight_data is stored kw-kh-inch-outch, so dividing the flat size by
    // maxk and num_output recovers the input channel count.
    int num_input = weight_data_size / maxk / num_output;
    Mat weight_data_transposed(weight_data.w);
    {
        // Reverse the taps inside each maxk-sized group: pt[maxk-1-k] = p[k].
        float* pt = weight_data_transposed;
        const float* p = weight_data;
        for (int i = 0; i < num_input * num_output; i++)
        {
            for (int k = 0; k < maxk; k++)
            {
                pt[maxk - 1 - k] = p[k];
            }
            p += maxk;
            pt += maxk;
        }
    }
    // Pick the widest pack factors the channel counts divide evenly into,
    // bounded by the SIMD width this translation unit was compiled for.
    int elempack = 1;
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
#if __AVX512F__
        elempack = num_input % 16 == 0 ? 16 : num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#elif __AVX__
        elempack = num_input % 8 == 0 ? 8 : num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
        elempack = num_input % 4 == 0 ? 4 : 1;
        out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
    }
#endif // __SSE2__
    // src = kw-kh-inch-outch
    // dst = pb-pa-kw-kh-inch/pa-outch/pb
    {
        // View the flipped weights as maxk x num_input x num_output, then
        // gather them so that out_elempack output channels and elempack input
        // channels are interleaved contiguously for each tap k.
        Mat weight_data_r2 = weight_data_transposed.reshape(maxk, num_input, num_output);
        // Element size is 4 bytes (float) times the combined pack factor.
        weight_data_tm.create(maxk, num_input / elempack, num_output / out_elempack, (size_t)4u * elempack * out_elempack, elempack * out_elempack);
        for (int q = 0; q + (out_elempack - 1) < num_output; q += out_elempack)
        {
            float* g00 = weight_data_tm.channel(q / out_elempack);
            for (int p = 0; p + (elempack - 1) < num_input; p += elempack)
            {
                for (int k = 0; k < maxk; k++)
                {
                    for (int i = 0; i < elempack; i++)
                    {
                        for (int j = 0; j < out_elempack; j++)
                        {
                            // One scalar at a time: tap k of output channel
                            // q+j, input channel p+i, written sequentially.
                            const float* k00 = weight_data_r2.channel(q + j).row(p + i);
                            g00[0] = k00[k];
                            g00++;
                        }
                    }
                }
            }
        }
    }
    // In light mode the raw weights are no longer needed once repacked.
    if (opt.lightmode)
    {
        weight_data.release();
    }
    return 0;
}
|
pushq %rax
movq (%rdi), %rax
addq -0x30(%rax), %rdi
callq 0x1a2fd4
xorl %eax, %eax
popq %rcx
retq
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/deconvolution_x86_fma.cpp
|
ncnn::Dropout_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Multiplies every element of bottom_top_blob by `scale` in place
// (inference-time dropout is just a uniform rescale).
// scale == 1.0f is the identity and returns immediately.  Packed layouts
// (elempack 16/8/4) take dedicated SIMD paths; any other layout falls back
// to the generic scalar implementation in the Dropout base class.
int Dropout_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    if (scale == 1.f)
    {
        // Identity transform - nothing to do.
        return 0;
    }
#if __SSE2__
    int dims = bottom_top_blob.dims;
    int elempack = bottom_top_blob.elempack;
#if __AVX__
#if __AVX512F__
    if (elempack == 16)
    {
        // No 16-wide kernel here: repack to 8, run the AVX path, pack back.
        Mat tmp;
        convert_packing(bottom_top_blob, tmp, 8, opt);
        forward_inplace(tmp, opt);
        convert_packing(tmp, bottom_top_blob, 16, opt);
        return 0;
    }
#endif // __AVX512F__
    if (elempack == 8)
    {
        int w = bottom_top_blob.w;
        int h = bottom_top_blob.h;
        int channels = bottom_top_blob.c;
        int size = w * h;
        __m256 _scale = _mm256_set1_ps(scale);
        if (dims == 1)
        {
            // 1-D: w packed elements, each 8 floats wide.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < w; i++)
            {
                float* ptr = (float*)bottom_top_blob + i * 8;
                __m256 _p = _mm256_loadu_ps(ptr);
                _p = _mm256_mul_ps(_p, _scale);
                _mm256_storeu_ps(ptr, _p);
            }
        }
        if (dims == 2)
        {
            // 2-D: parallelize over rows, scale w packed elements per row.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < h; i++)
            {
                float* ptr = bottom_top_blob.row(i);
                for (int j = 0; j < w; j++)
                {
                    __m256 _p = _mm256_loadu_ps(ptr);
                    _p = _mm256_mul_ps(_p, _scale);
                    _mm256_storeu_ps(ptr, _p);
                    ptr += 8;
                }
            }
        }
        if (dims == 3)
        {
            // 3-D: parallelize over channels, scale w*h packed elements each.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                for (int i = 0; i < size; i++)
                {
                    __m256 _p = _mm256_loadu_ps(ptr);
                    _p = _mm256_mul_ps(_p, _scale);
                    _mm256_storeu_ps(ptr, _p);
                    ptr += 8;
                }
            }
        }
        return 0;
    }
#endif // __AVX__
    if (elempack == 4)
    {
        // Same structure as the elempack==8 branch, using 4-wide SSE ops.
        int w = bottom_top_blob.w;
        int h = bottom_top_blob.h;
        int channels = bottom_top_blob.c;
        int size = w * h;
        __m128 _scale = _mm_set1_ps(scale);
        if (dims == 1)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < w; i++)
            {
                float* ptr = (float*)bottom_top_blob + i * 4;
                __m128 _p = _mm_loadu_ps(ptr);
                _p = _mm_mul_ps(_p, _scale);
                _mm_storeu_ps(ptr, _p);
            }
        }
        if (dims == 2)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < h; i++)
            {
                float* ptr = bottom_top_blob.row(i);
                for (int j = 0; j < w; j++)
                {
                    __m128 _p = _mm_loadu_ps(ptr);
                    _p = _mm_mul_ps(_p, _scale);
                    _mm_storeu_ps(ptr, _p);
                    ptr += 4;
                }
            }
        }
        if (dims == 3)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* ptr = bottom_top_blob.channel(q);
                for (int i = 0; i < size; i++)
                {
                    __m128 _p = _mm_loadu_ps(ptr);
                    _p = _mm_mul_ps(_p, _scale);
                    _mm_storeu_ps(ptr, _p);
                    ptr += 4;
                }
            }
        }
        return 0;
    }
#endif // __SSE2__
    // Unpacked (elempack == 1) or non-SSE2 build: generic scalar path.
    return Dropout::forward_inplace(bottom_top_blob, opt);
}
|
movq (%rdi), %rax
movq -0x18(%rax), %rax
vmovss 0xd0(%rdi,%rax), %xmm0
vmovss 0x2684e4(%rip), %xmm1 # 0x414d18
vucomiss %xmm0, %xmm1
jne 0x1ac840
xorl %eax, %eax
vzeroupper
retq
movl 0x18(%rsi), %ecx
movl 0x28(%rsi), %r8d
cmpl $0x4, %ecx
je 0x1ac89e
cmpl $0x8, %ecx
jne 0x1ac8ec
movl 0x2c(%rsi), %ecx
movl 0x30(%rsi), %eax
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
cmpl $0x3, %r8d
je 0x1ac972
cmpl $0x2, %r8d
je 0x1ac8f4
cmpl $0x1, %r8d
jne 0x1ac83a
testl %ecx, %ecx
jle 0x1ac83a
shlq $0x5, %rcx
xorl %eax, %eax
movq (%rsi), %rdx
vmulps (%rdx,%rax), %ymm0, %ymm1
vmovups %ymm1, (%rdx,%rax)
addq $0x20, %rax
cmpq %rax, %rcx
jne 0x1ac886
jmp 0x1ac83a
movl 0x2c(%rsi), %ecx
movl 0x30(%rsi), %eax
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
cmpl $0x3, %r8d
je 0x1ac9b7
cmpl $0x2, %r8d
je 0x1ac933
cmpl $0x1, %r8d
jne 0x1ac83a
testl %ecx, %ecx
jle 0x1ac83a
shlq $0x4, %rcx
xorl %eax, %eax
movq (%rsi), %rdx
vmulps (%rdx,%rax), %xmm0, %xmm1
vmovups %xmm1, (%rdx,%rax)
addq $0x10, %rax
cmpq %rax, %rcx
jne 0x1ac8d1
jmp 0x1ac83a
addq %rax, %rdi
jmp 0x1ac092
testl %eax, %eax
jle 0x1ac83a
xorl %edx, %edx
testl %ecx, %ecx
jle 0x1ac926
movslq 0x2c(%rsi), %rdi
imulq %rdx, %rdi
imulq 0x10(%rsi), %rdi
addq (%rsi), %rdi
movl %ecx, %r8d
vmulps (%rdi), %ymm0, %ymm1
vmovups %ymm1, (%rdi)
addq $0x20, %rdi
decl %r8d
jne 0x1ac915
incq %rdx
cmpq %rax, %rdx
jne 0x1ac8fe
jmp 0x1ac83a
testl %eax, %eax
jle 0x1ac83a
xorl %edx, %edx
testl %ecx, %ecx
jle 0x1ac965
movslq 0x2c(%rsi), %rdi
imulq %rdx, %rdi
imulq 0x10(%rsi), %rdi
addq (%rsi), %rdi
movl %ecx, %r8d
vmulps (%rdi), %xmm0, %xmm1
vmovups %xmm1, (%rdi)
addq $0x10, %rdi
decl %r8d
jne 0x1ac954
incq %rdx
cmpq %rax, %rdx
jne 0x1ac93d
jmp 0x1ac83a
movl 0x38(%rsi), %edx
testl %edx, %edx
jle 0x1ac83a
imull %ecx, %eax
xorl %ecx, %ecx
testl %eax, %eax
jle 0x1ac9aa
movq 0x40(%rsi), %rdi
imulq %rcx, %rdi
imulq 0x10(%rsi), %rdi
addq (%rsi), %rdi
movl %eax, %r8d
vmulps (%rdi), %ymm0, %ymm1
vmovups %ymm1, (%rdi)
addq $0x20, %rdi
decl %r8d
jne 0x1ac999
incq %rcx
cmpq %rdx, %rcx
jne 0x1ac982
jmp 0x1ac83a
movl 0x38(%rsi), %edx
testl %edx, %edx
jle 0x1ac83a
imull %ecx, %eax
xorl %ecx, %ecx
testl %eax, %eax
jle 0x1ac9ef
movq 0x40(%rsi), %rdi
imulq %rcx, %rdi
imulq 0x10(%rsi), %rdi
addq (%rsi), %rdi
movl %eax, %r8d
vmulps (%rdi), %xmm0, %xmm1
vmovups %xmm1, (%rdi)
addq $0x10, %rdi
decl %r8d
jne 0x1ac9de
incq %rcx
cmpq %rdx, %rcx
jne 0x1ac9c7
jmp 0x1ac83a
|
/ysh329[P]ncnn/build_O3/src/layer/x86/dropout_x86_avx.cpp
|
ncnn::ELU_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Applies the ELU activation in place: x stays x for x >= 0, and becomes
// alpha * (exp(x) - 1) for x < 0.  Channels are processed in parallel; each
// channel's flat buffer is consumed with the widest SIMD path compiled in
// (AVX-512 -> AVX -> SSE2), followed by a scalar tail loop.
int ELU_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    const int channels = bottom_top_blob.c;
    // Flat per-channel element count, including the packing factor.
    const int count = bottom_top_blob.w * bottom_top_blob.h * bottom_top_blob.elempack;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* p = bottom_top_blob.channel(q);
        int idx = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        // 16 floats per iteration.
        __m512 _alpha512 = _mm512_set1_ps(alpha);
        while (idx + 15 < count)
        {
            __m512 _v = _mm512_loadu_ps(p);
            _mm512_storeu_ps(p, elu_avx512(_v, _alpha512));
            p += 16;
            idx += 16;
        }
#endif // __AVX512F__
        // 8 floats per iteration.
        __m256 _alpha256 = _mm256_set1_ps(alpha);
        while (idx + 7 < count)
        {
            __m256 _v = _mm256_loadu_ps(p);
            _mm256_storeu_ps(p, elu_avx(_v, _alpha256));
            p += 8;
            idx += 8;
        }
#endif // __AVX__
        // 4 floats per iteration (aligned load/store, matching the original).
        __m128 _alpha128 = _mm_set1_ps(alpha);
        while (idx + 3 < count)
        {
            __m128 _v = _mm_load_ps(p);
            _mm_store_ps(p, elu_sse(_v, _alpha128));
            p += 4;
            idx += 4;
        }
#endif // __SSE2__
        // Scalar tail for whatever the SIMD loops left over.
        for (; idx < count; idx++)
        {
            if (*p < 0.f)
                *p = static_cast<float>(alpha * (exp(*p) - 1.f));
            p++;
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rdi, 0x8(%rsp)
movq %rsi, 0x18(%rsp)
movslq 0x38(%rsi), %rax
movq %rax, 0x20(%rsp)
testq %rax, %rax
jle 0x1afb2d
movq 0x18(%rsp), %rax
movl 0x30(%rax), %ebp
imull 0x2c(%rax), %ebp
imull 0x18(%rax), %ebp
movq 0x8(%rsp), %rax
movq (%rax), %r12
movl %ebp, %eax
andl $-0x4, %eax
movl %eax, 0x14(%rsp)
xorl %edx, %edx
xorps %xmm6, %xmm6
movaps 0x267565(%rip), %xmm7 # 0x416ea0
movaps 0x26756d(%rip), %xmm8 # 0x416eb0
movaps 0x267575(%rip), %xmm9 # 0x416ec0
movaps 0x26757d(%rip), %xmm10 # 0x416ed0
movaps 0x267585(%rip), %xmm11 # 0x416ee0
movaps 0x2681bd(%rip), %xmm12 # 0x417b20
movaps 0x267595(%rip), %xmm13 # 0x416f00
movaps 0x26759d(%rip), %xmm14 # 0x416f10
movaps 0x2675a5(%rip), %xmm15 # 0x416f20
movaps 0x2675ae(%rip), %xmm1 # 0x416f30
movq 0x18(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rdx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
cmpl $0x4, %ebp
movq %rdx, 0x28(%rsp)
jl 0x1afa79
movq -0x18(%r12), %rax
movq 0x8(%rsp), %rcx
movss 0xd0(%rcx,%rax), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x30(%rsp)
movl $0x3, %eax
movaps %xmm1, %xmm0
movaps (%r15), %xmm4
movaps %xmm4, %xmm1
minps %xmm6, %xmm4
minps %xmm7, %xmm4
maxps %xmm8, %xmm4
movaps %xmm4, %xmm3
mulps %xmm9, %xmm3
addps %xmm10, %xmm3
cvttps2dq %xmm3, %xmm2
cvtdq2ps %xmm2, %xmm2
cmpltps %xmm2, %xmm3
andps %xmm11, %xmm3
subps %xmm3, %xmm2
movaps %xmm2, %xmm3
mulps %xmm12, %xmm3
addps %xmm4, %xmm3
movaps %xmm3, %xmm4
mulps %xmm3, %xmm4
movaps %xmm3, %xmm5
mulps %xmm13, %xmm5
addps %xmm14, %xmm5
mulps %xmm3, %xmm5
addps %xmm15, %xmm5
mulps %xmm3, %xmm5
addps %xmm0, %xmm5
mulps %xmm3, %xmm5
addps 0x267517(%rip), %xmm5 # 0x416f40
mulps %xmm3, %xmm5
addps %xmm10, %xmm5
mulps %xmm4, %xmm5
addps %xmm11, %xmm3
addps %xmm5, %xmm3
cvttps2dq %xmm2, %xmm2
pslld $0x17, %xmm2
paddd %xmm11, %xmm2
mulps %xmm3, %xmm2
maxps %xmm6, %xmm1
addps 0x26753b(%rip), %xmm2 # 0x416f90
mulps 0x30(%rsp), %xmm2
addps %xmm1, %xmm2
movaps %xmm2, (%r15)
addq $0x10, %r15
addl $0x4, %eax
cmpl %ebp, %eax
jl 0x1af9c9
movl 0x14(%rsp), %eax
movaps %xmm0, %xmm1
jmp 0x1afa7b
xorl %eax, %eax
movl %ebp, %ebx
subl %eax, %ebx
jle 0x1afb1a
xorl %r13d, %r13d
movss (%r15,%r13,4), %xmm0
xorps %xmm3, %xmm3
ucomiss %xmm0, %xmm3
jbe 0x1afb0e
movq -0x18(%r12), %r14
callq 0x563e0
movaps 0x267489(%rip), %xmm1 # 0x416f30
movaps 0x267471(%rip), %xmm15 # 0x416f20
movaps 0x267459(%rip), %xmm14 # 0x416f10
movaps 0x267441(%rip), %xmm13 # 0x416f00
movaps 0x268059(%rip), %xmm12 # 0x417b20
movaps 0x267411(%rip), %xmm11 # 0x416ee0
movaps 0x2673f9(%rip), %xmm10 # 0x416ed0
movaps 0x2673e1(%rip), %xmm9 # 0x416ec0
movaps 0x2673c9(%rip), %xmm8 # 0x416eb0
movaps 0x2673b2(%rip), %xmm7 # 0x416ea0
xorps %xmm6, %xmm6
addss 0x267597(%rip), %xmm0 # 0x417090
movq 0x8(%rsp), %rax
mulss 0xd0(%rax,%r14), %xmm0
movss %xmm0, (%r15,%r13,4)
incq %r13
cmpl %r13d, %ebx
jne 0x1afa88
movq 0x28(%rsp), %rdx
incq %rdx
cmpq 0x20(%rsp), %rdx
jne 0x1af982
xorl %eax, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/ysh329[P]ncnn/src/layer/x86/elu_x86.cpp
|
ncnn::Flatten_x86_avx::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Flattens a 2/3/4-D int8 blob into a 1-D blob, re-packing to elempack 8
// when the total element count is divisible by 8 (and packing is enabled).
// Packed inputs are de-interleaved: the 8 lanes of each packed element are
// scattered into 8 consecutive output rows' worth of data.
int Flatten_x86_avx::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int dims = bottom_blob.dims;
    if (dims == 1)
    {
        // Already flat - shallow copy (reference-counted).
        top_blob = bottom_blob;
        return 0;
    }
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int d = bottom_blob.d;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int size = w * h * d;
    // Total scalar (unpacked) element count across all channels.
    int total = size * channels * elempack;
    int out_elempack = 1;
    if (opt.use_packing_layout)
    {
        out_elempack = total % 8 == 0 ? 8 : 1;
    }
    // Per-element byte size scaled to the new pack factor.
    size_t out_elemsize = elemsize / elempack * out_elempack;
    if (out_elempack == 1)
    {
        // No repacking needed - generic flatten handles it.
        return Flatten::forward(bottom_blob, top_blob, opt);
    }
    if (dims == 2 && elempack == 1) // out_elempack == 8
    {
        // 2-D unpacked rows are already contiguous: reinterpret in place by
        // rewriting the shape metadata instead of copying data.
        top_blob = bottom_blob;
        top_blob.dims = 1;
        top_blob.w = total / out_elempack;
        top_blob.h = 1;
        top_blob.cstep = top_blob.w;
        top_blob.elemsize = out_elemsize;
        top_blob.elempack = out_elempack;
        return 0;
    }
    top_blob.create(total / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;
    if (dims == 2)
    {
        if (elempack == 8) // out_elempack == 8
        {
            // De-interleave: lane j of row i's packed bytes goes to the
            // output segment starting at w * (i * 8 + j).
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int i = 0; i < h; i++)
            {
                const signed char* ptr = bottom_blob.row<const signed char>(i);
                signed char* outptr0 = (signed char*)top_blob + w * i * 8;
                signed char* outptr1 = (signed char*)top_blob + w * (i * 8 + 1);
                signed char* outptr2 = (signed char*)top_blob + w * (i * 8 + 2);
                signed char* outptr3 = (signed char*)top_blob + w * (i * 8 + 3);
                signed char* outptr4 = (signed char*)top_blob + w * (i * 8 + 4);
                signed char* outptr5 = (signed char*)top_blob + w * (i * 8 + 5);
                signed char* outptr6 = (signed char*)top_blob + w * (i * 8 + 6);
                signed char* outptr7 = (signed char*)top_blob + w * (i * 8 + 7);
                int j = 0;
                for (; j < w; j++)
                {
                    *outptr0++ = ptr[0];
                    *outptr1++ = ptr[1];
                    *outptr2++ = ptr[2];
                    *outptr3++ = ptr[3];
                    *outptr4++ = ptr[4];
                    *outptr5++ = ptr[5];
                    *outptr6++ = ptr[6];
                    *outptr7++ = ptr[7];
                    ptr += 8;
                }
            }
        }
    }
    if (dims == 3 || dims == 4)
    {
        if (elempack == 8) // out_elempack == 8
        {
            // Same de-interleave as the 2-D case, but per channel with
            // size = w*h*d scalars per lane.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const signed char* ptr = bottom_blob.channel(q);
                signed char* outptr0 = (signed char*)top_blob + size * q * 8;
                signed char* outptr1 = (signed char*)top_blob + size * (q * 8 + 1);
                signed char* outptr2 = (signed char*)top_blob + size * (q * 8 + 2);
                signed char* outptr3 = (signed char*)top_blob + size * (q * 8 + 3);
                signed char* outptr4 = (signed char*)top_blob + size * (q * 8 + 4);
                signed char* outptr5 = (signed char*)top_blob + size * (q * 8 + 5);
                signed char* outptr6 = (signed char*)top_blob + size * (q * 8 + 6);
                signed char* outptr7 = (signed char*)top_blob + size * (q * 8 + 7);
                int i = 0;
                for (; i < size; i++)
                {
                    *outptr0++ = ptr[0];
                    *outptr1++ = ptr[1];
                    *outptr2++ = ptr[2];
                    *outptr3++ = ptr[3];
                    *outptr4++ = ptr[4];
                    *outptr5++ = ptr[5];
                    *outptr6++ = ptr[6];
                    *outptr7++ = ptr[7];
                    ptr += 8;
                }
            }
        }
        if (elempack == 1) // out_elempack == 8
        {
            // Unpacked channels: straight byte copy per channel, which drops
            // any cstep padding between channels.
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const signed char* ptr = bottom_blob.channel(q);
                signed char* outptr = (signed char*)top_blob + size * q;
                int i = 0;
                for (; i < size; i++)
                {
                    *outptr++ = *ptr++;
                }
            }
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %r9
movq %rsi, %r14
movl 0x28(%rsi), %r12d
cmpl $0x1, %r12d
jne 0x1b685b
xorl %ebp, %ebp
cmpq %r14, %r9
je 0x1b6d48
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x1b6827
lock
incl (%rax)
movq 0x8(%r9), %rax
testq %rax, %rax
je 0x1b6c70
lock
decl (%rax)
jne 0x1b6c70
movq (%r9), %rsi
movq 0x20(%r9), %rdi
testq %rdi, %rdi
je 0x1b6c5d
movq (%rdi), %rax
movq %r9, %rbx
callq *0x18(%rax)
jmp 0x1b6c6d
movq %rcx, %r8
movslq 0x2c(%r14), %r13
movl 0x30(%r14), %edx
movl 0x38(%r14), %ecx
movq 0x10(%r14), %rax
movq %rdx, (%rsp)
imull %r13d, %edx
imull 0x34(%r14), %edx
movslq 0x18(%r14), %r10
movq %rcx, 0x18(%rsp)
movl %ecx, %ebp
imull %r10d, %ebp
movq %rdx, 0x10(%rsp)
imull %edx, %ebp
testb $0x7, %bpl
sete %sil
xorl %edx, %edx
divq %r10
movq %rax, %r15
movb 0x27(%r8), %al
xorl %ecx, %ecx
testb %sil, %al
setne %cl
leal (%rcx,%rcx,2), %ecx
shlq %cl, %r15
testb %sil, %al
je 0x1b6912
movl %r12d, %ebx
xorl $0x2, %ebx
movl %r10d, %eax
xorl $0x1, %eax
orl %ebx, %eax
jne 0x1b6935
cmpq %r14, %r9
je 0x1b6d1f
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x1b68de
lock
incl (%rax)
movq 0x8(%r9), %rax
testq %rax, %rax
je 0x1b6ce8
lock
decl (%rax)
jne 0x1b6ce8
movq (%r9), %rsi
movq 0x20(%r9), %rdi
testq %rdi, %rdi
je 0x1b6cd5
movq (%rdi), %rax
movq %r9, %rbx
callq *0x18(%rax)
jmp 0x1b6ce5
movq (%rdi), %rax
addq -0x18(%rax), %rdi
movq %r14, %rsi
movq %r9, %rdx
movq %r8, %rcx
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x1b1180
movq %r10, 0x20(%rsp)
sarl $0x3, %ebp
movq 0x8(%r8), %r8
movq %r9, %rdi
movl %ebp, %esi
movq %r15, %rdx
movl $0x8, %ecx
movq %r9, 0x8(%rsp)
callq 0x5a03c
movq 0x8(%rsp), %rcx
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, (%rcx)
je 0x1b6d48
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
je 0x1b6d48
movq 0x20(%rsp), %rax
xorl $0x8, %eax
orl %eax, %ebx
sete %al
cmpl $0x0, (%rsp)
setg %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0x1b6a60
movl %r13d, %eax
leal (,%rax,8), %ecx
leaq (,%r13,8), %rdx
xorl %esi, %esi
movq %r13, %rdi
xorl %r8d, %r8d
testl %r13d, %r13d
jle 0x1b6a4e
movslq %esi, %r9
movq 0x8(%rsp), %r10
movq (%r10), %r11
movslq 0x2c(%r14), %rbx
leaq (%r11,%rdi), %r10
addq %r11, %r9
movq 0x10(%r14), %r11
imulq %r8, %r11
imulq %rbx, %r11
addq (%r14), %r11
xorl %ebx, %ebx
leaq (%r10,%rbx), %r15
movb (%r11,%rbx,8), %bpl
movb %bpl, (%r9,%rbx)
movb 0x1(%r11,%rbx,8), %bpl
movb %bpl, (%r10,%rbx)
movb 0x2(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
addq %r13, %r15
movb 0x3(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
addq %r13, %r15
movb 0x4(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
addq %r13, %r15
movb 0x5(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
addq %r13, %r15
movb 0x6(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
addq %r13, %r15
movb 0x7(%r11,%rbx,8), %bpl
movb %bpl, (%r13,%r15)
incq %rbx
cmpl %ebx, %eax
jne 0x1b69e7
incq %r8
addq %rdx, %rdi
addl %ecx, %esi
cmpq (%rsp), %r8
jne 0x1b69b7
addl $-0x3, %r12d
xorl %ebp, %ebp
cmpl $0x1, %r12d
ja 0x1b6d48
cmpl $0x8, 0x20(%rsp)
sete %al
cmpl $0x0, 0x18(%rsp)
setg %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0x1b6be6
movq 0x10(%rsp), %rax
leal (,%rax,8), %ecx
movl %ecx, 0x2c(%rsp)
cltq
movl %eax, %edx
leaq (,%rax,8), %rsi
movq %rsi, %rcx
subq %rax, %rcx
movq %rcx, 0x58(%rsp)
leaq (%rax,%rax), %rcx
movq %rcx, 0x50(%rsp)
leaq (%rcx,%rcx,2), %rcx
movq %rcx, 0x48(%rsp)
leaq (%rax,%rax,4), %rcx
movq %rcx, 0x40(%rsp)
leaq (,%rax,4), %rcx
movq %rcx, 0x38(%rsp)
movq %rax, 0x60(%rsp)
leaq (%rax,%rax,2), %rax
movq %rax, 0x30(%rsp)
movq $0x0, (%rsp)
xorl %r13d, %r13d
cmpl $0x0, 0x10(%rsp)
jle 0x1b6ba9
movslq (%rsp), %rax
movq 0x8(%rsp), %rcx
movq (%rcx), %r15
movq 0x40(%r14), %rbp
movq 0x58(%rsp), %rcx
leaq (%r15,%rcx), %rbx
movq 0x48(%rsp), %rcx
addq %r15, %rcx
movq 0x40(%rsp), %rdi
addq %r15, %rdi
movq 0x38(%rsp), %r8
addq %r15, %r8
movq 0x30(%rsp), %r9
addq %r15, %r9
movq 0x50(%rsp), %r10
addq %r15, %r10
movq 0x60(%rsp), %r11
addq %r15, %r11
addq %r15, %rax
imulq 0x10(%r14), %rbp
imulq %r13, %rbp
addq (%r14), %rbp
xorl %r15d, %r15d
movb (%rbp,%r15,8), %r12b
movb %r12b, (%rax,%r15)
movb 0x1(%rbp,%r15,8), %r12b
movb %r12b, (%r11,%r15)
movb 0x2(%rbp,%r15,8), %r12b
movb %r12b, (%r10,%r15)
movb 0x3(%rbp,%r15,8), %r12b
movb %r12b, (%r9,%r15)
movb 0x4(%rbp,%r15,8), %r12b
movb %r12b, (%r8,%r15)
movb 0x5(%rbp,%r15,8), %r12b
movb %r12b, (%rdi,%r15)
movb 0x6(%rbp,%r15,8), %r12b
movb %r12b, (%rcx,%r15)
movb 0x7(%rbp,%r15,8), %r12b
movb %r12b, (%rbx,%r15)
incq %r15
cmpl %r15d, %edx
jne 0x1b6b59
incq %r13
addq %rsi, 0x58(%rsp)
addq %rsi, 0x48(%rsp)
addq %rsi, 0x40(%rsp)
addq %rsi, 0x38(%rsp)
addq %rsi, 0x30(%rsp)
addq %rsi, 0x50(%rsp)
addq %rsi, 0x60(%rsp)
movq (%rsp), %rax
addl 0x2c(%rsp), %eax
movq %rax, (%rsp)
cmpq 0x18(%rsp), %r13
jne 0x1b6af3
cmpl $0x0, 0x18(%rsp)
setg %al
cmpl $0x1, 0x20(%rsp)
sete %cl
andb %al, %cl
cmpb $0x1, %cl
movq 0x8(%rsp), %r11
movl $0x0, %ebp
jne 0x1b6d48
movq 0x10(%rsp), %rcx
movslq %ecx, %rax
movl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
xorl %ebp, %ebp
cmpl $0x0, 0x10(%rsp)
jle 0x1b6c4b
movq 0x40(%r14), %rdi
movq (%r11), %r8
addq %rdx, %r8
imulq 0x10(%r14), %rdi
imulq %rsi, %rdi
addq (%r14), %rdi
xorl %r9d, %r9d
movb (%rdi,%r9), %r10b
movb %r10b, (%r8,%r9)
incq %r9
cmpl %r9d, %ecx
jne 0x1b6c3b
incq %rsi
addq %rax, %rdx
cmpq 0x18(%rsp), %rsi
jne 0x1b6c1b
jmp 0x1b6d48
testq %rsi, %rsi
je 0x1b6c70
movq %rsi, %rdi
movq %r9, %rbx
callq 0x563b0
movq %rbx, %r9
movq $0x0, 0x40(%r9)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%r9)
vmovups %xmm0, 0xc(%r9)
vmovups %xmm0, 0x28(%r9)
movl $0x0, 0x38(%r9)
vmovups (%r14), %xmm0
vmovups %xmm0, (%r9)
movq 0x10(%r14), %rax
movq %rax, 0x10(%r9)
movl 0x18(%r14), %eax
movl %eax, 0x18(%r9)
movq 0x20(%r14), %rax
movq %rax, 0x20(%r9)
vmovups 0x28(%r14), %xmm0
vmovups %xmm0, 0x28(%r9)
movl 0x38(%r14), %eax
movl %eax, 0x38(%r9)
movq 0x40(%r14), %rax
movq %rax, 0x40(%r9)
jmp 0x1b6d48
testq %rsi, %rsi
je 0x1b6ce8
movq %rsi, %rdi
movq %r9, %rbx
callq 0x563b0
movq %rbx, %r9
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%r9)
vmovups %xmm0, (%r9)
vmovups %xmm0, 0x28(%r9)
movl $0x0, 0x38(%r9)
vmovups (%r14), %xmm0
vmovups %xmm0, (%r9)
movq 0x20(%r14), %rax
movq %rax, 0x20(%r9)
movq 0x34(%r14), %rax
movq %rax, 0x34(%r9)
movl $0x1, %eax
movl %eax, 0x28(%r9)
sarl $0x3, %ebp
movl %ebp, 0x2c(%r9)
movl %eax, 0x30(%r9)
movslq %ebp, %rax
movq %rax, 0x40(%r9)
movq %r15, 0x10(%r9)
movl $0x8, 0x18(%r9)
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/flatten_x86_avx.cpp
|
ncnn::InnerProduct::forward_int8(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Int8 (quantized) inner-product forward pass.
// Quantizes the input if needed, multiplies it against the int8 weights,
// then dequantizes, adds the optional bias and applies the activation.
// Returns 0 on success, -100 on output allocation failure.
int InnerProduct::forward_int8(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    // Number of input features per output neuron.
    const int num_input = weight_data_size / num_output;
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int size = w * h;
    // elemsize == 1 means the blob already holds int8 data; otherwise
    // quantize it into a scratch blob using the workspace allocator.
    Mat bottom_blob_int8 = bottom_blob;
    if (elemsize != 1)
    {
        Option opt_g = opt;
        opt_g.blob_allocator = opt.workspace_allocator;
        opt_g.use_packing_layout = false;
        quantize_to_int8(bottom_blob, bottom_blob_int8, bottom_blob_int8_scales, opt_g);
    }
    // gemm path: a 2D input whose row width equals num_input and that has
    // more than one row is treated as a batch of independent vectors.
    if (bottom_blob.dims == 2 && w == num_input && h > 1)
    {
        // gemm
        top_blob.create(num_output, h, 4u, opt.blob_allocator);
        if (top_blob.empty())
            return -100;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int j = 0; j < h; j++)
        {
            const signed char* m = bottom_blob_int8.row<signed char>(j);
            float* outptr = top_blob.row(j);
            for (int p = 0; p < num_output; p++)
            {
                // weights are stored row-major: one row of w int8 values
                // per output neuron.
                const signed char* kptr = (const signed char*)weight_data + w * p;
                int sum = 0;
                for (int i = 0; i < w; i++)
                {
                    sum += m[i] * kptr[i];
                }
                // dequantize and relu
                // A zero weight scale marks a channel with no usable
                // quantization info; its output is forced to 0.
                float scale_in;
                if (weight_data_int8_scales[p] == 0)
                    scale_in = 0;
                else
                    scale_in = 1.f / (bottom_blob_int8_scales[0] * weight_data_int8_scales[p]);
                float sumfp32 = sum * scale_in;
                if (bias_term)
                    sumfp32 += bias_data[p];
                outptr[p] = activation_ss(sumfp32, activation_type, activation_params);
            }
        }
        return 0;
    }
    // vector path: one fp32 output value per output neuron.
    top_blob.create(num_output, 4u, opt.blob_allocator);
    if (top_blob.empty())
        return -100;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < num_output; p++)
    {
        float* outptr = top_blob;
        int sum = 0;
        // Offset of this neuron's weights within the flat weight array
        // (size * channels int8 values per output neuron).
        int offset = size * channels * p;
        // channels
        for (int q = 0; q < channels; q++)
        {
            const signed char* w = (const signed char*)weight_data + offset + size * q;
            const signed char* m = bottom_blob_int8.channel(q);
            for (int i = 0; i < size; i++)
            {
                sum += m[i] * w[i];
            }
        }
        // dequantize and relu
        // Same dequantization rule as the gemm path above.
        float scale_in;
        if (weight_data_int8_scales[p] == 0)
            scale_in = 0;
        else
            scale_in = 1.f / (bottom_blob_int8_scales[0] * weight_data_int8_scales[p]);
        float sumfp32 = sum * scale_in;
        if (bias_term)
            sumfp32 += bias_data[p];
        outptr[p] = activation_ss(sumfp32, activation_type, activation_params);
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %rcx, %r12
movq %rdx, %rbx
movq %rsi, %r13
movq %rdi, %r14
movl 0xd8(%rdi), %eax
cltd
idivl 0xd0(%rdi)
movl 0x38(%rsi), %r15d
movq 0x10(%rsi), %rsi
movq (%r13), %rcx
movq %rcx, 0x68(%rsp)
movq 0x8(%r13), %rcx
movq %rcx, 0x70(%rsp)
movq %rsi, 0x78(%rsp)
movl 0x18(%r13), %edx
movl %edx, 0x80(%rsp)
movq 0x20(%r13), %rdx
movq %rdx, 0x88(%rsp)
movups 0x28(%r13), %xmm0
movl 0x2c(%r13), %edx
movl %edx, (%rsp)
movl 0x30(%r13), %ebp
movups %xmm0, 0x90(%rsp)
movl %r15d, 0xa0(%rsp)
movq 0x40(%r13), %rdx
movq %rdx, 0xa8(%rsp)
testq %rcx, %rcx
je 0x1b8000
lock
incl (%rcx)
cmpq $0x1, %rsi
movl %eax, 0x18(%rsp)
je 0x1b805d
movups (%r12), %xmm0
movups 0x10(%r12), %xmm1
movups 0x20(%r12), %xmm2
movups 0x30(%r12), %xmm3
leaq 0xe0(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x8(%rcx)
movb $0x0, 0x27(%rcx)
leaq 0x208(%r14), %rdx
leaq 0x68(%rsp), %rsi
movq %r13, %rdi
callq 0x5ce55
movl 0x18(%rsp), %eax
cmpl $0x2, 0x28(%r13)
jne 0x1b83a0
cmpl %eax, (%rsp)
sete %al
cmpl $0x2, %ebp
setge %cl
andb %al, %cl
movl 0xd0(%r14), %esi
cmpb $0x1, %cl
jne 0x1b83a7
movq 0x8(%r12), %r8
movl $0x4, %ecx
movq %rbx, %rdi
movl %ebp, %edx
callq 0x5b12e
movq (%rbx), %rax
movl $0xffffff9c, %r12d # imm = 0xFFFFFF9C
movq %rax, 0xd8(%rsp)
testq %rax, %rax
je 0x1b867c
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x1b867c
movq 0x68(%rsp), %r12
movslq 0x2c(%rbx), %rax
imulq 0x10(%rbx), %rax
movq %rax, 0xc8(%rsp)
movslq 0x94(%rsp), %rax
movl 0xd0(%r14), %r8d
movl 0xd4(%r14), %r9d
movq 0x130(%r14), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x1c0(%r14), %r10
movq %r14, 0xd0(%rsp)
movl 0xe0(%r14), %r14d
movl 0x18(%rsp), %edi
movslq %edi, %r11
movl %ebp, %ecx
movq %rcx, 0xb0(%rsp)
movl %edi, %ebp
imulq 0x78(%rsp), %rax
movq %rax, 0x28(%rsp)
movq $0x0, 0x20(%rsp)
decl %r14d
movq %r8, 0x10(%rsp)
movl %r9d, 0x38(%rsp)
movq %r10, 0x30(%rsp)
movq %r11, 0x8(%rsp)
testl %r8d, %r8d
jle 0x1b837b
movq 0xc8(%rsp), %rbx
imulq 0x20(%rsp), %rbx
addq 0xd8(%rsp), %rbx
movq 0xd0(%rsp), %rax
movq 0x208(%rax), %rcx
movq %rcx, 0x40(%rsp)
movq 0xe8(%rax), %rcx
movq %rcx, (%rsp)
movq 0x178(%rax), %rax
movq %rax, 0x48(%rsp)
movq 0xc0(%rsp), %r13
xorl %r15d, %r15d
xorps %xmm4, %xmm4
xorps %xmm0, %xmm0
testl %edi, %edi
jle 0x1b81d7
xorl %eax, %eax
xorl %ecx, %ecx
movsbl (%r12,%rax), %edx
movsbl (%r13,%rax), %esi
imull %edx, %esi
addl %esi, %ecx
incq %rax
cmpq %rax, %rbp
jne 0x1b81b8
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movss (%r10,%r15,4), %xmm1
ucomiss %xmm1, %xmm4
je 0x1b81f7
movq 0x40(%rsp), %rax
mulss (%rax), %xmm1
movss 0x25cb25(%rip), %xmm4 # 0x414d18
divss %xmm1, %xmm4
mulss %xmm0, %xmm4
testl %r9d, %r9d
je 0x1b820b
movq 0x48(%rsp), %rax
addss (%rax,%r15,4), %xmm4
cmpl $0x5, %r14d
ja 0x1b8229
leaq 0x262a28(%rip), %rcx # 0x41ac40
movslq (%rcx,%r14,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x25ec5f(%rip), %xmm4 # 0x416e88
movaps %xmm4, %xmm0
jmp 0x1b8366
movaps %xmm4, %xmm0
movaps %xmm4, 0x50(%rsp)
callq 0x563e0
addss 0x25cad2(%rip), %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x8(%rsp), %r11
movq 0x30(%rsp), %r10
movl 0x38(%rsp), %r9d
movq 0x10(%rsp), %r8
movl 0x18(%rsp), %edi
mulss 0x50(%rsp), %xmm0
jmp 0x1b8366
movq (%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x1b8363
jmp 0x1b8366
movss 0x25edbf(%rip), %xmm2 # 0x417058
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x25c049(%rip), %xmm0 # 0x4142f0
cmpltss 0x25edac(%rip), %xmm4 # 0x41705c
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x563e0
movq 0x8(%rsp), %r11
movq 0x30(%rsp), %r10
movl 0x38(%rsp), %r9d
movq 0x10(%rsp), %r8
movl 0x18(%rsp), %edi
movaps %xmm0, %xmm1
movss 0x25ca31(%rip), %xmm0 # 0x414d18
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x1b8366
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x25ca14(%rip), %xmm2 # 0x414d18
andps %xmm2, %xmm1
movq (%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x1b8366
movq (%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x25bfbe(%rip), %xmm3 # 0x4142f0
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x1b8366
movss 0x25c9d2(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x1b8229
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq %r11, %r13
cmpq %r8, %r15
jne 0x1b81aa
movq 0x20(%rsp), %rcx
incq %rcx
addq 0x28(%rsp), %r12
movq %rcx, 0x20(%rsp)
cmpq 0xb0(%rsp), %rcx
jne 0x1b8155
jmp 0x1b8679
movl 0xd0(%r14), %esi
movq 0x8(%r12), %rcx
movl $0x4, %edx
movq %rbx, %rdi
callq 0x5b024
movq (%rbx), %r10
movl $0xffffff9c, %r12d # imm = 0xFFFFFF9C
testq %r10, %r10
je 0x1b867c
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
testq %rax, %rax
je 0x1b867c
movslq 0xd0(%r14), %rax
movq %rax, 0x48(%rsp)
testq %rax, %rax
jle 0x1b8679
imull (%rsp), %ebp
movl %ebp, %eax
imull %r15d, %eax
movq 0x130(%r14), %rdx
movq 0x68(%rsp), %rcx
movq %rcx, 0x38(%rsp)
movq 0xa8(%rsp), %r12
movq 0x1c0(%r14), %rcx
movq %rcx, (%rsp)
movq 0x208(%r14), %rcx
movq %rcx, 0x20(%rsp)
movl 0xd4(%r14), %ecx
movl %ecx, 0x40(%rsp)
movl 0xe0(%r14), %ecx
movq 0x178(%r14), %rsi
movq %rsi, 0x30(%rsp)
movq 0xe8(%r14), %rsi
movq %rsi, 0x50(%rsp)
movslq %ebp, %rbx
cltq
movq %rax, 0x18(%rsp)
movl %ebx, %r14d
imulq 0x78(%rsp), %r12
xorl %r13d, %r13d
decl %ecx
movq %r10, 0x28(%rsp)
movq %rcx, 0x10(%rsp)
xorps %xmm0, %xmm0
testl %r15d, %r15d
jle 0x1b84c8
xorl %eax, %eax
movq 0x38(%rsp), %rcx
movq %rdx, %r11
xorl %esi, %esi
testl %ebp, %ebp
jle 0x1b84ab
xorl %edi, %edi
movsbl (%rcx,%rdi), %r8d
movsbl (%rdx,%rdi), %r9d
imull %r8d, %r9d
addl %r9d, %esi
incq %rdi
cmpq %rdi, %r14
jne 0x1b8492
incq %rax
addq %rbx, %rdx
addq %r12, %rcx
cmpq %r15, %rax
jne 0x1b848c
xorps %xmm0, %xmm0
cvtsi2ss %esi, %xmm0
movq %r11, %rdx
movq 0x10(%rsp), %rcx
movq (%rsp), %rax
movss (%rax,%r13,4), %xmm1
xorps %xmm4, %xmm4
ucomiss %xmm1, %xmm4
je 0x1b84ef
movq 0x20(%rsp), %rax
mulss (%rax), %xmm1
movss 0x25c82d(%rip), %xmm4 # 0x414d18
divss %xmm1, %xmm4
mulss %xmm0, %xmm4
cmpl $0x0, 0x40(%rsp)
je 0x1b8505
movq 0x30(%rsp), %rax
addss (%rax,%r13,4), %xmm4
cmpl $0x5, %ecx
ja 0x1b8522
leaq 0x262717(%rip), %rsi # 0x41ac28
movslq (%rsi,%rcx,4), %rax
addq %rsi, %rax
jmpq *%rax
maxss 0x25e966(%rip), %xmm4 # 0x416e88
movaps %xmm4, %xmm0
jmp 0x1b8660
movaps %xmm4, %xmm0
movq %rdx, 0x8(%rsp)
movaps %xmm4, 0xb0(%rsp)
callq 0x563e0
addss 0x25c7d1(%rip), %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x10(%rsp), %rcx
movq 0x8(%rsp), %rdx
movq 0x28(%rsp), %r10
mulss 0xb0(%rsp), %xmm0
jmp 0x1b8660
movq 0x50(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x1b865d
jmp 0x1b8660
movss 0x25eac3(%rip), %xmm2 # 0x417058
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x25bd4d(%rip), %xmm0 # 0x4142f0
cmpltss 0x25eab0(%rip), %xmm4 # 0x41705c
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
movq %rdx, 0x8(%rsp)
callq 0x563e0
movq 0x10(%rsp), %rcx
movq 0x8(%rsp), %rdx
movq 0x28(%rsp), %r10
movaps %xmm0, %xmm1
movss 0x25c739(%rip), %xmm0 # 0x414d18
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x1b8660
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x25c71c(%rip), %xmm2 # 0x414d18
andps %xmm2, %xmm1
movq 0x50(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x1b8660
movq 0x50(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x25bcc4(%rip), %xmm3 # 0x4142f0
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x1b8660
movss 0x25c6d8(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x1b8522
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movss %xmm0, (%r10,%r13,4)
incq %r13
addq 0x18(%rsp), %rdx
cmpq 0x48(%rsp), %r13
jne 0x1b8478
xorl %r12d, %r12d
movq 0x70(%rsp), %rax
testq %rax, %rax
je 0x1b86b2
lock
decl (%rax)
jne 0x1b86b2
movq 0x68(%rsp), %rsi
movq 0x88(%rsp), %rdi
testq %rdi, %rdi
je 0x1b86a5
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1b86b2
testq %rsi, %rsi
je 0x1b86b2
movq %rsi, %rdi
callq 0x563b0
movl %r12d, %eax
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1b870c
jmp 0x1b86cb
movq %rax, %rbx
movq 0x70(%rsp), %rax
testq %rax, %rax
je 0x1b8704
lock
decl (%rax)
jne 0x1b8704
movq 0x68(%rsp), %rsi
movq 0x88(%rsp), %rdi
testq %rdi, %rdi
jne 0x1b86fe
testq %rsi, %rsi
je 0x1b8704
movq %rsi, %rdi
callq 0x563b0
jmp 0x1b8704
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/innerproduct.cpp
|
ncnn::InnerProduct_x86::create_pipeline_int8_x86(ncnn::Option const&)
|
// Prepare the int8 pipeline: repack weights into a SIMD-friendly layout
// and precompute per-output-channel dequantization scales.
// Returns 0 on success.
int InnerProduct_x86::create_pipeline_int8_x86(const Option& opt)
{
    const int num_input = weight_data_size / num_output;

    // How many output channels get interleaved into one packed row.
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 8 == 0 ? 8 : 1;
    }
#endif // __SSE2__

    // Repack weights: src layout = inch-outch, dst = pb-inch-outch/pb.
    {
        Mat weight_data_r2 = weight_data.reshape(num_input, num_output);
        weight_data_tm.create(num_input, num_output / out_elempack, (size_t)out_elempack, out_elempack);

        for (int q = 0; q + out_elempack <= num_output; q += out_elempack)
        {
            signed char* outp = weight_data_tm.row<signed char>(q / out_elempack);

            for (int p = 0; p < num_input; p++)
            {
                // gather the p-th input weight of each of the out_elempack
                // consecutive output channels
                for (int k = 0; k < out_elempack; k++)
                {
                    outp[0] = weight_data_r2.row<signed char>(q + k)[p];
                    outp++;
                }
            }
        }
    }

    // Precompute dequantization scales:
    // scale_in = 1 / (input_scale * weight_scale), or 0 when the weight
    // scale is 0 (no usable quantization info for that channel).
    scale_in_data.create(num_output);
    for (int p = 0; p < num_output; p++)
    {
        const float w_scale = weight_data_int8_scales[p];
        scale_in_data[p] = (w_scale == 0) ? 0.f : 1.f / (bottom_blob_int8_scales[0] * w_scale);
    }

    // In light mode the original weights are no longer needed once repacked.
    if (opt.lightmode)
    {
        weight_data.release();
    }

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rax
leaq 0x130(%rdi,%rax), %rsi
movl -0x60(%rsi), %ecx
movl -0x58(%rsi), %eax
cltd
idivl %ecx
movl %eax, %ebp
testb $0x7, %cl
sete %r12b
andb 0x27(%rbx), %r12b
movl $0x8, %eax
movl $0x1, %r15d
cmovnel %eax, %r15d
leaq 0x8(%rsp), %rdi
movl %ebp, %edx
xorl %r8d, %r8d
callq 0x5a808
leaq 0x10(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
idivl %r15d
movl %ebp, %esi
movl %eax, %edx
movq %r15, %rcx
movl %r15d, %r8d
xorl %r9d, %r9d
callq 0x5a14a
movq (%r14), %rax
movq -0x18(%rax), %r8
cmpl 0xd0(%r14,%r8), %r15d
jg 0x1b8c98
leal -0x1(%r15), %edx
movzbl %r12b, %ecx
leal (%rcx,%rcx,2), %ecx
movl %ebp, %esi
xorl %edi, %edi
testl %ebp, %ebp
jle 0x1b8c84
movl %edi, %r8d
shrl %cl, %r8d
movslq 0x3c(%r14), %r9
imulq %r9, %r8
imulq 0x20(%r14), %r8
addq 0x10(%r14), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
leaq (%rdi,%r10), %r11
movslq 0x34(%rsp), %r12
imulq %r11, %r12
imulq 0x18(%rsp), %r12
addq 0x8(%rsp), %r12
movb (%r9,%r12), %r11b
movb %r11b, (%r8,%r10)
incq %r10
cmpq %r10, %r15
jne 0x1b8c4d
incq %r9
addq %r10, %r8
cmpq %rsi, %r9
jne 0x1b8c4a
movq -0x18(%rax), %r8
addq %r15, %rdi
leaq (%rdi,%rdx), %r9
movslq 0xd0(%r14,%r8), %r10
cmpq %r10, %r9
jl 0x1b8c2c
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1b8ccb
lock
decl (%rax)
jne 0x1b8ccb
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x1b8cbe
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1b8ccb
testq %rsi, %rsi
je 0x1b8ccb
movq %rsi, %rdi
callq 0x563b0
leaq 0x58(%r14), %r15
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %esi
movl $0x4, %edx
movq %r15, %rdi
xorl %ecx, %ecx
callq 0x5b024
movq (%r14), %rax
movq -0x18(%rax), %rcx
leaq (%r14,%rcx), %r12
cmpl $0x0, 0xd0(%r14,%rcx)
jle 0x1b8d55
movq (%r15), %rcx
xorl %edx, %edx
movss 0x25c008(%rip), %xmm0 # 0x414d18
movq 0x1c0(%r12), %rsi
movss (%rsi,%rdx,4), %xmm1
xorps %xmm2, %xmm2
ucomiss %xmm1, %xmm2
je 0x1b8d38
movq 0x208(%r12), %rsi
mulss (%rsi), %xmm1
movaps %xmm0, %xmm2
divss %xmm1, %xmm2
movss %xmm2, (%rcx,%rdx,4)
incq %rdx
movq -0x18(%rax), %rsi
leaq (%r14,%rsi), %r12
movslq 0xd0(%r14,%rsi), %rsi
cmpq %rsi, %rdx
jl 0x1b8d10
cmpb $0x0, (%rbx)
je 0x1b8dc9
leaq 0x130(%r12), %rbx
movq 0x138(%r12), %rax
testq %rax, %rax
je 0x1b8d9e
lock
decl (%rax)
jne 0x1b8d9e
movq 0x130(%r12), %rsi
movq 0x150(%r12), %rdi
testq %rdi, %rdi
je 0x1b8d91
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1b8d9e
testq %rsi, %rsi
je 0x1b8d9e
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x170(%r12)
xorps %xmm0, %xmm0
movups %xmm0, 0xc(%rbx)
movups %xmm0, (%rbx)
movups %xmm0, 0x158(%r12)
movl $0x0, 0x168(%r12)
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1b8e18
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1b8e10
lock
decl (%rax)
jne 0x1b8e10
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x1b8e0a
testq %rsi, %rsi
je 0x1b8e10
movq %rsi, %rdi
callq 0x563b0
jmp 0x1b8e10
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/innerproduct_x86.cpp
|
ncnn::InnerProduct_x86::forward_fp16s(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int InnerProduct_x86::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
const int num_input = weight_data_size / num_output;
if (bottom_blob.dims == 2 && bottom_blob.w == num_input && bottom_blob.h * bottom_blob.elempack > 1)
{
// gemm
int h = bottom_blob.h;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
top_blob.create(num_output, h, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
innerproduct_gemm_fp16s_sse(bottom_blob, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
return 0;
}
// flatten
Mat bottom_blob_flattened = bottom_blob;
if (bottom_blob.dims != 1)
{
Option opt_flatten = opt;
opt_flatten.blob_allocator = opt.workspace_allocator;
flatten->forward(bottom_blob, bottom_blob_flattened, opt_flatten);
}
size_t elemsize = bottom_blob_flattened.elemsize;
int elempack = bottom_blob_flattened.elempack;
int out_elempack = 1;
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#endif
}
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __AVX512F__
if (out_elempack == 16)
{
innerproduct_fp16s_pack16_avx512(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
#endif // __AVX512F__
if (out_elempack == 8)
{
innerproduct_fp16s_pack8_avx(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
if (out_elempack == 4)
{
innerproduct_fp16s_pack4_sse(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
if (out_elempack == 1)
{
innerproduct_fp16s_sse(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x90, %rsp
movq %rcx, %r14
movq %rdx, %rbx
movq %rdi, %r15
movl 0x28(%rsi), %ecx
cmpl $0x2, %ecx
jne 0x1c0497
movq (%r15), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r15,%rax), %r10d
movl 0xd8(%r15,%rax), %eax
cltd
idivl %r10d
movl 0x2c(%rsi), %edi
cmpl %eax, %edi
jne 0x1c049a
movl 0x18(%rsi), %r8d
movl 0x30(%rsi), %edx
movl %r8d, %r9d
imull %edx, %r9d
movl %eax, %edi
cmpl $0x2, %r9d
jl 0x1c049a
movq 0x10(%rsi), %rcx
movq 0x8(%r14), %r9
movq %rbx, %rdi
movl %r10d, %esi
callq 0x5a14a
cmpq $0x0, (%rbx)
je 0x1c05eb
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ecx, %ecx
testq %rax, %rax
movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C
cmovnel %ecx, %ebx
jmp 0x1c05f0
movl 0x2c(%rsi), %edi
movq (%rsi), %rax
movq %rax, 0x8(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0x10(%rsp)
movq 0x10(%rsi), %rdx
movq %rdx, 0x18(%rsp)
movl 0x18(%rsi), %edx
movl %edx, 0x20(%rsp)
movq 0x20(%rsi), %rdx
movq %rdx, 0x28(%rsp)
movl %ecx, 0x30(%rsp)
movl %edi, 0x34(%rsp)
movq 0x30(%rsi), %rdx
movq %rdx, 0x38(%rsp)
movl 0x38(%rsi), %edx
movl %edx, 0x40(%rsp)
movq 0x40(%rsi), %rdx
movq %rdx, 0x48(%rsp)
testq %rax, %rax
je 0x1c04f0
lock
incl (%rax)
movl 0x28(%rsi), %ecx
cmpl $0x1, %ecx
je 0x1c0533
movups (%r14), %xmm0
movups 0x10(%r14), %xmm1
movups 0x20(%r14), %xmm2
movups 0x30(%r14), %xmm3
leaq 0x50(%rsp), %rcx
movaps %xmm3, 0x30(%rcx)
movaps %xmm2, 0x20(%rcx)
movaps %xmm1, 0x10(%rcx)
movaps %xmm0, (%rcx)
movq 0x10(%r14), %rax
movq %rax, 0x8(%rcx)
movq 0x8(%r15), %rdi
movq (%rdi), %rax
leaq 0x8(%rsp), %rdx
callq *0x38(%rax)
movq 0x18(%rsp), %rax
movslq 0x20(%rsp), %rdi
movq (%r15), %rcx
movq -0x18(%rcx), %rcx
movl 0xd0(%r15,%rcx), %esi
movl $0x1, %ecx
cmpb $0x1, 0x27(%r14)
jne 0x1c0571
xorl %ecx, %ecx
testb $0x3, %sil
sete %cl
testb $0x7, %sil
leal 0x1(%rcx,%rcx,2), %edx
movl $0x8, %ecx
cmovnel %edx, %ecx
xorl %edx, %edx
divq %rdi
movl %ecx, %r9d
imulq %rax, %r9
movl %esi, %eax
cltd
idivl %ecx
movq 0x8(%r14), %r8
movq %rbx, %rdi
movl %eax, %esi
movq %r9, %rdx
callq 0x5a03c
cmpq $0x0, (%rbx)
je 0x1c05b1
movslq 0x38(%rbx), %rax
imulq 0x40(%rbx), %rax
xorl %ecx, %ecx
testq %rax, %rax
movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C
cmovnel %ecx, %ebx
jmp 0x1c05b6
movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1c05f0
lock
decl (%rax)
jne 0x1c05f0
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x1c05dc
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1c05f0
testq %rsi, %rsi
je 0x1c05f0
movq %rsi, %rdi
callq 0x563b0
jmp 0x1c05f0
movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C
movl %ebx, %eax
addq $0x90, %rsp
popq %rbx
popq %r14
popq %r15
retq
jmp 0x1c0641
jmp 0x1c0603
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1c0639
lock
decl (%rax)
jne 0x1c0639
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x1c0633
testq %rsi, %rsi
je 0x1c0639
movq %rsi, %rdi
callq 0x563b0
jmp 0x1c0639
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/src/layer/x86/innerproduct_x86.cpp
|
ncnn::InnerProduct_x86_avx512::create_pipeline_int8_x86(ncnn::Option const&)
|
// Prepare the int8 pipeline: repack weights into a SIMD-friendly layout
// and precompute per-output-channel dequantization scales.
// Returns 0 on success.
int InnerProduct_x86_avx512::create_pipeline_int8_x86(const Option& opt)
{
    const int num_input = weight_data_size / num_output;

    // How many output channels get interleaved into one packed row.
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 8 == 0 ? 8 : 1;
    }
#endif // __SSE2__

    // Repack weights: src layout = inch-outch, dst = pb-inch-outch/pb.
    {
        Mat weight_data_r2 = weight_data.reshape(num_input, num_output);
        weight_data_tm.create(num_input, num_output / out_elempack, (size_t)out_elempack, out_elempack);

        for (int q = 0; q + out_elempack <= num_output; q += out_elempack)
        {
            signed char* outp = weight_data_tm.row<signed char>(q / out_elempack);

            for (int p = 0; p < num_input; p++)
            {
                // gather the p-th input weight of each of the out_elempack
                // consecutive output channels
                for (int k = 0; k < out_elempack; k++)
                {
                    outp[0] = weight_data_r2.row<signed char>(q + k)[p];
                    outp++;
                }
            }
        }
    }

    // Precompute dequantization scales:
    // scale_in = 1 / (input_scale * weight_scale), or 0 when the weight
    // scale is 0 (no usable quantization info for that channel).
    scale_in_data.create(num_output);
    for (int p = 0; p < num_output; p++)
    {
        const float w_scale = weight_data_int8_scales[p];
        scale_in_data[p] = (w_scale == 0) ? 0.f : 1.f / (bottom_blob_int8_scales[0] * w_scale);
    }

    // In light mode the original weights are no longer needed once repacked.
    if (opt.lightmode)
    {
        weight_data.release();
    }

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rax
leaq 0x130(%rdi,%rax), %rsi
movl -0x60(%rsi), %ecx
movl -0x58(%rsi), %eax
cltd
idivl %ecx
movl %eax, %ebp
testb $0x7, %cl
sete %r12b
andb 0x27(%rbx), %r12b
movl $0x8, %eax
movl $0x1, %r15d
cmovnel %eax, %r15d
leaq 0x8(%rsp), %rdi
movl %ebp, %edx
xorl %r8d, %r8d
callq 0x5a808
leaq 0x10(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
idivl %r15d
movl %ebp, %esi
movl %eax, %edx
movq %r15, %rcx
movl %r15d, %r8d
xorl %r9d, %r9d
callq 0x5a14a
movq (%r14), %rax
movq -0x18(%rax), %r8
cmpl 0xd0(%r14,%r8), %r15d
jg 0x1c0c16
leal -0x1(%r15), %edx
movzbl %r12b, %ecx
leal (%rcx,%rcx,2), %ecx
movl %ebp, %esi
xorl %edi, %edi
testl %ebp, %ebp
jle 0x1c0c02
movl %edi, %r8d
shrl %cl, %r8d
movslq 0x3c(%r14), %r9
imulq %r9, %r8
imulq 0x20(%r14), %r8
addq 0x10(%r14), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
leaq (%rdi,%r10), %r11
movslq 0x34(%rsp), %r12
imulq %r11, %r12
imulq 0x18(%rsp), %r12
addq 0x8(%rsp), %r12
movb (%r9,%r12), %r11b
movb %r11b, (%r8,%r10)
incq %r10
cmpq %r10, %r15
jne 0x1c0bcb
incq %r9
addq %r10, %r8
cmpq %rsi, %r9
jne 0x1c0bc8
movq -0x18(%rax), %r8
addq %r15, %rdi
leaq (%rdi,%rdx), %r9
movslq 0xd0(%r14,%r8), %r10
cmpq %r10, %r9
jl 0x1c0baa
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1c0c49
lock
decl (%rax)
jne 0x1c0c49
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x1c0c3c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1c0c49
testq %rsi, %rsi
je 0x1c0c49
movq %rsi, %rdi
callq 0x563b0
leaq 0x58(%r14), %r15
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %esi
movl $0x4, %edx
movq %r15, %rdi
xorl %ecx, %ecx
callq 0x5b024
movq (%r14), %rax
movq -0x18(%rax), %rcx
leaq (%r14,%rcx), %r12
cmpl $0x0, 0xd0(%r14,%rcx)
jle 0x1c0cd2
movq (%r15), %rcx
xorl %edx, %edx
vmovss 0x25408a(%rip), %xmm0 # 0x414d18
movq 0x1c0(%r12), %rsi
vmovss (%rsi,%rdx,4), %xmm2
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm2, %xmm1
je 0x1c0cb5
movq 0x208(%r12), %rsi
vmulss (%rsi), %xmm2, %xmm1
vdivss %xmm1, %xmm0, %xmm1
vmovss %xmm1, (%rcx,%rdx,4)
incq %rdx
movq -0x18(%rax), %rsi
leaq (%r14,%rsi), %r12
movslq 0xd0(%r14,%rsi), %rsi
cmpq %rsi, %rdx
jl 0x1c0c8e
cmpb $0x0, (%rbx)
je 0x1c0d4a
leaq 0x130(%r12), %rbx
movq 0x138(%r12), %rax
testq %rax, %rax
je 0x1c0d1b
lock
decl (%rax)
jne 0x1c0d1b
movq 0x130(%r12), %rsi
movq 0x150(%r12), %rdi
testq %rdi, %rdi
je 0x1c0d0e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1c0d1b
testq %rsi, %rsi
je 0x1c0d1b
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x170(%r12)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x158(%r12)
movl $0x0, 0x168(%r12)
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1c0d99
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1c0d91
lock
decl (%rax)
jne 0x1c0d91
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x1c0d8b
testq %rsi, %rsi
je 0x1c0d91
movq %rsi, %rdi
callq 0x563b0
jmp 0x1c0d91
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/innerproduct_x86_avx512.cpp
|
ncnn::InnerProduct_x86_avx512::forward_fp16s(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int InnerProduct_x86_avx512::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
const int num_input = weight_data_size / num_output;
if (bottom_blob.dims == 2 && bottom_blob.w == num_input && bottom_blob.h * bottom_blob.elempack > 1)
{
// gemm
int h = bottom_blob.h;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
top_blob.create(num_output, h, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
innerproduct_gemm_fp16s_sse(bottom_blob, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
return 0;
}
// flatten
Mat bottom_blob_flattened = bottom_blob;
if (bottom_blob.dims != 1)
{
Option opt_flatten = opt;
opt_flatten.blob_allocator = opt.workspace_allocator;
flatten->forward(bottom_blob, bottom_blob_flattened, opt_flatten);
}
size_t elemsize = bottom_blob_flattened.elemsize;
int elempack = bottom_blob_flattened.elempack;
int out_elempack = 1;
if (opt.use_packing_layout)
{
#if __AVX512F__
out_elempack = num_output % 16 == 0 ? 16 : num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#endif
}
size_t out_elemsize = elemsize / elempack * out_elempack;
top_blob.create(num_output / out_elempack, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __AVX512F__
if (out_elempack == 16)
{
innerproduct_fp16s_pack16_avx512(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
#endif // __AVX512F__
if (out_elempack == 8)
{
innerproduct_fp16s_pack8_avx(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
if (out_elempack == 4)
{
innerproduct_fp16s_pack4_sse(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
if (out_elempack == 1)
{
innerproduct_fp16s_sse(bottom_blob_flattened, top_blob, weight_data_tm, bias_data, activation_type, activation_params, opt);
}
return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x3e8, %rsp # imm = 0x3E8
movq %rcx, %r12
movq %rdx, %r13
movq %rsi, %r15
movq %rdi, %rbp
movl 0x28(%rsi), %ecx
cmpl $0x2, %ecx
jne 0x1dfe10
movq (%rbp), %rax
movq -0x18(%rax), %rax
movl 0xd0(%rbp,%rax), %esi
movl 0xd8(%rbp,%rax), %eax
cltd
idivl %esi
movl 0x2c(%r15), %edi
cmpl %eax, %edi
jne 0x1dfe14
movl 0x18(%r15), %r8d
movl 0x30(%r15), %edx
movl %r8d, %r9d
imull %edx, %r9d
movl %eax, %edi
cmpl $0x2, %r9d
jl 0x1dfe14
movq 0x10(%r15), %rcx
movq 0x8(%r12), %r9
movq %r13, %rdi
callq 0x5a14a
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%r13)
je 0x1e27bc
movslq 0x38(%r13), %rax
imulq 0x40(%r13), %rax
testq %rax, %rax
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
je 0x1e27bc
leaq 0x10(%rbp), %rdx
movq (%rbp), %rax
movq -0x18(%rax), %rax
leaq (%rax,%rbp), %r9
addq $0xe8, %r9
movl -0x8(%r9), %r8d
movq 0x90(%r9), %rcx
movzbl 0x27(%r12), %eax
movl %eax, (%rsp)
movq %r15, %rdi
movq %r13, %rsi
callq 0x1e2850
xorl %eax, %eax
jmp 0x1e27bc
movl 0x2c(%r15), %edi
movq 0x8(%r15), %rax
vmovups (%r15), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x10(%r15), %rdx
movq %rdx, 0x80(%rsp)
movl 0x18(%r15), %edx
movl %edx, 0x88(%rsp)
movq 0x20(%r15), %rdx
movq %rdx, 0x90(%rsp)
movl %ecx, 0x98(%rsp)
movl %edi, 0x9c(%rsp)
movq 0x30(%r15), %rdx
movq %rdx, 0xa0(%rsp)
movl 0x38(%r15), %edx
movl %edx, 0xa8(%rsp)
movq 0x40(%r15), %rdx
movq %rdx, 0xb0(%rsp)
testq %rax, %rax
je 0x1dfe83
lock
incl (%rax)
movl 0x28(%r15), %ecx
cmpl $0x1, %ecx
je 0x1dfebb
vmovups (%r12), %zmm0
leaq 0x3a0(%rsp), %rcx
vmovups %zmm0, (%rcx)
movq 0x10(%r12), %rax
movq %rax, 0x8(%rcx)
movq 0x8(%rbp), %rdi
movq (%rdi), %rax
leaq 0x70(%rsp), %rdx
movq %r15, %rsi
vzeroupper
callq *0x38(%rax)
movq 0x80(%rsp), %rax
movslq 0x88(%rsp), %rsi
movq (%rbp), %rcx
movq -0x18(%rcx), %rcx
movl 0xd0(%rbp,%rcx), %ecx
movl $0x1, %r15d
cmpb $0x1, 0x27(%r12)
jne 0x1dff11
testb $0xf, %cl
je 0x1dff03
testb $0x7, %cl
je 0x1dff0b
xorl %edx, %edx
testb $0x3, %cl
sete %dl
leal (%rdx,%rdx,2), %r15d
incl %r15d
jmp 0x1dff11
movl $0x10, %r15d
jmp 0x1dff11
movl $0x8, %r15d
xorl %edx, %edx
divq %rsi
movl %r15d, %r9d
imulq %rax, %r9
movl %ecx, %eax
cltd
idivl %r15d
movq 0x8(%r12), %r8
movq %r13, %rdi
movl %eax, %esi
movq %r9, %rdx
movl %r15d, %ecx
callq 0x5a03c
movl $0xffffff9c, 0x14(%rsp) # imm = 0xFFFFFF9C
cmpq $0x0, (%r13)
je 0x1e277c
movslq 0x38(%r13), %rax
imulq 0x40(%r13), %rax
testq %rax, %rax
je 0x1e277c
cmpl $0x7, %r15d
movq %r13, 0x20(%rsp)
movq %rbp, 0x18(%rsp)
jg 0x1e055b
cmpl $0x1, %r15d
je 0x1e0ba2
movl $0x0, 0x14(%rsp)
cmpl $0x4, %r15d
jne 0x1e277c
movq (%rbp), %rax
movslq 0x2c(%r13), %rcx
testq %rcx, %rcx
jle 0x1e277c
movq -0x18(%rax), %rdx
movl 0xe0(%rbp,%rdx), %esi
movq 0x178(%rbp,%rdx), %rdi
movl 0x88(%rsp), %r8d
imull 0x9c(%rsp), %r8d
movl %r8d, %r9d
andl $-0x8, %r9d
xorl %r10d, %r10d
decl %esi
leaq 0x23b2db(%rip), %r11 # 0x41b2b0
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x237075(%rip), %xmm25 # 0x417058
vbroadcastss 0x23706f(%rip), %xmm26 # 0x41705c
vbroadcastss 0x23469a(%rip), %xmm4 # 0x414690
vbroadcastss 0x237060(%rip), %xmm22 # 0x417060
vbroadcastss 0x234d0f(%rip), %xmm6 # 0x414d18
vbroadcastss 0x237052(%rip), %xmm7 # 0x417064
vbroadcastss 0x237051(%rip), %xmm9 # 0x41706c
vbroadcastss 0x23704c(%rip), %xmm10 # 0x417070
vbroadcastss 0x237047(%rip), %xmm11 # 0x417074
vbroadcastss 0x237042(%rip), %xmm12 # 0x417078
vbroadcastss 0x23703d(%rip), %xmm13 # 0x41707c
vpbroadcastd 0x234cd0(%rip), %xmm14 # 0x414d18
vbroadcastss 0x237017(%rip), %xmm15 # 0x417068
vbroadcastss 0x23704d(%rip), %xmm27 # 0x4170a8
vbroadcastss 0x237047(%rip), %xmm28 # 0x4170ac
vbroadcastss 0x237041(%rip), %xmm29 # 0x4170b0
vbroadcastss 0x23703b(%rip), %xmm30 # 0x4170b4
vbroadcastss 0x238d69(%rip), %xmm31 # 0x418dec
vxorps %xmm1, %xmm1, %xmm1
testq %rdi, %rdi
je 0x1e0098
movq %r10, %rbx
shlq $0x4, %rbx
vmovups (%rdi,%rbx), %xmm1
movslq 0x3c(%rbp), %rbx
imulq %r10, %rbx
imulq 0x20(%rbp), %rbx
addq 0x10(%rbp), %rbx
movq 0x70(%rsp), %r14
cmpl $0x8, %r8d
jl 0x1e0182
vxorps %xmm16, %xmm16, %xmm16
movl $0x7, %ebp
vxorps %xmm17, %xmm17, %xmm17
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm8, %xmm8, %xmm8
vbroadcastss (%r14), %xmm19
vbroadcastss 0x4(%r14), %xmm20
vbroadcastss 0x8(%r14), %xmm21
vbroadcastss 0xc(%r14), %xmm23
vinsertf32x4 $0x1, %xmm20, %ymm19, %ymm19
vinsertf32x4 $0x1, %xmm23, %ymm21, %ymm20
vbroadcastss 0x10(%r14), %xmm21
vbroadcastss 0x14(%r14), %xmm23
vbroadcastss 0x18(%r14), %xmm24
vinsertf32x4 $0x1, %xmm23, %ymm21, %ymm21
vbroadcastss 0x1c(%r14), %xmm23
vinsertf32x4 $0x1, %xmm23, %ymm24, %ymm23
vlddqu (%rbx), %ymm2
vlddqu 0x20(%rbx), %ymm3
vcvtph2ps %xmm2, %ymm24
vfmadd231ps %ymm24, %ymm19, %ymm16 # ymm16 = (ymm19 * ymm24) + ymm16
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps %ymm2, %ymm20, %ymm17 # ymm17 = (ymm20 * ymm2) + ymm17
vcvtph2ps %xmm3, %ymm2
vfmadd231ps %ymm2, %ymm21, %ymm18 # ymm18 = (ymm21 * ymm2) + ymm18
vextractf128 $0x1, %ymm3, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps %ymm2, %ymm23, %ymm8 # ymm8 = (ymm23 * ymm2) + ymm8
addq $0x20, %r14
addq $0x40, %rbx
addl $0x8, %ebp
cmpl %r8d, %ebp
jl 0x1e00d4
movl %r9d, %r12d
jmp 0x1e019c
xorl %r12d, %r12d
vxorps %xmm8, %xmm8, %xmm8
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm17, %xmm17, %xmm17
vxorps %xmm16, %xmm16, %xmm16
movl %r12d, %ebp
orl $0x3, %ebp
cmpl %r8d, %ebp
jge 0x1e0206
vbroadcastss (%r14), %xmm2
vbroadcastss 0x4(%r14), %xmm3
vbroadcastss 0x8(%r14), %xmm19
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vbroadcastss 0xc(%r14), %xmm3
vinsertf32x4 $0x1, %xmm3, %ymm19, %ymm3
vlddqu (%rbx), %ymm5
vcvtph2ps %xmm5, %ymm19
vfmadd231ps %ymm19, %ymm2, %ymm16 # ymm16 = (ymm2 * ymm19) + ymm16
vextractf128 $0x1, %ymm5, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps %ymm2, %ymm3, %ymm17 # ymm17 = (ymm3 * ymm2) + ymm17
addq $0x10, %r14
addq $0x20, %rbx
leal 0x4(%r12), %ebp
addl $0x7, %r12d
cmpl %r8d, %r12d
movl %ebp, %r12d
jl 0x1e01a7
movl %r8d, %r13d
subl %r12d, %r13d
jle 0x1e022d
xorl %r12d, %r12d
movq 0x18(%rsp), %rbp
vcvtph2ps (%rbx,%r12,8), %xmm2
vfmadd231ps (%r14,%r12,4){1to4}, %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
incq %r12
cmpl %r12d, %r13d
jne 0x1e0216
jmp 0x1e0232
movq 0x18(%rsp), %rbp
vaddps %ymm8, %ymm18, %ymm2
vaddps %ymm16, %ymm17, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm1, %xmm2, %xmm1
cmpl $0x5, %esi
ja 0x1e052a
movslq (%r11,%rsi,4), %rbx
addq %r11, %rbx
movq 0x20(%rsp), %r13
jmpq *%rbx
vmaxps %xmm0, %xmm1, %xmm1
jmp 0x1e052f
vminps %xmm25, %xmm1, %xmm2
vmaxps %xmm26, %xmm2, %xmm2
vmovaps %xmm22, %xmm3
vfmadd213ps %xmm4, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm4
vcvttps2dq %xmm3, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm3, %k1
vsubps %xmm6, %xmm5, %xmm5 {%k1}
vfmsub231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) - xmm2
vfnmsub231ps %xmm15, %xmm5, %xmm2 # xmm2 = -(xmm5 * xmm15) - xmm2
vmulps %xmm2, %xmm2, %xmm3
vmovaps %xmm9, %xmm8
vfmadd213ps %xmm10, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm10
vfmadd213ps %xmm11, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm11
vfmadd213ps %xmm12, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm12
vfmadd213ps %xmm13, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm13
vfmadd213ps %xmm4, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm4
vfmadd213ps %xmm2, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm8) + xmm2
vaddps %xmm6, %xmm8, %xmm2
vcvttps2dq %xmm5, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm3, %xmm14, %xmm3
vfmadd213ps %xmm6, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm6
vcmpleps %xmm0, %xmm3, %k1
vmaxps 0x236d8c(%rip){1to4}, %xmm3, %xmm2 # 0x417080
vpsrld $0x17, %xmm2, %xmm3
vpbroadcastd 0x236d82(%rip), %xmm5 # 0x417084
vpternlogd $0xea, 0x234383(%rip){1to4}, %xmm5, %xmm2 # 0x414690
vcmpltps 0x236d74(%rip){1to4}, %xmm2, %k2 # 0x41708c
vaddps 0x236d6e(%rip){1to4}, %xmm2, %xmm5 # 0x417090
vaddps %xmm2, %xmm5, %xmm5 {%k2}
vmulps %xmm5, %xmm5, %xmm2
vbroadcastss 0x236d5f(%rip), %xmm8 # 0x417094
vfmadd213ps 0x236d59(%rip){1to4}, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + mem
vfmadd213ps 0x236d53(%rip){1to4}, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + mem
vfmadd213ps 0x236d4d(%rip){1to4}, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + mem
vfmadd213ps 0x236d47(%rip){1to4}, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + mem
vfmadd213ps %xmm27, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + xmm27
vfmadd213ps %xmm28, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + xmm28
vfmadd213ps %xmm29, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + xmm29
vfmadd213ps %xmm30, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm8) + xmm30
vmulps %xmm5, %xmm2, %xmm16
vmulps %xmm8, %xmm16, %xmm8
vpaddd 0x236cfd(%rip){1to4}, %xmm3, %xmm3 # 0x417088
vcvtdq2ps %xmm3, %xmm3
vsubps %xmm6, %xmm3, %xmm3 {%k2}
vfmadd231ps %xmm15, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm15) + xmm8
vfmsub231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) - xmm8
vsubps %xmm5, %xmm8, %xmm2
vfnmadd231ps %xmm3, %xmm7, %xmm2 # xmm2 = -(xmm7 * xmm3) + xmm2
vaddps %xmm2, %xmm2, %xmm2
vbroadcastss 0x23738e(%rip), %xmm2 {%k1} # 0x417744
vminps %xmm25, %xmm2, %xmm2
vmaxps %xmm26, %xmm2, %xmm2
vmovaps %xmm22, %xmm3
vfmadd213ps %xmm4, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm4
vcvttps2dq %xmm3, %xmm5
vcvtdq2ps %xmm5, %xmm5
vcmpltps %xmm5, %xmm3, %k1
vsubps %xmm6, %xmm5, %xmm5 {%k1}
vfmsub231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) - xmm2
vfnmsub231ps %xmm15, %xmm5, %xmm2 # xmm2 = -(xmm5 * xmm15) - xmm2
vmulps %xmm2, %xmm2, %xmm3
vmovaps %xmm9, %xmm8
vfmadd213ps %xmm10, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm10
vfmadd213ps %xmm11, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm11
vfmadd213ps %xmm12, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm12
vfmadd213ps %xmm13, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm13
vfmadd213ps %xmm4, %xmm2, %xmm8 # xmm8 = (xmm2 * xmm8) + xmm4
vfmadd213ps %xmm2, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm8) + xmm2
vaddps %xmm6, %xmm8, %xmm2
vcvttps2dq %xmm5, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm3, %xmm14, %xmm3
vfmadd213ps %xmm6, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm6
vrcpps %xmm3, %xmm2
vaddps %xmm2, %xmm2, %xmm5
vfmsub213ps %xmm31, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm3) - xmm31
vfnmadd213ps %xmm5, %xmm2, %xmm3 # xmm3 = -(xmm2 * xmm3) + xmm5
vfmsub231ps %xmm3, %xmm1, %xmm1 # xmm1 = (xmm1 * xmm3) - xmm1
jmp 0x1e052f
movq 0xe8(%rbp,%rdx), %rbx
vmaxps (%rbx){1to4}, %xmm1, %xmm1
vminps 0x4(%rbx){1to4}, %xmm1, %xmm1
jmp 0x1e052f
vxorps 0x236bea(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vminps %xmm25, %xmm1, %xmm1
vmaxps %xmm26, %xmm1, %xmm1
vmovaps %xmm4, %xmm2
vfmadd231ps %xmm22, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm22) + xmm2
vcvttps2dq %xmm2, %xmm3
vcvtdq2ps %xmm3, %xmm3
vcmpltps %xmm3, %xmm2, %k1
vsubps %xmm6, %xmm3, %xmm3 {%k1}
vfmsub231ps %xmm7, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm7) - xmm1
vfmsub231ps 0x238944(%rip){1to4}, %xmm3, %xmm1 # xmm1 = (xmm3 * mem) - xmm1
vmulps %xmm1, %xmm1, %xmm2
vmovaps %xmm9, %xmm5
vfmadd213ps %xmm10, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm10
vfmadd213ps %xmm11, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm11
vfmadd213ps %xmm12, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm12
vfmadd213ps %xmm13, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm13
vfmadd213ps %xmm4, %xmm1, %xmm5 # xmm5 = (xmm1 * xmm5) + xmm4
vfmadd213ps %xmm1, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm5) + xmm1
vaddps %xmm6, %xmm5, %xmm2
vcvttps2dq %xmm3, %xmm1
vpslld $0x17, %xmm1, %xmm1
vpaddd %xmm1, %xmm14, %xmm1
vfmadd213ps %xmm6, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) + xmm6
vrcpps %xmm1, %xmm2
vfmsub213ps %xmm6, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm1) - xmm6
vfnmadd132ps %xmm2, %xmm2, %xmm1 # xmm1 = -(xmm1 * xmm2) + xmm2
jmp 0x1e052f
movq 0xe8(%rbp,%rdx), %rbx
vmaxps %xmm0, %xmm1, %xmm2
vminps %xmm0, %xmm1, %xmm1
vfmadd132ps (%rbx){1to4}, %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
jmp 0x1e052f
movq 0xe8(%rbp,%rdx), %rbx
vbroadcastss (%rbx), %xmm2
vfmadd213ps 0x4(%rbx){1to4}, %xmm1, %xmm2 # xmm2 = (xmm1 * xmm2) + mem
vmaxps %xmm0, %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
jmp 0x1e052f
movq 0x20(%rsp), %r13
movq (%r13), %rbx
movq %r10, %r14
shlq $0x4, %r14
vmovups %xmm1, (%rbx,%r14)
incq %r10
cmpq %rcx, %r10
jne 0x1e0083
cmpl $0x1, %r15d
je 0x1e0bae
jmp 0x1e1d3f
cmpl $0x8, %r15d
je 0x1e1702
movl $0x0, 0x14(%rsp)
cmpl $0x10, %r15d
jne 0x1e277c
movslq 0x2c(%r13), %rax
testq %rax, %rax
jle 0x1e277c
movq (%rbp), %rcx
movq -0x18(%rcx), %rcx
movl 0xe0(%rbp,%rcx), %edx
movq 0x178(%rbp,%rcx), %rsi
movl 0x88(%rsp), %edi
imull 0x9c(%rsp), %edi
vmovaps 0x23af8c(%rip), %zmm0 # 0x41b540
vmovaps 0x23afc2(%rip), %zmm1 # 0x41b580
vmovaps 0x23aff8(%rip), %zmm2 # 0x41b5c0
vmovaps 0x23b02e(%rip), %zmm3 # 0x41b600
vmovaps 0x23b1a4(%rip), %zmm4 # 0x41b780
vmovaps 0x23b55a(%rip), %zmm25 # 0x41bb40
vbroadcastss 0x236a64(%rip), %zmm6 # 0x417054
vxorps %zmm6, %zmm25, %zmm7
vmovaps 0x23b500(%rip), %zmm8 # 0x41bb00
vxorps %zmm6, %zmm8, %zmm9
vmovaps 0x23b030(%rip), %zmm10 # 0x41b640
vmovaps 0x23b066(%rip), %zmm11 # 0x41b680
vmovaps 0x23b09c(%rip), %zmm12 # 0x41b6c0
vmovaps 0x23b0d2(%rip), %zmm13 # 0x41b700
vmovaps 0x23b108(%rip), %zmm14 # 0x41b740
vmovdqa64 0x23b17e(%rip), %zmm15 # 0x41b7c0
vmovaps 0x23b434(%rip), %zmm26 # 0x41ba80
vmovaps 0x23b46a(%rip), %zmm27 # 0x41bac0
vxorps %zmm6, %zmm4, %zmm28
movl %edi, %r8d
andl $-0x8, %r8d
xorl %r9d, %r9d
decl %edx
leaq 0x23aca1(%rip), %r10 # 0x41b310
vxorps %xmm29, %xmm29, %xmm29
vbroadcastss 0x234699(%rip), %zmm30 # 0x414d18
vbroadcastss 0x23875b(%rip), %zmm31 # 0x418de4
vxorps %xmm16, %xmm16, %xmm16
testq %rsi, %rsi
je 0x1e06a2
movq %r9, %r11
shlq $0x6, %r11
vmovups (%rsi,%r11), %zmm16
movslq 0x3c(%rbp), %r11
imulq %r9, %r11
imulq 0x20(%rbp), %r11
addq 0x10(%rbp), %r11
movq 0x70(%rsp), %rbx
cmpl $0x8, %edi
jl 0x1e0775
vxorps %xmm17, %xmm17, %xmm17
movl $0x7, %ebp
vxorps %xmm20, %xmm20, %xmm20
vxorps %xmm19, %xmm19, %xmm19
vxorps %xmm22, %xmm22, %xmm22
vxorps %xmm21, %xmm21, %xmm21
vxorps %xmm23, %xmm23, %xmm23
vxorps %xmm18, %xmm18, %xmm18
vcvtph2ps (%r11), %zmm24
vfmadd231ps (%rbx){1to16}, %zmm24, %zmm16 # zmm16 = (zmm24 * mem) + zmm16
vcvtph2ps 0x20(%r11), %zmm24
vfmadd231ps 0x4(%rbx){1to16}, %zmm24, %zmm17 # zmm17 = (zmm24 * mem) + zmm17
vcvtph2ps 0x40(%r11), %zmm24
vfmadd231ps 0x8(%rbx){1to16}, %zmm24, %zmm20 # zmm20 = (zmm24 * mem) + zmm20
vcvtph2ps 0x60(%r11), %zmm24
vfmadd231ps 0xc(%rbx){1to16}, %zmm24, %zmm19 # zmm19 = (zmm24 * mem) + zmm19
vcvtph2ps 0x80(%r11), %zmm24
vfmadd231ps 0x10(%rbx){1to16}, %zmm24, %zmm22 # zmm22 = (zmm24 * mem) + zmm22
vcvtph2ps 0xa0(%r11), %zmm24
vfmadd231ps 0x14(%rbx){1to16}, %zmm24, %zmm21 # zmm21 = (zmm24 * mem) + zmm21
vcvtph2ps 0xc0(%r11), %zmm24
vfmadd231ps 0x18(%rbx){1to16}, %zmm24, %zmm23 # zmm23 = (zmm24 * mem) + zmm23
vcvtph2ps 0xe0(%r11), %zmm24
vfmadd231ps 0x1c(%rbx){1to16}, %zmm24, %zmm18 # zmm18 = (zmm24 * mem) + zmm18
addq $0x20, %rbx
addq $0x100, %r11 # imm = 0x100
addl $0x8, %ebp
cmpl %edi, %ebp
jl 0x1e06f0
movl %r8d, %r14d
jmp 0x1e07a2
xorl %r14d, %r14d
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm23, %xmm23, %xmm23
vxorps %xmm21, %xmm21, %xmm21
vxorps %xmm22, %xmm22, %xmm22
vxorps %xmm19, %xmm19, %xmm19
vxorps %xmm20, %xmm20, %xmm20
vxorps %xmm17, %xmm17, %xmm17
movl %r14d, %ebp
orl $0x3, %ebp
cmpl %edi, %ebp
jge 0x1e07fa
vcvtph2ps (%r11), %zmm24
vfmadd231ps (%rbx){1to16}, %zmm24, %zmm16 # zmm16 = (zmm24 * mem) + zmm16
vcvtph2ps 0x20(%r11), %zmm24
vfmadd231ps 0x4(%rbx){1to16}, %zmm24, %zmm17 # zmm17 = (zmm24 * mem) + zmm17
vcvtph2ps 0x40(%r11), %zmm24
vfmadd231ps 0x8(%rbx){1to16}, %zmm24, %zmm20 # zmm20 = (zmm24 * mem) + zmm20
vcvtph2ps 0x60(%r11), %zmm24
vfmadd231ps 0xc(%rbx){1to16}, %zmm24, %zmm19 # zmm19 = (zmm24 * mem) + zmm19
addq $0x10, %rbx
subq $-0x80, %r11
leal 0x4(%r14), %ebp
addl $0x7, %r14d
cmpl %edi, %r14d
movl %ebp, %r14d
jl 0x1e07ac
movl %edi, %r15d
subl %r14d, %r15d
jle 0x1e082a
xorl %r14d, %r14d
movq 0x18(%rsp), %rbp
vlddqu (%r11), %ymm5
vcvtph2ps %ymm5, %zmm5
vfmadd231ps (%rbx,%r14,4){1to16}, %zmm5, %zmm16 # zmm16 = (zmm5 * mem) + zmm16
addq $0x20, %r11
incq %r14
cmpl %r14d, %r15d
jne 0x1e080a
jmp 0x1e082f
movq 0x18(%rsp), %rbp
vaddps %zmm18, %zmm23, %zmm5
vaddps %zmm22, %zmm21, %zmm18
vaddps %zmm18, %zmm5, %zmm5
vaddps %zmm20, %zmm19, %zmm18
vaddps %zmm17, %zmm18, %zmm17
vaddps %zmm17, %zmm5, %zmm5
vaddps %zmm16, %zmm5, %zmm16
cmpl $0x5, %edx
ja 0x1e0b7f
movslq (%r10,%rdx,4), %r11
addq %r10, %r11
jmpq *%r11
vmaxps %zmm29, %zmm16, %zmm16
jmp 0x1e0b7f
vminps %zmm1, %zmm16, %zmm5
vmaxps %zmm2, %zmm5, %zmm5
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm4, %zmm5, %zmm17 # zmm17 = (zmm5 * zmm17) + zmm4
vrndscaleps $0x1, %zmm17, %zmm18
vcmpltps %zmm18, %zmm17, %k1
vsubps %zmm0, %zmm18, %zmm18 {%k1}
vfmadd231ps %zmm7, %zmm18, %zmm5 # zmm5 = (zmm18 * zmm7) + zmm5
vfmadd231ps %zmm9, %zmm18, %zmm5 # zmm5 = (zmm18 * zmm9) + zmm5
vmulps %zmm5, %zmm5, %zmm17
vmovaps %zmm5, %zmm19
vfmadd213ps %zmm11, %zmm10, %zmm19 # zmm19 = (zmm10 * zmm19) + zmm11
vfmadd213ps %zmm12, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm12
vfmadd213ps %zmm13, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm13
vfmadd213ps %zmm14, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm14
vfmadd213ps %zmm4, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm4
vfmadd213ps %zmm5, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm5
vaddps %zmm0, %zmm19, %zmm5
vcvttps2dq %zmm18, %zmm17
vpaddd %zmm15, %zmm17, %zmm17
vpslld $0x17, %zmm17, %zmm17
vfmadd213ps %zmm30, %zmm5, %zmm17 # zmm17 = (zmm5 * zmm17) + zmm30
vmaxps 0x23aef8(%rip), %zmm17, %zmm5 # 0x41b800
vpsrld $0x17, %zmm5, %zmm18
vpternlogd $0xec, 0x23af26(%rip), %zmm4, %zmm5 # 0x41b840
vcmpltps 0x23af5b(%rip), %zmm5, %k1 # 0x41b880
vsubps %zmm0, %zmm5, %zmm19
vaddps %zmm5, %zmm19, %zmm19 {%k1}
vmulps %zmm19, %zmm19, %zmm5
vmovaps %zmm19, %zmm20
vmovaps 0x23afb9(%rip), %zmm21 # 0x41b900
vfmadd132ps 0x23af6f(%rip), %zmm21, %zmm20 # zmm20 = (zmm20 * mem) + zmm21
vfmadd213ps 0x23afe5(%rip), %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + mem
vfmadd213ps 0x23b01b(%rip), %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + mem
vfmadd213ps 0x23b051(%rip), %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + mem
vfmadd213ps 0x23b087(%rip), %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + mem
vfmadd213ps 0x23b0bd(%rip), %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + mem
vfmadd213ps %zmm26, %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + zmm26
vfmadd213ps %zmm27, %zmm19, %zmm20 # zmm20 = (zmm19 * zmm20) + zmm27
vmulps %zmm19, %zmm5, %zmm21
vfmadd213ps %zmm19, %zmm20, %zmm21 # zmm21 = (zmm20 * zmm21) + zmm19
vcmpleps %zmm29, %zmm17, %k2
vpsubd %zmm15, %zmm18, %zmm17
vcvtdq2ps %zmm17, %zmm17
vaddps %zmm0, %zmm17, %zmm18
vmovaps %zmm17, %zmm18 {%k1}
vfmadd231ps %zmm8, %zmm18, %zmm21 # zmm21 = (zmm18 * zmm8) + zmm21
vfmadd231ps %zmm5, %zmm28, %zmm21 # zmm21 = (zmm28 * zmm5) + zmm21
vfmadd231ps %zmm18, %zmm25, %zmm21 # zmm21 = (zmm25 * zmm18) + zmm21
vmulps %zmm31, %zmm21, %zmm5
vbroadcastss 0x236d68(%rip), %zmm5 {%k2} # 0x417744
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm5
vmovaps %zmm3, %zmm17
vfmadd213ps %zmm4, %zmm5, %zmm17 # zmm17 = (zmm5 * zmm17) + zmm4
vrndscaleps $0x1, %zmm17, %zmm18
vcmpltps %zmm18, %zmm17, %k1
vsubps %zmm0, %zmm18, %zmm18 {%k1}
vfmadd231ps %zmm7, %zmm18, %zmm5 # zmm5 = (zmm18 * zmm7) + zmm5
vfmadd231ps %zmm9, %zmm18, %zmm5 # zmm5 = (zmm18 * zmm9) + zmm5
vmulps %zmm5, %zmm5, %zmm17
vmovaps %zmm5, %zmm19
vfmadd213ps %zmm11, %zmm10, %zmm19 # zmm19 = (zmm10 * zmm19) + zmm11
vfmadd213ps %zmm12, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm12
vfmadd213ps %zmm13, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm13
vfmadd213ps %zmm14, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm14
vfmadd213ps %zmm4, %zmm5, %zmm19 # zmm19 = (zmm5 * zmm19) + zmm4
vfmadd213ps %zmm5, %zmm17, %zmm19 # zmm19 = (zmm17 * zmm19) + zmm5
vaddps %zmm0, %zmm19, %zmm5
vcvttps2dq %zmm18, %zmm17
vpaddd %zmm15, %zmm17, %zmm17
vpslld $0x17, %zmm17, %zmm17
vfmadd213ps %zmm30, %zmm5, %zmm17 # zmm17 = (zmm5 * zmm17) + zmm30
vrcp14ps %zmm17, %zmm5
vfmsub213ps %zmm30, %zmm5, %zmm17 # zmm17 = (zmm5 * zmm17) - zmm30
vfnmadd132ps %zmm5, %zmm5, %zmm17 # zmm17 = -(zmm17 * zmm5) + zmm5
vfnmsub213ps %zmm30, %zmm31, %zmm17 # zmm17 = -(zmm31 * zmm17) - zmm30
vmulps %zmm16, %zmm17, %zmm16
jmp 0x1e0b7f
movq 0xe8(%rbp,%rcx), %r11
vmaxps (%r11){1to16}, %zmm16, %zmm5
vminps 0x4(%r11){1to16}, %zmm5, %zmm16
jmp 0x1e0b7f
vxorps %zmm6, %zmm16, %zmm5
vminps %zmm1, %zmm5, %zmm5
vmaxps %zmm2, %zmm5, %zmm5
vmovaps %zmm3, %zmm16
vfmadd213ps %zmm4, %zmm5, %zmm16 # zmm16 = (zmm5 * zmm16) + zmm4
vrndscaleps $0x1, %zmm16, %zmm17
vcmpltps %zmm17, %zmm16, %k1
vsubps %zmm0, %zmm17, %zmm17 {%k1}
vfmadd231ps %zmm7, %zmm17, %zmm5 # zmm5 = (zmm17 * zmm7) + zmm5
vfmadd231ps %zmm9, %zmm17, %zmm5 # zmm5 = (zmm17 * zmm9) + zmm5
vmulps %zmm5, %zmm5, %zmm16
vmovaps %zmm5, %zmm18
vfmadd213ps %zmm11, %zmm10, %zmm18 # zmm18 = (zmm10 * zmm18) + zmm11
vfmadd213ps %zmm12, %zmm5, %zmm18 # zmm18 = (zmm5 * zmm18) + zmm12
vfmadd213ps %zmm13, %zmm5, %zmm18 # zmm18 = (zmm5 * zmm18) + zmm13
vfmadd213ps %zmm14, %zmm5, %zmm18 # zmm18 = (zmm5 * zmm18) + zmm14
vfmadd213ps %zmm4, %zmm5, %zmm18 # zmm18 = (zmm5 * zmm18) + zmm4
vfmadd213ps %zmm5, %zmm16, %zmm18 # zmm18 = (zmm16 * zmm18) + zmm5
vaddps %zmm0, %zmm18, %zmm5
vcvttps2dq %zmm17, %zmm16
vpaddd %zmm15, %zmm16, %zmm16
vpslld $0x17, %zmm16, %zmm16
vfmadd213ps %zmm30, %zmm5, %zmm16 # zmm16 = (zmm5 * zmm16) + zmm30
vrcp14ps %zmm16, %zmm5
vfmsub213ps %zmm30, %zmm5, %zmm16 # zmm16 = (zmm5 * zmm16) - zmm30
vfnmadd132ps %zmm5, %zmm5, %zmm16 # zmm16 = -(zmm16 * zmm5) + zmm5
jmp 0x1e0b7f
movq 0xe8(%rbp,%rcx), %r11
vcmpltps %zmm29, %zmm16, %k1
vmulps (%r11){1to16}, %zmm16, %zmm16 {%k1}
jmp 0x1e0b7f
movq 0xe8(%rbp,%rcx), %r11
vbroadcastss (%r11), %zmm5
vfmadd213ps 0x4(%r11){1to16}, %zmm16, %zmm5 # zmm5 = (zmm16 * zmm5) + mem
vmaxps %zmm29, %zmm5, %zmm5
vminps %zmm30, %zmm5, %zmm5
vmulps %zmm16, %zmm5, %zmm16
movq (%r13), %r11
movq %r9, %rbx
shlq $0x6, %rbx
vmovups %zmm16, (%r11,%rbx)
incq %r9
cmpq %rax, %r9
jne 0x1e0689
jmp 0x1e1d3f
movl $0x0, 0x14(%rsp)
movq (%rbp), %rax
movq -0x18(%rax), %rax
movl 0xe0(%rbp,%rax), %ecx
movq %rcx, 0x40(%rsp)
movq %rax, 0x28(%rsp)
movq 0x178(%rbp,%rax), %r12
movl 0x88(%rsp), %edi
imull 0x9c(%rsp), %edi
movslq 0x2c(%r13), %rcx
movq %rcx, %rax
movq %rcx, 0x38(%rsp)
sarl $0x3, %eax
testl %eax, %eax
movq %r12, 0x30(%rsp)
movl %edi, 0x10(%rsp)
jle 0x1e16ce
movl %edi, %ecx
andl $-0x8, %ecx
movl %ecx, 0x6c(%rsp)
movl %eax, %r14d
movl $0x1, %edx
movl $0x2, %esi
movl $0x3, %r8d
movl $0x4, %r10d
movl $0x5, %r11d
movl $0x6, %ebx
movl $0x7, %r13d
xorl %r15d, %r15d
xorl %r9d, %r9d
movq %r14, 0x1f0(%rsp)
movq %r13, 0x48(%rsp)
movq %r9, 0x120(%rsp)
leaq (,%r9,8), %rax
vxorps %xmm9, %xmm9, %xmm9
testq %r12, %r12
je 0x1e0c60
vmovups (%r12,%rax,4), %ymm9
movslq 0x3c(%rbp), %r9
movq 0x10(%rbp), %r13
movq 0x20(%rbp), %r12
movq 0x70(%rsp), %rbp
cmpl $0x8, %edi
movq %rdx, 0x158(%rsp)
movq %rsi, 0x150(%rsp)
movq %r8, 0x148(%rsp)
movq %r10, 0x140(%rsp)
movq %r11, 0x138(%rsp)
movq %rbx, 0x130(%rsp)
movq %r15, 0x128(%rsp)
movq %rax, 0x1f8(%rsp)
jl 0x1e0e95
movq %r12, %rax
imulq %r15, %rax
imulq %r9, %rax
movq %rax, 0xe0(%rsp)
leaq (%rax,%r13), %rcx
movq %r12, %rax
imulq %rdx, %rax
imulq %r9, %rax
movq %rax, 0xd0(%rsp)
leaq (%rax,%r13), %rdx
movq %r12, %rax
imulq %rsi, %rax
imulq %r9, %rax
movq %rax, 0xc0(%rsp)
leaq (%rax,%r13), %rsi
movq %r12, %r14
imulq %r8, %r14
imulq %r9, %r14
movl %edi, %eax
movq %r14, 0x60(%rsp)
leaq (%r14,%r13), %rdi
movq %r12, %r8
imulq %r10, %r8
imulq %r9, %r8
movq %r8, 0x58(%rsp)
addq %r13, %r8
movq %r12, %r10
imulq %r11, %r10
imulq %r9, %r10
movq %r10, 0x50(%rsp)
addq %r13, %r10
movq %r12, %r15
imulq %rbx, %r15
imulq %r9, %r15
leaq (%r15,%r13), %r11
imulq 0x48(%rsp), %r12
imulq %r9, %r12
movq %r13, 0x170(%rsp)
leaq (%r12,%r13), %rbx
vxorps %xmm10, %xmm10, %xmm10
vxorps %xmm11, %xmm11, %xmm11
vxorps %xmm13, %xmm13, %xmm13
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm2, %xmm2, %xmm2
vpxor %xmm14, %xmm14, %xmm14
movl $0x7, %r14d
xorl %r13d, %r13d
movq %rbp, %r9
vmovups (%rbp,%r13,2), %ymm0
vmovaps %ymm1, %ymm16
vlddqu (%rcx,%r13), %xmm1
vcvtph2ps %xmm1, %ymm1
vmovaps %ymm2, %ymm17
vlddqu (%rdx,%r13), %xmm2
vcvtph2ps %xmm2, %ymm2
vlddqu (%rsi,%r13), %xmm3
vcvtph2ps %xmm3, %ymm3
vlddqu (%rdi,%r13), %xmm4
vcvtph2ps %xmm4, %ymm4
vlddqu (%r8,%r13), %xmm5
vcvtph2ps %xmm5, %ymm5
vlddqu (%r10,%r13), %xmm6
vcvtph2ps %xmm6, %ymm6
vlddqu (%r11,%r13), %xmm7
vcvtph2ps %xmm7, %ymm7
vlddqu (%rbx,%r13), %xmm8
vcvtph2ps %xmm8, %ymm8
vfmadd231ps %ymm1, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm1) + ymm14
vmovaps %ymm16, %ymm1
vfmadd231ps %ymm2, %ymm0, %ymm17 # ymm17 = (ymm0 * ymm2) + ymm17
vmovaps %ymm17, %ymm2
vfmadd231ps %ymm3, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm3) + ymm15
vfmadd231ps %ymm4, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm4) + ymm1
vfmadd231ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm5) + ymm12
vfmadd231ps %ymm6, %ymm0, %ymm13 # ymm13 = (ymm0 * ymm6) + ymm13
vfmadd231ps %ymm7, %ymm0, %ymm11 # ymm11 = (ymm0 * ymm7) + ymm11
vfmadd231ps %ymm8, %ymm0, %ymm10 # ymm10 = (ymm0 * ymm8) + ymm10
addq $0x20, %r9
addq $0x10, %r13
addl $0x8, %r14d
cmpl %eax, %r14d
jl 0x1e0d92
movq 0x170(%rsp), %rcx
addq %r13, %rcx
addq %rcx, 0xe0(%rsp)
addq %rcx, 0xd0(%rsp)
addq %rcx, 0xc0(%rsp)
addq %rcx, 0x60(%rsp)
addq %rcx, 0x58(%rsp)
addq %rcx, 0x50(%rsp)
addq %rcx, %r15
addq %rcx, %r12
movl 0x6c(%rsp), %ecx
movq %r9, %rbp
movl %eax, %edi
movq 0x1f0(%rsp), %r14
jmp 0x1e0f54
movq %rax, %rdx
orq $0x1, %rdx
movq %rax, %rsi
orq $0x2, %rsi
movq %rax, %r10
orq $0x3, %r10
movq %rax, %r11
orq $0x4, %r11
movq %rax, %rbx
orq $0x5, %rbx
movq %rax, %r15
orq $0x6, %r15
movq %rax, %rcx
orq $0x7, %rcx
imulq %r9, %r12
movq %r12, %r8
imulq %rax, %r8
addq %r13, %r8
movq %r8, 0xe0(%rsp)
imulq %r12, %rdx
addq %r13, %rdx
movq %rdx, 0xd0(%rsp)
imulq %r12, %rsi
addq %r13, %rsi
movq %rsi, 0xc0(%rsp)
imulq %r12, %r10
addq %r13, %r10
movq %r10, 0x60(%rsp)
imulq %r12, %r11
addq %r13, %r11
movq %r11, 0x58(%rsp)
imulq %r12, %rbx
addq %r13, %rbx
movq %rbx, 0x50(%rsp)
imulq %r12, %r15
addq %r13, %r15
imulq %rcx, %r12
addq %r13, %r12
xorl %ecx, %ecx
vpxor %xmm14, %xmm14, %xmm14
vxorps %xmm2, %xmm2, %xmm2
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
vxorps %xmm11, %xmm11, %xmm11
vxorps %xmm10, %xmm10, %xmm10
vmovups %ymm2, 0x2a0(%rsp)
vmovups %ymm1, 0x2c0(%rsp)
vmovups %ymm15, 0x2e0(%rsp)
vmovups %ymm14, 0x300(%rsp)
vmovups %ymm13, 0x320(%rsp)
vmovups %ymm12, 0x340(%rsp)
vmovups %ymm11, 0x360(%rsp)
vmovups %ymm10, 0x380(%rsp)
movl %edi, %r13d
subl %ecx, %r13d
jle 0x1e11a7
xorl %ebx, %ebx
vmovups %ymm9, 0x170(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x110(%rsp)
movq 0xe0(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
vzeroupper
callq 0x5c068
vmovaps %xmm0, 0x200(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq 0xd0(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0xf0(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movq 0xc0(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x1d0(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
movq 0x60(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x1b0(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x160(%rsp)
movq 0x58(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x1a0(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x280(%rsp)
movq 0x50(%rsp), %rax
movzwl (%rax,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x260(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x240(%rsp)
movzwl (%r15,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x220(%rsp)
vmovss (%rbp,%rbx,4), %xmm0
vmovaps %xmm0, 0x190(%rsp)
movzwl (%r12,%rbx,2), %edi
callq 0x5c068
vmovaps 0x1a0(%rsp), %xmm1
vinsertps $0x10, 0x260(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x220(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,1,2],xmm0[0]
vmovaps 0x200(%rsp), %xmm1
vinsertps $0x10, 0xf0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x1d0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, 0x1b0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vinsertf128 $0x1, %xmm0, %ymm1, %ymm0
vmovaps 0x160(%rsp), %xmm1
vinsertps $0x10, 0x280(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x240(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, 0x190(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovaps 0x110(%rsp), %xmm2
vinsertps $0x10, 0x100(%rsp), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x1e0(%rsp), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vinsertps $0x30, 0x1c0(%rsp), %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],mem[0]
vinsertf128 $0x1, %xmm1, %ymm2, %ymm1
vmovups 0x170(%rsp), %ymm9
vfmadd231ps %ymm1, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm1) + ymm9
incq %rbx
cmpl %ebx, %r13d
jne 0x1e0faa
vmovups 0x300(%rsp), %ymm0
vhaddps 0x2a0(%rsp), %ymm0, %ymm0
vmovups 0x2e0(%rsp), %ymm1
vhaddps 0x2c0(%rsp), %ymm1, %ymm1
vhaddps %ymm1, %ymm0, %ymm0
vmovups 0x340(%rsp), %ymm1
vhaddps 0x320(%rsp), %ymm1, %ymm1
vmovups 0x360(%rsp), %ymm2
vhaddps 0x380(%rsp), %ymm2, %ymm2
vhaddps %ymm2, %ymm1, %ymm1
vblendps $0xf0, %ymm1, %ymm0, %ymm2 # ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
vperm2f128 $0x21, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm1[0,1]
vaddps %ymm0, %ymm9, %ymm0
vaddps %ymm2, %ymm0, %ymm0
movq 0x40(%rsp), %rax
decl %eax
cmpl $0x5, %eax
ja 0x1e1671
leaq 0x23a0a6(%rip), %rcx # 0x41b2c8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq 0x20(%rsp), %rcx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
movq 0x158(%rsp), %rdx
movq 0x150(%rsp), %rsi
movq 0x148(%rsp), %r8
movq 0x140(%rsp), %r10
movq 0x138(%rsp), %r11
movq 0x130(%rsp), %rbx
movq 0x48(%rsp), %r13
movq 0x128(%rsp), %r15
movq 0x120(%rsp), %r9
jmpq *%rax
vmaxps 0x238099(%rip), %ymm0, %ymm0 # 0x419320
jmp 0x1e162f
vbroadcastss 0x235dc3(%rip), %ymm6 # 0x417058
vminps %ymm6, %ymm0, %ymm1
vbroadcastss 0x235dba(%rip), %ymm7 # 0x41705c
vmaxps %ymm7, %ymm1, %ymm1
vbroadcastss 0x235db1(%rip), %ymm9 # 0x417060
vmovaps %ymm9, %ymm2
vbroadcastss 0x2333d4(%rip), %ymm8 # 0x414690
vfmadd213ps %ymm8, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm8
vroundps $0x1, %ymm2, %ymm3
vcmpltps %ymm3, %ymm2, %k1
vbroadcastss 0x233a41(%rip), %ymm10 # 0x414d18
vsubps %ymm10, %ymm3, %ymm3 {%k1}
vbroadcastss 0x235d7e(%rip), %ymm11 # 0x417064
vfmsub231ps %ymm11, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm11) - ymm1
vbroadcastss 0x235d73(%rip), %ymm18 # 0x417068
vfnmsub231ps %ymm18, %ymm3, %ymm1 # ymm1 = -(ymm3 * ymm18) - ymm1
vmulps %ymm1, %ymm1, %ymm2
vbroadcastss 0x235d64(%rip), %ymm12 # 0x41706c
vmovaps %ymm12, %ymm4
vbroadcastss 0x235d5b(%rip), %ymm13 # 0x417070
vfmadd213ps %ymm13, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm13
vbroadcastss 0x235d51(%rip), %ymm14 # 0x417074
vfmadd213ps %ymm14, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm14
vbroadcastss 0x235d47(%rip), %ymm15 # 0x417078
vfmadd213ps %ymm15, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm15
vbroadcastss 0x235d3c(%rip), %ymm16 # 0x41707c
vfmadd213ps %ymm16, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm16
vfmadd213ps %ymm8, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm8
vfmadd213ps %ymm1, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm1
vaddps %ymm4, %ymm10, %ymm1
vcvttps2dq %ymm3, %ymm2
vpslld $0x17, %ymm2, %ymm2
vpbroadcastd 0x2339b1(%rip), %ymm17 # 0x414d18
vpaddd %ymm17, %ymm2, %ymm2
vfmadd213ps %ymm10, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm10
vcmpleps 0x237fa3(%rip), %ymm2, %k1 # 0x419320
vmaxps 0x235cf9(%rip){1to8}, %ymm2, %ymm1 # 0x417080
vpsrld $0x17, %ymm1, %ymm2
vpbroadcastd 0x235cef(%rip), %ymm3 # 0x417084
vpternlogd $0xea, 0x2332f0(%rip){1to8}, %ymm3, %ymm1 # 0x414690
vcmpltps 0x235ce1(%rip){1to8}, %ymm1, %k2 # 0x41708c
vbroadcastss 0x235cdb(%rip), %ymm19 # 0x417090
vaddps %ymm19, %ymm1, %ymm3
vaddps %ymm1, %ymm3, %ymm3 {%k2}
vmulps %ymm3, %ymm3, %ymm1
vbroadcastss 0x235cc6(%rip), %ymm4 # 0x417094
vfmadd213ps 0x235cc0(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235cba(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235cb4(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235cae(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235ca8(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235ca2(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235c9c(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vfmadd213ps 0x235c96(%rip){1to8}, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm4) + mem
vmulps %ymm3, %ymm1, %ymm5
vmulps %ymm4, %ymm5, %ymm4
vpaddd 0x235c58(%rip){1to8}, %ymm2, %ymm2 # 0x417088
vcvtdq2ps %ymm2, %ymm2
vsubps %ymm10, %ymm2, %ymm2 {%k2}
vfmadd231ps %ymm18, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm18) + ymm4
vfmsub231ps %ymm1, %ymm8, %ymm4 # ymm4 = (ymm8 * ymm1) - ymm4
vsubps %ymm3, %ymm4, %ymm1
vfmsub231ps %ymm2, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm2) - ymm1
vbroadcastss 0x23798d(%rip), %ymm5 # 0x418de4
vmulps %ymm5, %ymm1, %ymm1
vbroadcastss 0x2362df(%rip), %ymm1 {%k1} # 0x417744
vminps %ymm6, %ymm1, %ymm1
vmaxps %ymm7, %ymm1, %ymm1
vmovaps %ymm9, %ymm2
vfmadd213ps %ymm8, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm8
vroundps $0x1, %ymm2, %ymm3
vcmpltps %ymm3, %ymm2, %k1
vsubps %ymm10, %ymm3, %ymm3 {%k1}
vfmsub231ps %ymm11, %ymm3, %ymm1 # ymm1 = (ymm3 * ymm11) - ymm1
vfnmsub231ps %ymm18, %ymm3, %ymm1 # ymm1 = -(ymm3 * ymm18) - ymm1
vmulps %ymm1, %ymm1, %ymm2
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm13, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm13
vfmadd213ps %ymm14, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm14
vfmadd213ps %ymm15, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm15
vfmadd213ps %ymm16, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm16
vfmadd213ps %ymm8, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm8
vfmadd213ps %ymm1, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm4) + ymm1
vaddps %ymm4, %ymm10, %ymm1
vcvttps2dq %ymm3, %ymm2
vpslld $0x17, %ymm2, %ymm2
vpaddd %ymm17, %ymm2, %ymm2
vfmadd213ps %ymm10, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + ymm10
vrcpps %ymm2, %ymm1
vfmsub213ps %ymm10, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) - ymm10
vfnmadd132ps %ymm1, %ymm1, %ymm2 # ymm2 = -(ymm2 * ymm1) + ymm1
vfnmadd213ps %ymm19, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm19
vmulps %ymm0, %ymm2, %ymm0
jmp 0x1e162f
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vmaxps (%rax){1to8}, %ymm0, %ymm0
vminps 0x4(%rax){1to8}, %ymm0, %ymm0
jmp 0x1e162f
vxorps 0x235b3b(%rip){1to8}, %ymm0, %ymm0 # 0x417054
vminps 0x235b35(%rip){1to8}, %ymm0, %ymm0 # 0x417058
vmaxps 0x235b2f(%rip){1to8}, %ymm0, %ymm0 # 0x41705c
vbroadcastss 0x23315a(%rip), %ymm4 # 0x414690
vmovaps %ymm4, %ymm1
vfmadd231ps 0x235b1c(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm0 * mem) + ymm1
vroundps $0x1, %ymm1, %ymm2
vcmpltps %ymm2, %ymm1, %k1
vbroadcastss 0x2337be(%rip), %ymm5 # 0x414d18
vsubps %ymm5, %ymm2, %ymm2 {%k1}
vfmsub231ps 0x235afa(%rip){1to8}, %ymm2, %ymm0 # ymm0 = (ymm2 * mem) - ymm0
vfmsub231ps 0x237874(%rip){1to8}, %ymm2, %ymm0 # ymm0 = (ymm2 * mem) - ymm0
vmulps %ymm0, %ymm0, %ymm1
vbroadcastss 0x235aeb(%rip), %ymm3 # 0x41706c
vfmadd213ps 0x235ae5(%rip){1to8}, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem
vfmadd213ps 0x235adf(%rip){1to8}, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem
vfmadd213ps 0x235ad9(%rip){1to8}, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem
vfmadd213ps 0x235ad3(%rip){1to8}, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem
vfmadd213ps %ymm4, %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + ymm4
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vaddps %ymm5, %ymm3, %ymm1
vcvttps2dq %ymm2, %ymm0
vpslld $0x17, %ymm0, %ymm0
vpaddd 0x23374e(%rip){1to8}, %ymm0, %ymm0 # 0x414d18
vfmadd213ps %ymm5, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm5
vrcpps %ymm0, %ymm1
vfmsub213ps %ymm5, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) - ymm5
vfnmadd132ps %ymm1, %ymm1, %ymm0 # ymm0 = -(ymm0 * ymm1) + ymm1
jmp 0x1e162f
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vxorps %xmm2, %xmm2, %xmm2
vmaxps %ymm2, %ymm0, %ymm1
vminps %ymm2, %ymm0, %ymm0
vfmadd132ps (%rax){1to8}, %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
jmp 0x1e162f
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vbroadcastss (%rax), %ymm1
vfmadd213ps 0x4(%rax){1to8}, %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem
vmaxps 0x237cff(%rip), %ymm1, %ymm1 # 0x419320
vminps 0x2336ed(%rip){1to8}, %ymm1, %ymm1 # 0x414d18
vmulps %ymm0, %ymm1, %ymm0
movl 0x10(%rsp), %edi
movq (%rcx), %rax
movq 0x1f8(%rsp), %rcx
vmovups %ymm0, (%rax,%rcx,4)
incq %r9
addq $0x8, %r15
addq $0x8, %rdx
addq $0x8, %rsi
addq $0x8, %r8
addq $0x8, %r10
addq $0x8, %r11
addq $0x8, %rbx
addq $0x8, %r13
cmpq %r14, %r9
jne 0x1e0c3b
jmp 0x1e16ce
movq 0x20(%rsp), %rcx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
movl 0x10(%rsp), %edi
movq 0x158(%rsp), %rdx
movq 0x150(%rsp), %rsi
movq 0x148(%rsp), %r8
movq 0x140(%rsp), %r10
movq 0x138(%rsp), %r11
movq 0x130(%rsp), %rbx
movq 0x48(%rsp), %r13
movq 0x128(%rsp), %r15
movq 0x120(%rsp), %r9
jmp 0x1e1633
movq 0x38(%rsp), %rax
movq %rax, %rsi
andq $-0x8, %rsi
shrl $0x2, %eax
andl $0x1, %eax
je 0x1e249e
testq %r12, %r12
movq %rax, 0x1a0(%rsp)
je 0x1e1d4c
vmovups (%r12,%rsi,4), %xmm7
jmp 0x1e1d50
movl $0x0, 0x14(%rsp)
movslq 0x2c(%r13), %rcx
testq %rcx, %rcx
jle 0x1e277c
movq (%rbp), %rax
movq -0x18(%rax), %rdx
movl 0xe0(%rbp,%rdx), %esi
movq 0x178(%rbp,%rdx), %rdi
movl 0x88(%rsp), %r8d
imull 0x9c(%rsp), %r8d
movl %r8d, %r9d
andl $-0x8, %r9d
xorl %r10d, %r10d
decl %esi
leaq 0x239b46(%rip), %r11 # 0x41b298
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x2358f8(%rip), %ymm24 # 0x417058
vbroadcastss 0x2358f3(%rip), %ymm3 # 0x41705c
vbroadcastss 0x232f1e(%rip), %ymm4 # 0x414690
vbroadcastss 0x2358e5(%rip), %ymm5 # 0x417060
vbroadcastss 0x233594(%rip), %ymm6 # 0x414d18
vbroadcastss 0x2358d7(%rip), %ymm7 # 0x417064
vbroadcastss 0x2358d6(%rip), %ymm9 # 0x41706c
vbroadcastss 0x2358d1(%rip), %ymm10 # 0x417070
vbroadcastss 0x2358cc(%rip), %ymm11 # 0x417074
vbroadcastss 0x2358c7(%rip), %ymm12 # 0x417078
vbroadcastss 0x2358c2(%rip), %ymm13 # 0x41707c
vpbroadcastd 0x233555(%rip), %ymm14 # 0x414d18
vbroadcastss 0x23589c(%rip), %ymm15 # 0x417068
vbroadcastss 0x2358ba(%rip), %ymm21 # 0x417090
vbroadcastss 0x2358b4(%rip), %ymm22 # 0x417094
vbroadcastss 0x2358ba(%rip), %ymm26 # 0x4170a4
vbroadcastss 0x2358b4(%rip), %ymm27 # 0x4170a8
vbroadcastss 0x2358ae(%rip), %ymm28 # 0x4170ac
vbroadcastss 0x2358a8(%rip), %ymm29 # 0x4170b0
vbroadcastss 0x2358a2(%rip), %ymm30 # 0x4170b4
vbroadcastss 0x2375c8(%rip), %ymm31 # 0x418de4
vxorps %xmm1, %xmm1, %xmm1
testq %rdi, %rdi
je 0x1e1831
movq %r10, %rbx
shlq $0x5, %rbx
vmovups (%rdi,%rbx), %ymm1
movslq 0x3c(%rbp), %rbx
imulq %r10, %rbx
imulq 0x20(%rbp), %rbx
addq 0x10(%rbp), %rbx
movq 0x70(%rsp), %r14
cmpl $0x8, %r8d
jl 0x1e1926
vxorps %xmm8, %xmm8, %xmm8
movl $0x7, %ebp
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm17, %xmm17, %xmm17
vxorps %xmm20, %xmm20, %xmm20
vxorps %xmm19, %xmm19, %xmm19
vxorps %xmm23, %xmm23, %xmm23
vxorps %xmm16, %xmm16, %xmm16
vlddqu (%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps (%r14){1to8}, %ymm25, %ymm1 # ymm1 = (ymm25 * mem) + ymm1
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0x4(%r14){1to8}, %ymm2, %ymm8 # ymm8 = (ymm2 * mem) + ymm8
vlddqu 0x20(%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps 0x8(%r14){1to8}, %ymm25, %ymm18 # ymm18 = (ymm25 * mem) + ymm18
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0xc(%r14){1to8}, %ymm2, %ymm17 # ymm17 = (ymm2 * mem) + ymm17
vlddqu 0x40(%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps 0x10(%r14){1to8}, %ymm25, %ymm20 # ymm20 = (ymm25 * mem) + ymm20
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0x14(%r14){1to8}, %ymm2, %ymm19 # ymm19 = (ymm2 * mem) + ymm19
vlddqu 0x60(%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps 0x18(%r14){1to8}, %ymm25, %ymm23 # ymm23 = (ymm25 * mem) + ymm23
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0x1c(%r14){1to8}, %ymm2, %ymm16 # ymm16 = (ymm2 * mem) + ymm16
addq $0x20, %r14
subq $-0x80, %rbx
addl $0x8, %ebp
cmpl %r8d, %ebp
jl 0x1e187f
movl %r9d, %ebp
jmp 0x1e1951
xorl %ebp, %ebp
vxorps %xmm16, %xmm16, %xmm16
vxorps %xmm23, %xmm23, %xmm23
vxorps %xmm19, %xmm19, %xmm19
vxorps %xmm20, %xmm20, %xmm20
vxorps %xmm17, %xmm17, %xmm17
vxorps %xmm18, %xmm18, %xmm18
vxorps %xmm8, %xmm8, %xmm8
movl %ebp, %r12d
orl $0x3, %r12d
cmpl %r8d, %r12d
jge 0x1e19bc
movl %ebp, %r12d
vlddqu (%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps (%r14){1to8}, %ymm25, %ymm1 # ymm1 = (ymm25 * mem) + ymm1
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0x4(%r14){1to8}, %ymm2, %ymm8 # ymm8 = (ymm2 * mem) + ymm8
vlddqu 0x20(%rbx), %ymm2
vcvtph2ps %xmm2, %ymm25
vfmadd231ps 0x8(%r14){1to8}, %ymm25, %ymm18 # ymm18 = (ymm25 * mem) + ymm18
vextractf128 $0x1, %ymm2, %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps 0xc(%r14){1to8}, %ymm2, %ymm17 # ymm17 = (ymm2 * mem) + ymm17
addq $0x10, %r14
addq $0x40, %rbx
leal 0x4(%r12), %ebp
addl $0x7, %r12d
cmpl %r8d, %r12d
jl 0x1e195d
movl %r8d, %r12d
subl %ebp, %r12d
jle 0x1e19ea
xorl %r13d, %r13d
movq 0x18(%rsp), %rbp
vlddqu (%rbx), %xmm2
vcvtph2ps %xmm2, %ymm2
vfmadd231ps (%r14,%r13,4){1to8}, %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
addq $0x10, %rbx
incq %r13
cmpl %r13d, %r12d
jne 0x1e19cc
jmp 0x1e19ef
movq 0x18(%rsp), %rbp
vaddps %ymm16, %ymm23, %ymm2
vaddps %ymm20, %ymm19, %ymm16
vaddps %ymm16, %ymm2, %ymm2
vaddps %ymm18, %ymm17, %ymm16
vaddps %ymm8, %ymm16, %ymm8
vaddps %ymm2, %ymm8, %ymm2
vaddps %ymm1, %ymm2, %ymm1
cmpl $0x5, %esi
ja 0x1e1d09
movslq (%r11,%rsi,4), %rbx
addq %r11, %rbx
movq 0x20(%rsp), %r13
jmpq *%rbx
vmaxps %ymm0, %ymm1, %ymm1
jmp 0x1e1d0e
vminps %ymm24, %ymm1, %ymm2
vmaxps %ymm3, %ymm2, %ymm2
vmovaps %ymm5, %ymm8
vfmadd213ps %ymm4, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm4
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm2 # ymm2 = (ymm16 * ymm7) - ymm2
vfnmsub231ps %ymm15, %ymm16, %ymm2 # ymm2 = -(ymm16 * ymm15) - ymm2
vmulps %ymm2, %ymm2, %ymm8
vmovaps %ymm9, %ymm17
vfmadd213ps %ymm10, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm4
vfmadd213ps %ymm2, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm2
vaddps %ymm6, %ymm17, %ymm2
vcvttps2dq %ymm16, %ymm8
vpslld $0x17, %ymm8, %ymm8
vpaddd %ymm14, %ymm8, %ymm8
vfmadd213ps %ymm6, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm6
vcmpleps %ymm0, %ymm8, %k1
vmaxps 0x2355bd(%rip){1to8}, %ymm8, %ymm2 # 0x417080
vpsrld $0x17, %ymm2, %ymm8
vpbroadcastd 0x2355b2(%rip), %ymm16 # 0x417084
vpternlogd $0xea, 0x232bb3(%rip){1to8}, %ymm16, %ymm2 # 0x414690
vcmpltps 0x2355a4(%rip){1to8}, %ymm2, %k2 # 0x41708c
vaddps %ymm21, %ymm2, %ymm16
vaddps %ymm2, %ymm16, %ymm16 {%k2}
vmulps %ymm16, %ymm16, %ymm2
vmovaps %ymm22, %ymm17
vfmadd213ps 0x23558e(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x235588(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps 0x235582(%rip){1to8}, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + mem
vfmadd213ps %ymm26, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm26
vfmadd213ps %ymm27, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm27
vfmadd213ps %ymm28, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm28
vfmadd213ps %ymm29, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm29
vfmadd213ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm17) + ymm30
vmulps %ymm16, %ymm2, %ymm18
vmulps %ymm17, %ymm18, %ymm17
vpaddd 0x235536(%rip){1to8}, %ymm8, %ymm8 # 0x417088
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm6, %ymm8, %ymm8 {%k2}
vfmadd231ps %ymm15, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm15) + ymm17
vfmsub231ps %ymm2, %ymm4, %ymm17 # ymm17 = (ymm4 * ymm2) - ymm17
vsubps %ymm16, %ymm17, %ymm2
vfmsub231ps %ymm8, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm8) - ymm2
vmulps %ymm31, %ymm2, %ymm2
vbroadcastss 0x235bc0(%rip), %ymm2 {%k1} # 0x417744
vminps %ymm24, %ymm2, %ymm2
vmaxps %ymm3, %ymm2, %ymm2
vmovaps %ymm5, %ymm8
vfmadd213ps %ymm4, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm4
vrndscaleps $0x1, %ymm8, %ymm16
vcmpltps %ymm16, %ymm8, %k1
vsubps %ymm6, %ymm16, %ymm16 {%k1}
vfmsub231ps %ymm7, %ymm16, %ymm2 # ymm2 = (ymm16 * ymm7) - ymm2
vfnmsub231ps %ymm15, %ymm16, %ymm2 # ymm2 = -(ymm16 * ymm15) - ymm2
vmulps %ymm2, %ymm2, %ymm8
vmovaps %ymm9, %ymm17
vfmadd213ps %ymm10, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm10
vfmadd213ps %ymm11, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm11
vfmadd213ps %ymm12, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm12
vfmadd213ps %ymm13, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm13
vfmadd213ps %ymm4, %ymm2, %ymm17 # ymm17 = (ymm2 * ymm17) + ymm4
vfmadd213ps %ymm2, %ymm8, %ymm17 # ymm17 = (ymm8 * ymm17) + ymm2
vaddps %ymm6, %ymm17, %ymm2
vcvttps2dq %ymm16, %ymm8
vpslld $0x17, %ymm8, %ymm8
vpaddd %ymm14, %ymm8, %ymm8
vfmadd213ps %ymm6, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) + ymm6
vrcpps %ymm8, %ymm2
vfmsub213ps %ymm6, %ymm2, %ymm8 # ymm8 = (ymm2 * ymm8) - ymm6
vfnmadd132ps %ymm2, %ymm2, %ymm8 # ymm8 = -(ymm8 * ymm2) + ymm2
vfnmadd213ps %ymm21, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm8) + ymm21
vmulps %ymm1, %ymm8, %ymm1
jmp 0x1e1d0e
movq 0xe8(%rbp,%rdx), %rbx
vmaxps (%rbx){1to8}, %ymm1, %ymm1
vminps 0x4(%rbx){1to8}, %ymm1, %ymm1
jmp 0x1e1d0e
vxorps 0x235411(%rip){1to8}, %ymm1, %ymm1 # 0x417054
vminps %ymm24, %ymm1, %ymm1
vmaxps %ymm3, %ymm1, %ymm1
vmovaps %ymm4, %ymm2
vfmadd231ps %ymm5, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm5) + ymm2
vroundps $0x1, %ymm2, %ymm8
vcmpltps %ymm8, %ymm2, %k1
vsubps %ymm6, %ymm8, %ymm8 {%k1}
vfmsub231ps %ymm7, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm7) - ymm1
vfmsub231ps 0x237170(%rip){1to8}, %ymm8, %ymm1 # ymm1 = (ymm8 * mem) - ymm1
vmulps %ymm1, %ymm1, %ymm2
vmovaps %ymm9, %ymm16
vfmadd213ps %ymm10, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm10
vfmadd213ps %ymm11, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm11
vfmadd213ps %ymm12, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm12
vfmadd213ps %ymm13, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm13
vfmadd213ps %ymm4, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm4
vfmadd213ps %ymm1, %ymm2, %ymm16 # ymm16 = (ymm2 * ymm16) + ymm1
vaddps %ymm6, %ymm16, %ymm2
vcvttps2dq %ymm8, %ymm1
vpslld $0x17, %ymm1, %ymm1
vpaddd %ymm1, %ymm14, %ymm1
vfmadd213ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) + ymm6
vrcpps %ymm1, %ymm2
vfmsub213ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm1) - ymm6
vfnmadd132ps %ymm2, %ymm2, %ymm1 # ymm1 = -(ymm1 * ymm2) + ymm2
jmp 0x1e1d0e
movq 0xe8(%rbp,%rdx), %rbx
vmaxps %ymm0, %ymm1, %ymm2
vminps %ymm0, %ymm1, %ymm1
vfmadd132ps (%rbx){1to8}, %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
jmp 0x1e1d0e
movq 0xe8(%rbp,%rdx), %rbx
vbroadcastss (%rbx), %ymm2
vfmadd213ps 0x4(%rbx){1to8}, %ymm1, %ymm2 # ymm2 = (ymm1 * ymm2) + mem
vmaxps %ymm0, %ymm2, %ymm2
vminps %ymm6, %ymm2, %ymm2
vmulps %ymm1, %ymm2, %ymm1
jmp 0x1e1d0e
movq 0x20(%rsp), %r13
movq (%r13), %rbx
movq %r10, %r14
shlq $0x5, %r14
vmovups %ymm1, (%rbx,%r14)
incq %r10
cmpq %rcx, %r10
jne 0x1e181c
cmpl $0x1, %r15d
je 0x1e0bae
cmpl $0x4, %r15d
je 0x1dff91
movl $0x0, 0x14(%rsp)
jmp 0x1e277c
vxorps %xmm7, %xmm7, %xmm7
movq %rsi, %r15
orq $0x1, %r15
movq %rsi, %r12
orq $0x2, %r12
movq %rsi, %rax
orq $0x3, %rax
movslq 0x3c(%rbp), %r13
imulq 0x20(%rbp), %r13
movq 0x10(%rbp), %rcx
movq %r13, %rbp
imulq %rsi, %rbp
addq %rcx, %rbp
imulq %r13, %r15
addq %rcx, %r15
imulq %r13, %r12
addq %rcx, %r12
imulq %rax, %r13
addq %rcx, %r13
movq 0x70(%rsp), %rcx
cmpl $0x8, %edi
jl 0x1e1e16
movl %edi, %eax
andl $-0x8, %eax
vxorps %xmm5, %xmm5, %xmm5
movl $0x7, %edx
vxorps %xmm8, %xmm8, %xmm8
vxorps %xmm9, %xmm9, %xmm9
vxorps %xmm6, %xmm6, %xmm6
vmovups (%rcx), %ymm0
vlddqu (%rbp), %xmm1
vcvtph2ps %xmm1, %ymm1
vlddqu (%r15), %xmm2
vcvtph2ps %xmm2, %ymm2
vlddqu (%r12), %xmm3
vcvtph2ps %xmm3, %ymm3
vlddqu (%r13), %xmm4
vcvtph2ps %xmm4, %ymm4
vfmadd231ps %ymm1, %ymm0, %ymm6 # ymm6 = (ymm0 * ymm1) + ymm6
vfmadd231ps %ymm2, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm2) + ymm9
vfmadd231ps %ymm3, %ymm0, %ymm8 # ymm8 = (ymm0 * ymm3) + ymm8
vfmadd231ps %ymm4, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm4) + ymm5
addq $0x20, %rcx
addq $0x10, %rbp
addq $0x10, %r15
addq $0x10, %r12
addq $0x10, %r13
addl $0x8, %edx
cmpl %edi, %edx
jl 0x1e1db7
jmp 0x1e1e2a
xorl %eax, %eax
vxorps %xmm6, %xmm6, %xmm6
vxorps %xmm9, %xmm9, %xmm9
vxorps %xmm8, %xmm8, %xmm8
vxorps %xmm5, %xmm5, %xmm5
movl %eax, %edx
orl $0x3, %edx
vxorps %xmm10, %xmm10, %xmm10
cmpl %edi, %edx
movq %rsi, 0x160(%rsp)
vmovups %ymm5, 0x280(%rsp)
vmovups %ymm6, 0x260(%rsp)
vmovups %ymm8, 0x240(%rsp)
vmovups %ymm9, 0x220(%rsp)
jge 0x1e1ecd
vxorps %xmm11, %xmm11, %xmm11
xorl %edx, %edx
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm13, %xmm13, %xmm13
movq %rcx, %r14
movl %eax, %esi
vcvtph2ps (%rbp,%rdx), %xmm0
vcvtph2ps (%r15,%rdx), %xmm1
vcvtph2ps (%r12,%rdx), %xmm2
vmovups (%rcx,%rdx,2), %xmm3
vcvtph2ps (%r13,%rdx), %xmm4
vfmadd231ps %xmm0, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm0) + xmm10
vfmadd231ps %xmm1, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm1) + xmm13
vfmadd231ps %xmm2, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm2) + xmm12
vfmadd231ps %xmm4, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm4) + xmm11
addq $0x10, %r14
leal 0x4(%rsi), %eax
addq $0x8, %rdx
addl $0x7, %esi
cmpl %edi, %esi
jl 0x1e1e78
addq %rdx, %rbp
addq %rdx, %r15
addq %rdx, %r12
addq %rdx, %r13
jmp 0x1e1edf
movq %rcx, %r14
vxorps %xmm13, %xmm13, %xmm13
vxorps %xmm12, %xmm12, %xmm12
vxorps %xmm11, %xmm11, %xmm11
vmovaps %xmm13, 0xc0(%rsp)
vmovaps %xmm12, 0xd0(%rsp)
vmovaps %xmm11, 0xe0(%rsp)
vmovaps %xmm10, 0x190(%rsp)
movl %edi, %ecx
subl %eax, %ecx
movq %rcx, 0x1b0(%rsp)
jle 0x1e2010
xorl %ebx, %ebx
vmovaps %xmm7, 0x170(%rsp)
vmovss (%r14,%rbx,4), %xmm0
vmovaps %xmm0, 0x110(%rsp)
movzwl (%rbp,%rbx,2), %edi
vzeroupper
callq 0x5c068
vmovaps %xmm0, 0x200(%rsp)
vmovss (%r14,%rbx,4), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movzwl (%r15,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0xf0(%rsp)
vmovss (%r14,%rbx,4), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movzwl (%r12,%rbx,2), %edi
callq 0x5c068
vmovaps %xmm0, 0x1d0(%rsp)
vmovss (%r14,%rbx,4), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
movzwl (%r13,%rbx,2), %edi
callq 0x5c068
vmovaps 0x200(%rsp), %xmm1
vinsertps $0x10, 0xf0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x1d0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,1,2],xmm0[0]
vmovaps 0x110(%rsp), %xmm1
vinsertps $0x10, 0x100(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x1e0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x30, 0x1c0(%rsp), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovaps 0x170(%rsp), %xmm7
vfmadd231ps %xmm1, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm1) + xmm7
incq %rbx
cmpl %ebx, 0x1b0(%rsp)
jne 0x1e1f17
vmovups 0x260(%rsp), %ymm0
vhaddps 0x220(%rsp), %ymm0, %ymm0
vmovups 0x240(%rsp), %ymm1
vhaddps 0x280(%rsp), %ymm1, %ymm1
vhaddps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vmovaps 0x190(%rsp), %xmm4
vmovaps 0xc0(%rsp), %xmm8
vunpcklps %xmm8, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
vmovaps 0xe0(%rsp), %xmm5
vmovaps 0xd0(%rsp), %xmm6
vunpcklps %xmm5, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
vunpckhps %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
vunpckhps %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
vmovlhps %xmm3, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm3[0]
vunpckhpd %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[1],xmm3[1]
vaddps %xmm6, %xmm2, %xmm2
vmovlhps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0]
vunpckhpd %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[1],xmm5[1]
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm0, %xmm7, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm2, %xmm0
movq 0x40(%rsp), %rax
decl %eax
cmpl $0x5, %eax
ja 0x1e27d1
leaq 0x23922d(%rip), %rcx # 0x41b2e0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
movq 0x20(%rsp), %rcx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
movq 0x160(%rsp), %rsi
jmpq *%rax
vxorps %xmm1, %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
jmp 0x1e248a
vbroadcastss 0x234f6f(%rip), %xmm6 # 0x417058
vminps %xmm6, %xmm0, %xmm1
vbroadcastss 0x234f66(%rip), %xmm7 # 0x41705c
vbroadcastss 0x234f61(%rip), %xmm4 # 0x417060
vmaxps %xmm7, %xmm1, %xmm13
vbroadcastss 0x232584(%rip), %xmm2 # 0x414690
vmovaps %xmm4, %xmm1
vfmadd213ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm1) + xmm2
vcvttps2dq %xmm1, %xmm3
vcvtdq2ps %xmm3, %xmm14
vcmpltps %xmm14, %xmm1, %k1
vbroadcastss 0x232beb(%rip), %xmm1 # 0x414d18
vbroadcastss 0x234f2e(%rip), %xmm3 # 0x417064
vsubps %xmm1, %xmm14, %xmm14 {%k1}
vfmsub231ps %xmm3, %xmm14, %xmm13 # xmm13 = (xmm14 * xmm3) - xmm13
vbroadcastss 0x234f1e(%rip), %xmm8 # 0x417068
vfnmsub231ps %xmm8, %xmm14, %xmm13 # xmm13 = -(xmm14 * xmm8) - xmm13
vmulps %xmm13, %xmm13, %xmm15
vbroadcastss 0x234f13(%rip), %xmm9 # 0x417070
vbroadcastss 0x234f06(%rip), %xmm5 # 0x41706c
vmovaps %xmm5, %xmm16
vfmadd213ps %xmm9, %xmm13, %xmm16 # xmm16 = (xmm13 * xmm16) + xmm9
vbroadcastss 0x234ef9(%rip), %xmm10 # 0x417074
vbroadcastss 0x234ef4(%rip), %xmm11 # 0x417078
vfmadd213ps %xmm10, %xmm13, %xmm16 # xmm16 = (xmm13 * xmm16) + xmm10
vfmadd213ps %xmm11, %xmm13, %xmm16 # xmm16 = (xmm13 * xmm16) + xmm11
vbroadcastss 0x234ee3(%rip), %xmm12 # 0x41707c
vfmadd213ps %xmm12, %xmm13, %xmm16 # xmm16 = (xmm13 * xmm16) + xmm12
vfmadd213ps %xmm2, %xmm13, %xmm16 # xmm16 = (xmm13 * xmm16) + xmm2
vfmadd213ps %xmm13, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + xmm13
vaddps %xmm1, %xmm16, %xmm15
vcvttps2dq %xmm14, %xmm13
vpslld $0x17, %xmm13, %xmm14
vpbroadcastd 0x232b53(%rip), %xmm13 # 0x414d18
vpaddd %xmm13, %xmm14, %xmm14
vfmadd213ps %xmm1, %xmm15, %xmm14 # xmm14 = (xmm15 * xmm14) + xmm1
vmaxps 0x234ea7(%rip){1to4}, %xmm14, %xmm15 # 0x417080
vxorps %xmm16, %xmm16, %xmm16
vpsrld $0x17, %xmm15, %xmm17
vpbroadcastd 0x2324a0(%rip), %xmm18 # 0x414690
vpternlogd $0xf8, 0x234e89(%rip){1to4}, %xmm15, %xmm18 # 0x417084
vpaddd 0x234e83(%rip){1to4}, %xmm17, %xmm15 # 0x417088
vcmpleps %xmm16, %xmm14, %k1
vcvtdq2ps %xmm15, %xmm14
vcmpltps 0x234e70(%rip){1to4}, %xmm18, %k2 # 0x41708c
vaddps 0x234e6a(%rip){1to4}, %xmm18, %xmm15 # 0x417090
vsubps %xmm1, %xmm14, %xmm14 {%k2}
vaddps %xmm18, %xmm15, %xmm15 {%k2}
vbroadcastss 0x234e58(%rip), %xmm16 # 0x417094
vfmadd213ps 0x234e52(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e4c(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e46(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e40(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e3a(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e34(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vfmadd213ps 0x234e2e(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vmulps %xmm15, %xmm15, %xmm17
vfmadd213ps 0x234e22(%rip){1to4}, %xmm15, %xmm16 # xmm16 = (xmm15 * xmm16) + mem
vmulps %xmm15, %xmm17, %xmm18
vmulps %xmm16, %xmm18, %xmm16
vfmadd231ps %xmm8, %xmm14, %xmm16 # xmm16 = (xmm14 * xmm8) + xmm16
vfmsub231ps %xmm17, %xmm2, %xmm16 # xmm16 = (xmm2 * xmm17) - xmm16
vsubps %xmm15, %xmm16, %xmm15
vfnmadd231ps %xmm14, %xmm3, %xmm15 # xmm15 = -(xmm3 * xmm14) + xmm15
vaddps %xmm15, %xmm15, %xmm14
vbroadcastss 0x235480(%rip), %xmm14 {%k1} # 0x417744
vminps %xmm6, %xmm14, %xmm6
vmaxps %xmm7, %xmm6, %xmm6
vfmadd213ps %xmm2, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm4) + xmm2
vcvttps2dq %xmm4, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm4, %k1
vsubps %xmm1, %xmm7, %xmm7 {%k1}
vfmsub213ps %xmm6, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm3) - xmm6
vfnmsub231ps %xmm8, %xmm7, %xmm3 # xmm3 = -(xmm7 * xmm8) - xmm3
vmulps %xmm3, %xmm3, %xmm4
vfmadd213ps %xmm9, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm9
vfmadd213ps %xmm10, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm10
vfmadd213ps %xmm11, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm11
vfmadd213ps %xmm12, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm12
vfmadd213ps %xmm2, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm5) + xmm2
vfmadd213ps %xmm3, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm5) + xmm3
vaddps %xmm1, %xmm5, %xmm2
vcvttps2dq %xmm7, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm3, %xmm13, %xmm3
vfmadd213ps %xmm1, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm1
vrcpps %xmm3, %xmm1
vaddps %xmm1, %xmm1, %xmm2
vfmsub213ps 0x236ab2(%rip){1to4}, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) - mem
vfnmadd213ps %xmm2, %xmm1, %xmm3 # xmm3 = -(xmm1 * xmm3) + xmm2
vfmsub231ps %xmm3, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm3) - xmm0
jmp 0x1e248a
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vmaxps (%rax){1to4}, %xmm0, %xmm0
vminps 0x4(%rax){1to4}, %xmm0, %xmm0
jmp 0x1e248a
vxorps 0x234ce2(%rip){1to4}, %xmm0, %xmm0 # 0x417054
vminps 0x234cdc(%rip){1to4}, %xmm0, %xmm0 # 0x417058
vmaxps 0x234cd6(%rip){1to4}, %xmm0, %xmm0 # 0x41705c
vbroadcastss 0x232301(%rip), %xmm1 # 0x414690
vbroadcastss 0x234cc8(%rip), %xmm2 # 0x417060
vfmadd213ps %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1
vcvttps2dq %xmm2, %xmm3
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x23296a(%rip), %xmm4 # 0x414d18
vcmpltps %xmm3, %xmm2, %k1
vsubps %xmm4, %xmm3, %xmm3 {%k1}
vfmsub231ps 0x234c9f(%rip){1to4}, %xmm3, %xmm0 # xmm0 = (xmm3 * mem) - xmm0
vfmsub231ps 0x236a19(%rip){1to4}, %xmm3, %xmm0 # xmm0 = (xmm3 * mem) - xmm0
vbroadcastss 0x234c94(%rip), %xmm2 # 0x41706c
vfmadd213ps 0x234c8e(%rip){1to4}, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem
vfmadd213ps 0x234c88(%rip){1to4}, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem
vfmadd213ps 0x234c82(%rip){1to4}, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem
vfmadd213ps 0x234c7c(%rip){1to4}, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + mem
vmulps %xmm0, %xmm0, %xmm5
vfmadd213ps %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1
vfmadd213ps %xmm0, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm0
vaddps %xmm4, %xmm2, %xmm1
vcvttps2dq %xmm3, %xmm0
vpslld $0x17, %xmm0, %xmm0
vpaddd 0x2328f3(%rip){1to4}, %xmm0, %xmm0 # 0x414d18
vfmadd213ps %xmm4, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm4
vrcpps %xmm0, %xmm1
vfmsub213ps %xmm4, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) - xmm4
vfnmadd132ps %xmm1, %xmm1, %xmm0 # xmm0 = -(xmm0 * xmm1) + xmm1
jmp 0x1e248a
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vxorps %xmm1, %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm2
vminps %xmm1, %xmm0, %xmm0
vfmadd132ps (%rax){1to4}, %xmm2, %xmm0 # xmm0 = (xmm0 * mem) + xmm2
jmp 0x1e248a
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vbroadcastss (%rax), %xmm1
vfmadd213ps 0x4(%rax){1to4}, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + mem
vxorps %xmm2, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vminps 0x232892(%rip){1to4}, %xmm1, %xmm1 # 0x414d18
vmulps %xmm0, %xmm1, %xmm0
movl 0x10(%rsp), %edi
movq (%rcx), %rax
vmovups %xmm0, (%rax,%rsi,4)
movq 0x1a0(%rsp), %rax
leal (%rsi,%rax,4), %eax
cmpl 0x38(%rsp), %eax
jge 0x1e277c
movl %edi, %ecx
andl $-0x8, %ecx
movl %ecx, 0xf0(%rsp)
movslq %eax, %r14
leaq 0x238e37(%rip), %r15 # 0x41b2f8
vxorps %xmm2, %xmm2, %xmm2
testq %r12, %r12
je 0x1e24d0
vmovss (%r12,%r14,4), %xmm2
movslq 0x3c(%rbp), %r12
imulq %r14, %r12
imulq 0x20(%rbp), %r12
addq 0x10(%rbp), %r12
movq 0x70(%rsp), %r13
cmpl $0x8, %edi
jl 0x1e251d
vxorps %xmm1, %xmm1, %xmm1
movl $0x7, %eax
vlddqu (%r12), %xmm0
vcvtph2ps %xmm0, %ymm0
vfmadd231ps (%r13), %ymm0, %ymm1 # ymm1 = (ymm0 * mem) + ymm1
addq $0x20, %r13
addq $0x10, %r12
addl $0x8, %eax
cmpl %edi, %eax
jl 0x1e24f4
movl 0xf0(%rsp), %eax
jmp 0x1e2523
xorl %eax, %eax
vxorps %xmm1, %xmm1, %xmm1
movl %eax, %ecx
orl $0x3, %ecx
vxorps %xmm3, %xmm3, %xmm3
cmpl %edi, %ecx
vmovups %ymm1, 0x200(%rsp)
jge 0x1e255b
vcvtph2ps (%r12), %xmm0
vfmadd231ps (%r13), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
addq $0x10, %r13
addq $0x8, %r12
leal 0x4(%rax), %ecx
addl $0x7, %eax
cmpl %edi, %eax
movl %ecx, %eax
jl 0x1e2539
jmp 0x1e255d
movl %eax, %ecx
vmovaps %xmm3, 0x100(%rsp)
movl %edi, %ebp
subl %ecx, %ebp
jle 0x1e25ae
xorl %ebx, %ebx
vmovss %xmm2, 0x170(%rsp)
vmovss (%r13,%rbx,4), %xmm0
vmovss %xmm0, 0x110(%rsp)
movzwl (%r12,%rbx,2), %edi
vzeroupper
callq 0x5c068
vmovss 0x170(%rsp), %xmm2
vfmadd231ss 0x110(%rsp), %xmm0, %xmm2 # xmm2 = (xmm0 * mem) + xmm2
incq %rbx
cmpl %ebx, %ebp
jne 0x1e256e
vmovups 0x200(%rsp), %ymm1
vextractf128 $0x1, %ymm1, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vmovaps 0x100(%rsp), %xmm3
vshufps $0x41, %xmm0, %xmm3, %xmm1 # xmm1 = xmm3[1,0],xmm0[0,1]
vshufps $0xeb, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[3,2],xmm0[2,3]
vaddps %xmm1, %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0]
vaddps %xmm1, %xmm0, %xmm0
vhaddps %xmm0, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm4
movq 0x40(%rsp), %rax
decl %eax
cmpl $0x5, %eax
ja 0x1e273e
movslq (%r15,%rax,4), %rax
addq %r15, %rax
movq 0x20(%rsp), %rbx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
jmpq *%rax
vmaxss 0x23486f(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x1e2751
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x170(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x2326dd(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
vmulss 0x170(%rsp), %xmm0, %xmm0
jmp 0x1e2751
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
movl 0x10(%rsp), %edi
jbe 0x1e2755
vmovaps %xmm1, %xmm0
jmp 0x1e2755
vmovss 0x2349d0(%rip), %xmm2 # 0x417058
vminss %xmm2, %xmm4, %xmm1
vxorps 0x2349be(%rip){1to4}, %xmm1, %xmm0 # 0x417054
vcmpltss 0x2349bb(%rip), %xmm1, %k1 # 0x41705c
vmovss %xmm2, %xmm0, %xmm0 {%k1}
vzeroupper
callq 0x563e0
vmovss 0x232661(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x1e2751
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vmovss (%rax), %xmm0
vcmpgtss 0x2347a8(%rip), %xmm4, %k1 # 0x416e88
vmovss 0x23262e(%rip), %xmm0 {%k1} # 0x414d18
vmulss %xmm4, %xmm0, %xmm0
jmp 0x1e2751
movq 0x28(%rsp), %rax
movq 0xe8(%rbp,%rax), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vxorps 0x234944(%rip){1to4}, %xmm2, %xmm0 # 0x417054
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
movl 0x10(%rsp), %edi
jb 0x1e2755
vmovss 0x2325ee(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x1e2771
vmovaps %xmm4, %xmm0
jmp 0x1e2755
vmovaps %xmm4, %xmm0
movq 0x20(%rsp), %rbx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
movl 0x10(%rsp), %edi
movq (%rbx), %rax
vmovss %xmm0, (%rax,%r14,4)
incq %r14
cmpq 0x38(%rsp), %r14
jne 0x1e24c1
jmp 0x1e1d3f
vfmadd213ss %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm1) + xmm2
vmulss %xmm4, %xmm1, %xmm0
jmp 0x1e2755
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x1e27b8
lock
decl (%rax)
jne 0x1e27b8
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x1e27a8
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x1e27b8
testq %rsi, %rsi
je 0x1e27b8
movq %rsi, %rdi
vzeroupper
callq 0x563b0
movl 0x14(%rsp), %eax
addq $0x3e8, %rsp # imm = 0x3E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq 0x20(%rsp), %rcx
movq 0x18(%rsp), %rbp
movq 0x30(%rsp), %r12
movl 0x10(%rsp), %edi
movq 0x160(%rsp), %rsi
jmp 0x1e248e
jmp 0x1e283c
jmp 0x1e27fb
jmp 0x1e27fb
jmp 0x1e27fb
jmp 0x1e27fb
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x1e2834
lock
decl (%rax)
jne 0x1e2834
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x1e282e
testq %rsi, %rsi
je 0x1e2834
movq %rsi, %rdi
callq 0x563b0
jmp 0x1e2834
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/innerproduct_x86_avx512.cpp
|
ncnn::InnerProduct_x86_fma::create_pipeline_int8_x86(ncnn::Option const&)
|
// Prepare the int8 inner-product pipeline: repack the quantized weights into
// the packed layout used by the forward pass and precompute per-output-channel
// dequantization scales.
//
// @param opt  layer options (packing layout and lightmode flags are read)
// @return 0 on success
int InnerProduct_x86_fma::create_pipeline_int8_x86(const Option& opt)
{
    const int num_input = weight_data_size / num_output;

    // Pick the output packing factor: pack8 when the channel count allows it.
    int out_elempack = 1;
#if __SSE2__
    if (opt.use_packing_layout)
    {
        out_elempack = num_output % 8 == 0 ? 8 : 1;
    }
#endif // __SSE2__

    // Repack weights
    //   src layout: inch-outch
    //   dst layout: pb-inch-outch/pb (out_elempack consecutive output channels interleaved)
    {
        Mat weight_data_r2 = weight_data.reshape(num_input, num_output);

        weight_data_tm.create(num_input, num_output / out_elempack, (size_t)out_elempack, out_elempack);

        for (int oc = 0; oc + out_elempack <= num_output; oc += out_elempack)
        {
            signed char* outp = weight_data_tm.row<signed char>(oc / out_elempack);

            for (int i = 0; i < num_input; i++)
            {
                for (int k = 0; k < out_elempack; k++)
                {
                    *outp++ = weight_data_r2.row<signed char>(oc + k)[i];
                }
            }
        }
    }

    // Precompute dequantization scales:
    //   scale_in = 1 / (input_scale * weight_scale)
    // A zero weight scale maps to a zero output scale.
    scale_in_data.create(num_output);
    for (int oc = 0; oc < num_output; oc++)
    {
        const float ws = weight_data_int8_scales[oc];
        scale_in_data[oc] = (ws == 0) ? 0.f : 1.f / (bottom_blob_int8_scales[0] * ws);
    }

    // In lightmode the original weights are no longer needed after repacking.
    if (opt.lightmode)
    {
        weight_data.release();
    }

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x50, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdi), %rax
movq -0x18(%rax), %rax
leaq 0x130(%rdi,%rax), %rsi
movl -0x60(%rsi), %ecx
movl -0x58(%rsi), %eax
cltd
idivl %ecx
movl %eax, %ebp
testb $0x7, %cl
sete %r12b
andb 0x27(%rbx), %r12b
movl $0x8, %eax
movl $0x1, %r15d
cmovnel %eax, %r15d
leaq 0x8(%rsp), %rdi
movl %ebp, %edx
xorl %r8d, %r8d
callq 0x5a808
leaq 0x10(%r14), %rdi
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %eax
cltd
idivl %r15d
movl %ebp, %esi
movl %eax, %edx
movq %r15, %rcx
movl %r15d, %r8d
xorl %r9d, %r9d
callq 0x5a14a
movq (%r14), %rax
movq -0x18(%rax), %r8
cmpl 0xd0(%r14,%r8), %r15d
jg 0x1f3994
leal -0x1(%r15), %edx
movzbl %r12b, %ecx
leal (%rcx,%rcx,2), %ecx
movl %ebp, %esi
xorl %edi, %edi
testl %ebp, %ebp
jle 0x1f3980
movl %edi, %r8d
shrl %cl, %r8d
movslq 0x3c(%r14), %r9
imulq %r9, %r8
imulq 0x20(%r14), %r8
addq 0x10(%r14), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
leaq (%rdi,%r10), %r11
movslq 0x34(%rsp), %r12
imulq %r11, %r12
imulq 0x18(%rsp), %r12
addq 0x8(%rsp), %r12
movb (%r9,%r12), %r11b
movb %r11b, (%r8,%r10)
incq %r10
cmpq %r10, %r15
jne 0x1f3949
incq %r9
addq %r10, %r8
cmpq %rsi, %r9
jne 0x1f3946
movq -0x18(%rax), %r8
addq %r15, %rdi
leaq (%rdi,%rdx), %r9
movslq 0xd0(%r14,%r8), %r10
cmpq %r10, %r9
jl 0x1f3928
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1f39c7
lock
decl (%rax)
jne 0x1f39c7
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x1f39ba
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1f39c7
testq %rsi, %rsi
je 0x1f39c7
movq %rsi, %rdi
callq 0x563b0
leaq 0x58(%r14), %r15
movq (%r14), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r14,%rax), %esi
movl $0x4, %edx
movq %r15, %rdi
xorl %ecx, %ecx
callq 0x5b024
movq (%r14), %rax
movq -0x18(%rax), %rcx
leaq (%r14,%rcx), %r12
cmpl $0x0, 0xd0(%r14,%rcx)
jle 0x1f3a50
movq (%r15), %rcx
xorl %edx, %edx
vmovss 0x22130c(%rip), %xmm0 # 0x414d18
movq 0x1c0(%r12), %rsi
vmovss (%rsi,%rdx,4), %xmm2
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm2, %xmm1
je 0x1f3a33
movq 0x208(%r12), %rsi
vmulss (%rsi), %xmm2, %xmm1
vdivss %xmm1, %xmm0, %xmm1
vmovss %xmm1, (%rcx,%rdx,4)
incq %rdx
movq -0x18(%rax), %rsi
leaq (%r14,%rsi), %r12
movslq 0xd0(%r14,%rsi), %rsi
cmpq %rsi, %rdx
jl 0x1f3a0c
cmpb $0x0, (%rbx)
je 0x1f3ac8
leaq 0x130(%r12), %rbx
movq 0x138(%r12), %rax
testq %rax, %rax
je 0x1f3a99
lock
decl (%rax)
jne 0x1f3a99
movq 0x130(%r12), %rsi
movq 0x150(%r12), %rdi
testq %rdi, %rdi
je 0x1f3a8c
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x1f3a99
testq %rsi, %rsi
je 0x1f3a99
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x170(%r12)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0xc(%rbx)
vmovups %xmm0, (%rbx)
vmovups %xmm0, 0x158(%r12)
movl $0x0, 0x168(%r12)
xorl %eax, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
jmp 0x1f3b17
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1f3b0f
lock
decl (%rax)
jne 0x1f3b0f
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x1f3b09
testq %rsi, %rsi
je 0x1f3b0f
movq %rsi, %rdi
callq 0x563b0
jmp 0x1f3b0f
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/innerproduct_x86_fma.cpp
|
ncnn::innerproduct_fp16s_pack4_sse_f16c(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, int, ncnn::Mat const&, ncnn::Option const&)
|
// Inner product (fully-connected) kernel for pack4 output with fp16-storage
// weights. Each output "channel" p produces 4 floats; weights are kept as
// 16-bit halves and expanded to fp32 with cvtph on the fly.
//
// Note: this function only computes when compiled with __F16C__; otherwise it
// either tail-calls the F16C-compiled variant (runtime dispatch) or is a no-op.
static void innerproduct_fp16s_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int activation_type, const Mat& activation_params, const Option& opt)
{
#if NCNN_RUNTIME_CPU && NCNN_F16C && __AVX__ && !__F16C__
    // Runtime CPU dispatch: forward to the F16C-specialized build of this
    // routine when the host supports the F16C extension.
    if (ncnn::cpu_support_x86_f16c())
    {
        innerproduct_fp16s_pack4_sse_f16c(bottom_blob, top_blob, weight_data_fp16, bias_data, activation_type, activation_params, opt);
        return;
    }
#endif

#if __F16C__
    const int num_input = bottom_blob.w * bottom_blob.elempack;
    const int num_output = top_blob.w;

    // Null when the layer has no bias term.
    const float* bias_data_ptr = bias_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < num_output; p++)
    {
        // Four independent accumulator pairs let the i+=8 loop keep more
        // multiply-accumulate chains in flight; they are merged at the end.
        __m128 _sum0 = _mm_setzero_ps();
        __m256 _sum01 = _mm256_setzero_ps();
        __m256 _sum23 = _mm256_setzero_ps();
        __m256 _sum45 = _mm256_setzero_ps();
        __m256 _sum67 = _mm256_setzero_ps();

        if (bias_data_ptr)
        {
            // Seed the scalar-tail accumulator with the 4-wide bias.
            _sum0 = _mm_loadu_ps(bias_data_ptr + p * 4);
        }

        const unsigned short* kptr = weight_data_fp16.row<const unsigned short>(p);

        const float* sptr = bottom_blob;

        int i = 0;
        // Main loop: 8 input values x 4 outputs per iteration.
        // Each input scalar is broadcast across a 128-bit lane; two lanes are
        // fused into one 256-bit vector so one fp16->fp32 conversion feeds two
        // inputs' worth of weights.
        for (; i + 7 < num_input; i += 8)
        {
            __m128 _val0 = _mm_broadcast_ss(sptr);
            __m128 _val1 = _mm_broadcast_ss(sptr + 1);
            __m128 _val2 = _mm_broadcast_ss(sptr + 2);
            __m128 _val3 = _mm_broadcast_ss(sptr + 3);
            __m128 _val4 = _mm_broadcast_ss(sptr + 4);
            __m128 _val5 = _mm_broadcast_ss(sptr + 5);
            __m128 _val6 = _mm_broadcast_ss(sptr + 6);
            __m128 _val7 = _mm_broadcast_ss(sptr + 7);

            __m256 _val01 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val0), _val1, 1);
            __m256 _val23 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val2), _val3, 1);
            __m256 _val45 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val4), _val5, 1);
            __m256 _val67 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val6), _val7, 1);

            // 32 fp16 weights = 8 inputs x 4 outputs, loaded as two 256-bit
            // integer vectors and expanded to fp32 half-vector by half-vector.
            __m256i _w0123 = _mm256_lddqu_si256((const __m256i*)kptr);
            __m256i _w4567 = _mm256_lddqu_si256((const __m256i*)(kptr + 16));
            __m256 _w01 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w0123, 0));
            __m256 _w23 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w0123, 1));
            __m256 _w45 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w4567, 0));
            __m256 _w67 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w4567, 1));

            _sum01 = _mm256_comp_fmadd_ps(_val01, _w01, _sum01);
            _sum23 = _mm256_comp_fmadd_ps(_val23, _w23, _sum23);
            _sum45 = _mm256_comp_fmadd_ps(_val45, _w45, _sum45);
            _sum67 = _mm256_comp_fmadd_ps(_val67, _w67, _sum67);

            sptr += 8;
            kptr += 32;
        }
        // 4-at-a-time tail: same scheme with one 256-bit weight load.
        for (; i + 3 < num_input; i += 4)
        {
            __m128 _val0 = _mm_set1_ps(sptr[0]);
            __m128 _val1 = _mm_set1_ps(sptr[1]);
            __m128 _val2 = _mm_set1_ps(sptr[2]);
            __m128 _val3 = _mm_set1_ps(sptr[3]);

            __m256 _val01 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val0), _val1, 1);
            __m256 _val23 = _mm256_insertf128_ps(_mm256_castps128_ps256(_val2), _val3, 1);

            __m256i _w0123 = _mm256_lddqu_si256((const __m256i*)kptr);
            __m256 _w01 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w0123, 0));
            __m256 _w23 = _mm256_cvtph_ps(_mm256_extractf128_si256(_w0123, 1));

            _sum01 = _mm256_comp_fmadd_ps(_val01, _w01, _sum01);
            _sum23 = _mm256_comp_fmadd_ps(_val23, _w23, _sum23);

            sptr += 4;
            kptr += 16;
        }
        // Scalar tail: one input value, 4 fp16 weights per iteration.
        for (; i < num_input; i++)
        {
            __m128 _val = _mm_set1_ps(sptr[0]);
            __m128 _w = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)kptr));
            _sum0 = _mm_comp_fmadd_ps(_val, _w, _sum0);

            sptr += 1;
            kptr += 4;
        }

        // Reduce the four 256-bit accumulators plus the 128-bit tail/bias
        // accumulator down to a single 4-float result.
        _sum01 = _mm256_add_ps(_sum01, _sum23);
        _sum45 = _mm256_add_ps(_sum45, _sum67);
        _sum01 = _mm256_add_ps(_sum01, _sum45);
        _sum0 = _mm_add_ps(_sum0, _mm256_extractf128_ps(_sum01, 0));
        _sum0 = _mm_add_ps(_sum0, _mm256_extractf128_ps(_sum01, 1));

        _sum0 = activation_sse(_sum0, activation_type, activation_params);

        float* outptr = top_blob;
        _mm_storeu_ps(outptr + p * 4, _sum0);
    }
#else  // __F16C__
    (void)bottom_blob;
    (void)top_blob;
    (void)weight_data_fp16;
    (void)bias_data;
    (void)activation_type;
    (void)activation_params;
    (void)opt;
#endif // __F16C__
}
|
movslq 0x2c(%rsi), %rax
testq %rax, %rax
jle 0x21f750
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq (%rcx), %rcx
movl 0x18(%rdi), %r10d
imull 0x2c(%rdi), %r10d
movl %r10d, %r11d
andl $-0x8, %r11d
xorl %ebx, %ebx
decl %r8d
leaq 0x1fd06a(%rip), %r14 # 0x41c178
vxorps %xmm5, %xmm5, %xmm5
vbroadcastss 0x1f5575(%rip), %xmm13 # 0x414690
vbroadcastss 0x1f5bf4(%rip), %xmm6 # 0x414d18
vxorps %xmm1, %xmm1, %xmm1
testq %rcx, %rcx
je 0x21f13a
movq %rbx, %r15
shlq $0x4, %r15
vmovups (%rcx,%r15), %xmm1
movslq 0x2c(%rdx), %r15
imulq %rbx, %r15
imulq 0x10(%rdx), %r15
addq (%rdx), %r15
movq (%rdi), %r12
cmpl $0x8, %r10d
jl 0x21f221
vxorps %xmm15, %xmm15, %xmm15
movl $0x7, %ebp
vxorps %xmm14, %xmm14, %xmm14
vxorps %xmm2, %xmm2, %xmm2
vxorps %xmm7, %xmm7, %xmm7
vbroadcastss (%r12), %xmm3
vbroadcastss 0x4(%r12), %xmm4
vinsertf128 $0x1, %xmm4, %ymm3, %ymm3
vbroadcastss 0x8(%r12), %xmm4
vbroadcastss 0xc(%r12), %xmm8
vinsertf128 $0x1, %xmm8, %ymm4, %ymm4
vbroadcastss 0x10(%r12), %xmm8
vbroadcastss 0x14(%r12), %xmm9
vinsertf128 $0x1, %xmm9, %ymm8, %ymm8
vbroadcastss 0x18(%r12), %xmm9
vbroadcastss 0x1c(%r12), %xmm10
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vlddqu (%r15), %ymm10
vlddqu 0x20(%r15), %ymm11
vcvtph2ps %xmm10, %ymm12
vmulps %ymm3, %ymm12, %ymm3
vaddps %ymm3, %ymm15, %ymm15
vextractf128 $0x1, %ymm10, %xmm3
vcvtph2ps %xmm3, %ymm3
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm14, %ymm14
vcvtph2ps %xmm11, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vextractf128 $0x1, %ymm11, %xmm3
vcvtph2ps %xmm3, %ymm3
vmulps %ymm3, %ymm9, %ymm3
vaddps %ymm7, %ymm3, %ymm7
addq $0x20, %r12
addq $0x40, %r15
addl $0x8, %ebp
cmpl %r10d, %ebp
jl 0x21f16e
movl %r11d, %r13d
jmp 0x21f236
xorl %r13d, %r13d
vxorps %xmm7, %xmm7, %xmm7
vxorps %xmm2, %xmm2, %xmm2
vxorps %xmm14, %xmm14, %xmm14
vxorps %xmm15, %xmm15, %xmm15
movl %r13d, %ebp
orl $0x3, %ebp
cmpl %r10d, %ebp
jge 0x21f2a5
vbroadcastss (%r12), %xmm3
vbroadcastss 0x4(%r12), %xmm4
vinsertf128 $0x1, %xmm4, %ymm3, %ymm3
vbroadcastss 0x8(%r12), %xmm4
vbroadcastss 0xc(%r12), %xmm8
vinsertf128 $0x1, %xmm8, %ymm4, %ymm4
vlddqu (%r15), %ymm8
vcvtph2ps %xmm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vaddps %ymm3, %ymm15, %ymm15
vextractf128 $0x1, %ymm8, %xmm3
vcvtph2ps %xmm3, %ymm3
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm14, %ymm14
addq $0x10, %r12
addq $0x20, %r15
leal 0x4(%r13), %ebp
addl $0x7, %r13d
cmpl %r10d, %r13d
movl %ebp, %r13d
jl 0x21f241
movl %r10d, %ebp
subl %r13d, %ebp
jle 0x21f2cc
xorl %r13d, %r13d
vbroadcastss (%r12,%r13,4), %xmm3
vcvtph2ps (%r15,%r13,8), %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm1, %xmm3, %xmm1
incq %r13
cmpl %r13d, %ebp
jne 0x21f2b0
vaddps %ymm7, %ymm2, %ymm2
vaddps %ymm15, %ymm14, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm1, %xmm2, %xmm1
cmpl $0x5, %r8d
ja 0x21f72a
movslq (%r14,%r8,4), %r15
addq %r14, %r15
jmpq *%r15
vmaxps %xmm5, %xmm1, %xmm1
jmp 0x21f72a
vbroadcastss 0x1f7d4b(%rip), %xmm9 # 0x417058
vminps %xmm1, %xmm9, %xmm2
vbroadcastss 0x1f7d42(%rip), %xmm10 # 0x41705c
vmaxps %xmm2, %xmm10, %xmm2
vbroadcastss 0x1f7d39(%rip), %xmm11 # 0x417060
vmulps %xmm2, %xmm11, %xmm3
vaddps %xmm3, %xmm13, %xmm3
vcvttps2dq %xmm3, %xmm4
vcvtdq2ps %xmm4, %xmm4
vcmpltps %xmm4, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vsubps %xmm3, %xmm4, %xmm3
vmovaps %xmm6, %xmm5
vbroadcastss 0x1f840f(%rip), %xmm9 # 0x417760
vmulps %xmm3, %xmm9, %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm4
vbroadcastss 0x1f7d06(%rip), %xmm12 # 0x41706c
vmulps %xmm2, %xmm12, %xmm7
vbroadcastss 0x1f7cfd(%rip), %xmm14 # 0x417070
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vbroadcastss 0x1f7cf0(%rip), %xmm15 # 0x417074
vaddps %xmm7, %xmm15, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vmovaps %xmm13, %xmm6
vbroadcastss 0x1f7cdf(%rip), %xmm13 # 0x417078
vaddps %xmm7, %xmm13, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vbroadcastss 0x1f7cd2(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm7, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm4, %xmm4
vaddps %xmm5, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm5, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vbroadcastss 0x1f7ca0(%rip), %xmm3 # 0x417080
vmaxps %xmm3, %xmm2, %xmm3
vpsrld $0x17, %xmm3, %xmm4
vbroadcastss 0x1f7c96(%rip), %xmm7 # 0x417088
vpaddd %xmm7, %xmm4, %xmm4
vbroadcastss 0x1f7c85(%rip), %xmm7 # 0x417084
vandps %xmm7, %xmm3, %xmm3
vorps %xmm6, %xmm3, %xmm3
vcvtdq2ps %xmm4, %xmm4
vbroadcastss 0x1f7c78(%rip), %xmm7 # 0x41708c
vcmpltps %xmm7, %xmm3, %xmm7
vandps %xmm3, %xmm7, %xmm8
vbroadcastss 0x1f7c6a(%rip), %xmm0 # 0x417090
vaddps %xmm0, %xmm3, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vandps %xmm5, %xmm7, %xmm7
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm3, %xmm3, %xmm7
vbroadcastss 0x1f7c51(%rip), %xmm8 # 0x417094
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c48(%rip), %xmm13 # 0x417098
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c3a(%rip), %xmm13 # 0x41709c
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c2c(%rip), %xmm13 # 0x4170a0
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c1e(%rip), %xmm13 # 0x4170a4
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c10(%rip), %xmm13 # 0x4170a8
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7c02(%rip), %xmm13 # 0x4170ac
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7bf4(%rip), %xmm13 # 0x4170b0
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f7be6(%rip), %xmm13 # 0x4170b4
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vbroadcastss 0x1f8284(%rip), %xmm13 # 0x417764
vaddps %xmm13, %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vcmpleps 0x1f4b6e(%rip), %xmm2, %xmm2 # 0x414060
vmovaps %xmm6, %xmm13
vmulps %xmm4, %xmm9, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vbroadcastss 0x1f98d9(%rip), %xmm4 # 0x418de4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1f822c(%rip), %xmm4 # 0x417744
vblendvps %xmm2, %xmm4, %xmm3, %xmm2
vbroadcastss 0x1f7b31(%rip), %xmm3 # 0x417058
vminps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm10, %xmm2
vmulps %xmm2, %xmm11, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm4
vcvtdq2ps %xmm4, %xmm4
vcmpltps %xmm4, %xmm3, %xmm3
vandps %xmm5, %xmm3, %xmm3
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm9, %xmm4
vmovaps %xmm5, %xmm6
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm4
vmulps %xmm2, %xmm12, %xmm7
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vaddps %xmm7, %xmm15, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vbroadcastss 0x1f7aff(%rip), %xmm5 # 0x417078
vaddps %xmm5, %xmm7, %xmm7
vxorps %xmm5, %xmm5, %xmm5
vmulps %xmm2, %xmm7, %xmm7
vbroadcastss 0x1f7aee(%rip), %xmm8 # 0x41707c
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmulps %xmm7, %xmm4, %xmm4
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vaddps %xmm3, %xmm3, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1f981c(%rip), %xmm7 # 0x418dec
vsubps %xmm2, %xmm7, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm0, %xmm4, %xmm3
vaddps %xmm2, %xmm3, %xmm2
jmp 0x21f726
movq (%r9), %r15
vbroadcastss (%r15), %xmm2
vbroadcastss 0x4(%r15), %xmm3
vmaxps %xmm2, %xmm1, %xmm1
vminps %xmm3, %xmm1, %xmm1
jmp 0x21f72a
vbroadcastss 0x1f7a4b(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1f7a42(%rip), %xmm2 # 0x417058
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1f7a39(%rip), %xmm2 # 0x41705c
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1f7a30(%rip), %xmm2 # 0x417060
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vcvttps2dq %xmm2, %xmm3
vcvtdq2ps %xmm3, %xmm3
vcmpltps %xmm3, %xmm2, %xmm2
vandps %xmm6, %xmm2, %xmm2
vsubps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1f9f2e(%rip), %xmm3 # 0x419584
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmulps %xmm1, %xmm1, %xmm3
vbroadcastss 0x1f7a01(%rip), %xmm4 # 0x41706c
vmulps %xmm4, %xmm1, %xmm4
vbroadcastss 0x1f79f8(%rip), %xmm7 # 0x417070
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm1, %xmm4, %xmm4
vbroadcastss 0x1f79eb(%rip), %xmm7 # 0x417074
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm1, %xmm4, %xmm4
vbroadcastss 0x1f79de(%rip), %xmm7 # 0x417078
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm1, %xmm4, %xmm4
vbroadcastss 0x1f79d1(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm4, %xmm4
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm13, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm6, %xmm1, %xmm1
vaddps %xmm3, %xmm1, %xmm1
vcvttps2dq %xmm2, %xmm2
vpslld $0x17, %xmm2, %xmm2
vpaddd %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm2, %xmm1
jmp 0x21f72a
movq (%r9), %r15
vmaxps %xmm5, %xmm1, %xmm2
vminps %xmm5, %xmm1, %xmm1
vbroadcastss (%r15), %xmm3
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm2, %xmm1, %xmm1
jmp 0x21f72a
movq (%r9), %r15
vbroadcastss (%r15), %xmm2
vbroadcastss 0x4(%r15), %xmm3
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
movq (%rsi), %r15
movq %rbx, %r12
shlq $0x4, %r12
vmovups %xmm1, (%r15,%r12)
incq %rbx
cmpq %rax, %rbx
jne 0x21f124
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/ysh329[P]ncnn/src/layer/x86/innerproduct_fp16s.h
|
ncnn::Input::Input()
|
// Input layers are pure pass-through: one blob in, transformed in place,
// with every storage/packing variant accepted.
Input::Input()
{
    support_vulkan = true;
    support_packing = true;
    support_bf16_storage = true;
    support_image_storage = true;

    one_blob_only = true;
    support_inplace = true;
}
|
pushq %rbx
movq %rdi, %rbx
callq 0x70f80
leaq 0x27255c(%rip), %rax # 0x49bb40
movq %rax, (%rbx)
movb $0x1, %al
movb %al, 0xf(%rbx)
movb %al, 0xc(%rbx)
movl $0x1010101, 0x8(%rbx) # imm = 0x1010101
popq %rbx
retq
|
/ysh329[P]ncnn/src/layer/input.cpp
|
ncnn::LRN::load_param(ncnn::ParamDict const&)
|
// Load LRN layer parameters from the param dict.
// Parameter ids and defaults:
//   0 = region_type (default 0)
//   1 = local_size  (default 5)
//   2 = alpha       (default 1.f)
//   3 = beta        (default 0.75f)
//   4 = bias        (default 1.f)
// Always returns 0.
int LRN::load_param(const ParamDict& pd)
{
    region_type = pd.get(0, 0);
    local_size = pd.get(1, 5);
    alpha = pd.get(2, 1.f);
    beta = pd.get(3, 0.75f);
    bias = pd.get(4, 1.f);

    return 0;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xd0(%rbx)
movq %r14, %rdi
movl $0x1, %esi
movl $0x5, %edx
callq 0x69336
movl %eax, 0xd4(%rbx)
movss 0x1eb47c(%rip), %xmm0 # 0x414d18
movq %r14, %rdi
movl $0x2, %esi
callq 0x69354
movss %xmm0, 0xd8(%rbx)
movss 0x1f2a13(%rip), %xmm0 # 0x41c2cc
movq %r14, %rdi
movl $0x3, %esi
callq 0x69354
movss %xmm0, 0xdc(%rbx)
movq %r14, %rdi
movl $0x4, %esi
movss 0x1eb43a(%rip), %xmm0 # 0x414d18
callq 0x69354
movss %xmm0, 0xe0(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
nop
|
/ysh329[P]ncnn/src/layer/lrn.cpp
|
ncnn::LRN_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Local Response Normalization, applied in place:
//   out = in * (bias + alpha_div_size * sum_of_squares)^(-beta)
// where the squared-value sum is taken either across neighboring channels
// (ACROSS_CHANNELS) or over a local_size x local_size spatial window
// (WITHIN_CHANNEL).
//
// @param bottom_top_blob  blob normalized in place
// @param opt              threading and workspace allocator options
// @return 0 on success, -100 on workspace allocation failure
int LRN_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;

    // squared values with local_size padding
    Mat square_blob;
    square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
    if (square_blob.empty())
        return -100;

    // Stage 1: square every element once up front; both region modes consume
    // these squared values.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = bottom_top_blob.channel(q);
        float* outptr = square_blob.channel(q);

        int i = 0;
#if __AVX__
        // 8-wide vectorized squaring.
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            __m256 _outp = _mm256_mul_ps(_p, _p);
            _mm256_storeu_ps(outptr, _outp);

            ptr += 8;
            outptr += 8;
        }
#endif // __AVX__
        // Scalar tail.
        for (; i < size; i++)
        {
            *outptr = *ptr * *ptr;

            ptr++;
            outptr++;
        }
    }

    if (region_type == NormRegion_ACROSS_CHANNELS)
    {
        Mat square_sum;
        square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
        if (square_sum.empty())
            return -100;
        square_sum.fill(0.f);

        const float alpha_div_size = alpha / local_size;

        // Each thread owns one output channel q; it accumulates squares from
        // the local_size-wide channel window centered at q, then normalizes.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            // square sum over channels [q - local_size/2, q + local_size/2],
            // clamped to the valid channel range
            for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
            {
                if (p < 0 || p >= channels)
                    continue;

                const float* sptr = square_blob.channel(p);
                float* ssptr = square_sum.channel(q);

                int i = 0;
#if __AVX__
                for (; i + 7 < size; i += 8)
                {
                    __m256 _sp = _mm256_loadu_ps(sptr);
                    __m256 _ssp = _mm256_loadu_ps(ssptr);
                    _ssp = _mm256_add_ps(_ssp, _sp);
                    _mm256_storeu_ps(ssptr, _ssp);

                    sptr += 8;
                    ssptr += 8;
                }
#endif // __AVX__
                for (; i < size; i++)
                {
                    *ssptr += *sptr;
                    sptr++;
                    ssptr++;
                }
            }

            float* ptr = bottom_top_blob.channel(q);
            float* ssptr = square_sum.channel(q);

            int i = 0;
#if __AVX__
            // Apply x * (bias + ads * ss)^(-beta), 8 elements at a time.
            __m256 _bias = _mm256_set1_ps(bias);
            __m256 _ads = _mm256_set1_ps(alpha_div_size);
            __m256 _mb = _mm256_set1_ps(-beta);
            for (; i + 7 < size; i += 8)
            {
                __m256 _p = _mm256_loadu_ps(ptr);
                __m256 _ssp = _mm256_loadu_ps(ssptr);
                _ssp = _mm256_mul_ps(_ssp, _ads);
                _ssp = _mm256_add_ps(_ssp, _bias);
                _ssp = pow256_ps(_ssp, _mb);
                _p = _mm256_mul_ps(_p, _ssp);
                _mm256_storeu_ps(ptr, _p);

                ssptr += 8;
                ptr += 8;
            }
#endif // __AVX__
            for (; i < size; i++)
            {
                *ptr = *ptr * pow(bias + alpha_div_size * *ssptr, -beta);

                ssptr++;
                ptr++;
            }
        }
    }
    else if (region_type == NormRegion_WITHIN_CHANNEL)
    {
        int outw = w;
        int outh = h;

        Mat square_blob_bordered = square_blob;
        int pad = local_size / 2;
        if (pad > 0)
        {
            // Zero-pad the squared blob so every spatial window is full-size.
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
            if (square_blob_bordered.empty())
                return -100;

            // w/h now describe the padded blob; outw/outh keep the original extent.
            w = square_blob_bordered.w;
            h = square_blob_bordered.h;
        }

        const int maxk = local_size * local_size;

        const float alpha_div_size = alpha / maxk;

        // norm window offsets: element offsets of the local_size x local_size
        // window relative to its top-left corner in the padded blob
        std::vector<int> _space_ofs(maxk);
        int* space_ofs = &_space_ofs[0];
        {
            int p1 = 0;
            int p2 = 0;
            int gap = w - local_size;  // stride jump between window rows
            for (int i = 0; i < local_size; i++)
            {
                for (int j = 0; j < local_size; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2++;
                }
                p2 += gap;
            }
        }

        // For each output pixel, sum the squared window via the precomputed
        // offsets and normalize in place.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);
            const Mat m = square_blob_bordered.channel(q);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    const float* sptr = m.row(i) + j;

                    float ss = 0.f;

                    for (int k = 0; k < maxk; k++)
                    {
                        float val = sptr[space_ofs[k]];
                        ss += val;
                    }

                    ptr[j] = ptr[j] * pow(bias + alpha_div_size * ss, -beta);
                }

                ptr += outw;
            }
        }
    }

    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x168, %rsp # imm = 0x168
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0x18(%rsp)
movl 0x2c(%rsi), %esi
movl 0x30(%r12), %edx
movl 0x38(%r12), %r14d
movq 0x10(%r12), %rbx
leaq 0xa0(%rsp), %rdi
movq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
movq 0x10(%r15), %r9
movq %rsi, 0x10(%rsp)
movq %rdx, 0x80(%rsp)
movl %r14d, %ecx
movq %rbx, %r8
callq 0x5b24c
movq 0xa0(%rsp), %rcx
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
testq %rcx, %rcx
je 0x22a9b3
movq 0xe0(%rsp), %rdx
movslq 0xd8(%rsp), %rax
movq %rdx, %rsi
imulq %rax, %rsi
testq %rsi, %rsi
je 0x22a9b3
movq 0x80(%rsp), %rsi
imull 0x10(%rsp), %esi
movl %esi, 0xc(%rsp)
testl %r14d, %r14d
movq 0x18(%rsp), %r11
jle 0x22a2bd
movq (%r12), %rsi
movq 0x40(%r12), %rdi
imulq 0x10(%r12), %rdi
imulq 0xb0(%rsp), %rdx
movl 0xc(%rsp), %r8d
xorl %r9d, %r9d
cmpl $0x0, 0xc(%rsp)
jle 0x22a2af
xorl %r10d, %r10d
movss (%rsi,%r10,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, (%rcx,%r10,4)
incq %r10
cmpl %r10d, %r8d
jne 0x22a297
incq %r9
addq %rdi, %rsi
addq %rdx, %rcx
cmpq %r14, %r9
jne 0x22a28d
movq (%r11), %rcx
movq -0x18(%rcx), %rdx
movl 0xd0(%r11,%rdx), %edx
xorl %ebp, %ebp
testl %edx, %edx
movq %r14, 0x98(%rsp)
je 0x22a40b
cmpl $0x1, %edx
jne 0x22a9b3
movq 0xa0(%rsp), %rsi
movq 0xa8(%rsp), %rdx
movq %rsi, 0x30(%rsp)
movq %rdx, 0x38(%rsp)
movq 0xb0(%rsp), %rsi
movq %rsi, 0x40(%rsp)
movl 0xb8(%rsp), %esi
movl %esi, 0x48(%rsp)
movq 0xc0(%rsp), %rsi
movq %rsi, 0x50(%rsp)
movups 0xc8(%rsp), %xmm0
movups %xmm0, 0x58(%rsp)
movl %eax, 0x68(%rsp)
movq 0xe0(%rsp), %rax
movq %rax, 0x70(%rsp)
testq %rdx, %rdx
je 0x22a34f
lock
incl (%rdx)
movq (%r11), %rcx
movq -0x18(%rcx), %rax
movl 0xd4(%r11,%rax), %r8d
cmpl $0x2, %r8d
jl 0x22a6a9
shrl %r8d
movups (%r15), %xmm0
movups 0x10(%r15), %xmm1
movups 0x20(%r15), %xmm2
movups 0x30(%r15), %xmm3
leaq 0x120(%rsp), %rax
movaps %xmm3, 0x30(%rax)
movaps %xmm2, 0x20(%rax)
movaps %xmm1, 0x10(%rax)
movaps %xmm0, (%rax)
movq 0x10(%r15), %rdx
movq %rdx, 0x8(%rax)
movq -0x18(%rcx), %rdx
movl %r8d, %ecx
notl %ecx
addl 0xd4(%r11,%rdx), %ecx
leaq 0xa0(%rsp), %rdi
leaq 0x30(%rsp), %rsi
xorps %xmm0, %xmm0
movl %r8d, %edx
movl %ecx, %r9d
pushq %rax
pushq $0x0
callq 0x5c159
addq $0x10, %rsp
cmpq $0x0, 0x30(%rsp)
je 0x22a95e
movslq 0x68(%rsp), %rax
imulq 0x70(%rsp), %rax
testq %rax, %rax
je 0x22a95e
movl 0x5c(%rsp), %r13d
movq 0x18(%rsp), %r11
movq (%r11), %rax
movq -0x18(%rax), %rax
movl 0xd4(%r11,%rax), %r8d
jmp 0x22a6b1
leaq 0x30(%rsp), %rdi
movq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
movq 0x10(%r15), %r9
movq 0x10(%rsp), %rsi
movq 0x80(%rsp), %rdx
movl %r14d, %ecx
movq %rbx, %r8
callq 0x5b24c
movq 0x30(%rsp), %rdi
testq %rdi, %rdi
je 0x22a674
movq 0x70(%rsp), %rax
movslq 0x68(%rsp), %rcx
movq %rax, %rdx
imulq %rcx, %rdx
testq %rdx, %rdx
je 0x22a674
imull %ecx, %eax
testl %eax, %eax
jle 0x22a482
movl %eax, %edx
shlq $0x2, %rdx
xorl %esi, %esi
callq 0x56070
testl %r14d, %r14d
movq 0x18(%rsp), %r11
jle 0x22a63a
movq (%r11), %rbx
movq -0x18(%rbx), %rax
movss 0xd8(%r11,%rax), %xmm1
xorps %xmm0, %xmm0
cvtsi2ssl 0xd4(%r11,%rax), %xmm0
movq (%r12), %r15
movq 0x40(%r12), %rcx
movq 0x70(%rsp), %rax
imulq 0x40(%rsp), %rax
movq %rax, 0x28(%rsp)
movq %r12, %rax
movq 0x30(%rsp), %r12
movss 0x1ea841(%rip), %xmm2 # 0x414d18
divss %xmm0, %xmm2
movl 0xc(%rsp), %r13d
imulq 0x10(%rax), %rcx
movq %rcx, 0x90(%rsp)
xorl %ebp, %ebp
movaps 0x1e9dfa(%rip), %xmm3 # 0x4142f0
movss %xmm1, 0x10(%rsp)
movss %xmm2, 0x20(%rsp)
movq -0x18(%rbx), %rdi
movl 0xd4(%r11,%rdi), %eax
movl %eax, %ecx
shrl $0x1f, %ecx
addl %eax, %ecx
sarl %ecx
movl %ebp, %eax
subl %ecx, %eax
addl %ebp, %ecx
cmpl %ecx, %eax
jg 0x22a5b5
movq 0xa0(%rsp), %rcx
movq 0xe0(%rsp), %rdx
movq 0x70(%rsp), %rsi
imulq 0x40(%rsp), %rsi
imulq %rbp, %rsi
addq 0x30(%rsp), %rsi
imulq 0xb0(%rsp), %rdx
cmpl $0x0, 0xc(%rsp)
setle %r8b
cmpl %r14d, %eax
setae %r9b
orb %r8b, %r9b
jne 0x22a591
movl %eax, %edi
imulq %rdx, %rdi
addq %rcx, %rdi
xorl %r8d, %r8d
movss (%rsi,%r8,4), %xmm0
addss (%rdi,%r8,4), %xmm0
movss %xmm0, (%rsi,%r8,4)
incq %r8
cmpl %r8d, %r13d
jne 0x22a573
movq -0x18(%rbx), %rdi
leal 0x1(%rax), %r8d
movl 0xd4(%r11,%rdi), %r9d
movl %r9d, %r10d
shrl $0x1f, %r10d
addl %r9d, %r10d
sarl %r10d
addl %ebp, %r10d
cmpl %r10d, %eax
movl %r8d, %eax
jl 0x22a552
cmpl $0x0, 0xc(%rsp)
jle 0x22a619
xorl %r14d, %r14d
movq -0x18(%rbx), %rax
movss (%r12,%r14,4), %xmm0
mulss %xmm1, %xmm0
mulss %xmm2, %xmm0
addss 0xe0(%r11,%rax), %xmm0
movss 0xdc(%r11,%rax), %xmm1
xorps %xmm3, %xmm1
callq 0x560e0
movaps 0x1e9cfc(%rip), %xmm3 # 0x4142f0
movss 0x20(%rsp), %xmm2
movss 0x10(%rsp), %xmm1
movq 0x18(%rsp), %r11
mulss (%r15,%r14,4), %xmm0
movss %xmm0, (%r15,%r14,4)
incq %r14
cmpl %r14d, %r13d
jne 0x22a5bf
incq %rbp
addq 0x28(%rsp), %r12
addq 0x90(%rsp), %r15
movq 0x98(%rsp), %r14
cmpq %r14, %rbp
jne 0x22a502
movq 0x38(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22a9b3
lock
decl (%rax)
jne 0x22a9b3
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x22a992
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22a9b3
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x22a9ae
lock
decl (%rax)
jne 0x22a9ae
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x22a98b
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22a9ae
movq 0x10(%rsp), %rcx
movl %ecx, %r13d
movl %r8d, 0xc(%rsp)
movl %r8d, %ebp
imull %ebp, %ebp
movss 0xd8(%r11,%rax), %xmm0
movss %xmm0, 0x20(%rsp)
leaq 0x120(%rsp), %rdi
leaq 0x27(%rsp), %rdx
movq %rbp, %rsi
movq %r11, %rbx
callq 0x6aa92
movq 0x120(%rsp), %r15
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %edi
testl %edi, %edi
jle 0x22a746
movq %rbx, %r11
subl %edi, %r13d
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
testl %edi, %edi
jle 0x22a73d
movslq %esi, %rdi
leaq (%r15,%rdi,4), %r9
movl %edx, %r10d
xorl %r8d, %r8d
leal (%r10,%r8), %edi
movl %edi, (%r9,%r8,4)
movq -0x18(%rax), %rdi
movl 0xd4(%r11,%rdi), %edi
incq %r8
cmpl %edi, %r8d
jl 0x22a71b
addl %r8d, %esi
addl %r8d, %edx
addl %r13d, %edx
incl %ecx
cmpl %edi, %ecx
jl 0x22a70a
testl %r14d, %r14d
jle 0x22a91b
movslq 0x10(%rsp), %rax
movq %rax, 0x118(%rsp)
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
movq (%r12), %rax
movq %rax, 0x100(%rsp)
movq 0x40(%r12), %rax
imulq 0x10(%r12), %rax
movq %rax, 0xf8(%rsp)
movslq 0x5c(%rsp), %rcx
movq 0x30(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x40(%rsp), %rax
movq 0x70(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0xe8(%rsp)
imulq %rax, %rcx
movq %rcx, 0x108(%rsp)
cmpl $0x1, %ebp
adcl $0x0, %ebp
movss 0x1ea554(%rip), %xmm1 # 0x414d18
divss %xmm0, %xmm1
movss %xmm1, 0x90(%rsp)
movq $0x0, 0x88(%rsp)
cmpl $0x0, 0x80(%rsp)
jle 0x22a8fa
movq 0xe8(%rsp), %rcx
movq 0x88(%rsp), %rax
imulq %rax, %rcx
addq 0xf0(%rsp), %rcx
movq %rcx, 0x110(%rsp)
movq 0xf8(%rsp), %r13
imulq %rax, %r13
addq 0x100(%rsp), %r13
movq 0x18(%rsp), %rax
movq (%rax), %r12
movq $0x0, 0x28(%rsp)
cmpl $0x0, 0x10(%rsp)
jle 0x22a8d2
movq 0x108(%rsp), %rbx
imulq 0x28(%rsp), %rbx
addq 0x110(%rsp), %rbx
xorl %r14d, %r14d
cmpl $0x0, 0xc(%rsp)
je 0x22a880
leaq (%rbx,%r14,4), %rax
xorps %xmm0, %xmm0
xorl %ecx, %ecx
movq 0x18(%rsp), %rsi
movslq (%r15,%rcx,4), %rdx
addss (%rax,%rdx,4), %xmm0
incq %rcx
cmpq %rcx, %rbp
jne 0x22a86d
jmp 0x22a888
xorps %xmm0, %xmm0
movq 0x18(%rsp), %rsi
mulss 0x20(%rsp), %xmm0
mulss 0x90(%rsp), %xmm0
movq -0x18(%r12), %rax
addss 0xe0(%rsi,%rax), %xmm0
movss 0xdc(%rsi,%rax), %xmm1
xorps 0x1e9a3b(%rip), %xmm1 # 0x4142f0
callq 0x560e0
mulss (%r13,%r14,4), %xmm0
movss %xmm0, (%r13,%r14,4)
incq %r14
cmpq 0x10(%rsp), %r14
jne 0x22a858
movq 0x118(%rsp), %rax
leaq (%r13,%rax,4), %r13
movq 0x28(%rsp), %rcx
incq %rcx
movq %rcx, 0x28(%rsp)
cmpq 0x80(%rsp), %rcx
jne 0x22a834
movq 0x88(%rsp), %rcx
incq %rcx
movq %rcx, 0x88(%rsp)
cmpq 0x98(%rsp), %rcx
jne 0x22a7dd
testq %r15, %r15
je 0x22a933
movq 0x130(%rsp), %rsi
subq %r15, %rsi
movq %r15, %rdi
callq 0x56270
movq 0x38(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22a9b3
lock
decl (%rax)
jne 0x22a9b3
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x22a984
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22a9b3
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x22a9ae
lock
decl (%rax)
jne 0x22a9ae
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
je 0x22a9a1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22a9ae
testq %rsi, %rsi
je 0x22a9b3
jmp 0x22a997
testq %rsi, %rsi
je 0x22a9ae
jmp 0x22a9a6
testq %rsi, %rsi
je 0x22a9b3
movq %rsi, %rdi
callq 0x563b0
jmp 0x22a9b3
testq %rsi, %rsi
je 0x22a9ae
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x22a9ef
lock
decl (%rax)
jne 0x22a9ef
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
je 0x22a9e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22a9ef
testq %rsi, %rsi
je 0x22a9ef
movq %rsi, %rdi
callq 0x563b0
movl %ebp, %eax
addq $0x168, %rsp # imm = 0x168
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x22aad1
jmp 0x22aad1
jmp 0x22aad1
jmp 0x22aad1
jmp 0x22aa19
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x22aa8d
lock
decl (%rax)
jne 0x22aa8d
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x22aa41
testq %rsi, %rsi
je 0x22aa8d
jmp 0x22aa74
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22aa8d
jmp 0x22aad1
movq %rax, %rbx
movq 0x38(%rsp), %rax
testq %rax, %rax
je 0x22aa8d
lock
decl (%rax)
jne 0x22aa8d
movq 0x30(%rsp), %rsi
movq 0x50(%rsp), %rdi
testq %rdi, %rdi
jne 0x22aa7e
testq %rsi, %rsi
je 0x22aa8d
movq %rsi, %rdi
callq 0x563b0
jmp 0x22aa8d
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22aa8d
jmp 0x22aad1
jmp 0x22aad1
movq %rax, %rbx
movq 0xa8(%rsp), %rax
testq %rax, %rax
je 0x22aac9
lock
decl (%rax)
jne 0x22aac9
movq 0xa0(%rsp), %rsi
movq 0xc0(%rsp), %rdi
testq %rdi, %rdi
jne 0x22aac3
testq %rsi, %rsi
je 0x22aac9
movq %rsi, %rdi
callq 0x563b0
jmp 0x22aac9
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/src/layer/x86/lrn_x86.cpp
|
ncnn::LRN_x86_avx512::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// In-place Local Response Normalization (LRN) forward pass.
//
// bottom_top_blob : feature map, normalized in place
// opt             : execution options (thread count, workspace allocator)
// Returns 0 on success, -100 when a workspace Mat cannot be allocated.
int LRN_x86_avx512::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;
    // squared values with local_size padding
    // square_blob[q][i] = bottom_top_blob[q][i]^2 — computed once up front
    // and shared by both normalization region types below.
    Mat square_blob;
    square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
    if (square_blob.empty())
        return -100;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = bottom_top_blob.channel(q);
        float* outptr = square_blob.channel(q);
        int i = 0;
#if __AVX__
        // 8-lane vectorized squaring; the scalar loop below handles the tail.
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            __m256 _outp = _mm256_mul_ps(_p, _p);
            _mm256_storeu_ps(outptr, _outp);
            ptr += 8;
            outptr += 8;
        }
#endif // __AVX__
        for (; i < size; i++)
        {
            *outptr = *ptr * *ptr;
            ptr++;
            outptr++;
        }
    }
    if (region_type == NormRegion_ACROSS_CHANNELS)
    {
        // Sum the squares over a window of channels centered on q, then scale
        // each element: out = in * pow(bias + alpha/local_size * sum, -beta).
        Mat square_sum;
        square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
        if (square_sum.empty())
            return -100;
        square_sum.fill(0.f);
        const float alpha_div_size = alpha / local_size;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            // square sum
            // NOTE(review): the window [q - local_size/2, q + local_size/2]
            // holds exactly local_size channels only when local_size is odd;
            // confirm even local_size is not expected here.
            for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
            {
                if (p < 0 || p >= channels)
                    continue; // window clipped at the channel boundaries
                const float* sptr = square_blob.channel(p);
                float* ssptr = square_sum.channel(q);
                int i = 0;
#if __AVX__
                for (; i + 7 < size; i += 8)
                {
                    __m256 _sp = _mm256_loadu_ps(sptr);
                    __m256 _ssp = _mm256_loadu_ps(ssptr);
                    _ssp = _mm256_add_ps(_ssp, _sp);
                    _mm256_storeu_ps(ssptr, _ssp);
                    sptr += 8;
                    ssptr += 8;
                }
#endif // __AVX__
                for (; i < size; i++)
                {
                    *ssptr += *sptr;
                    sptr++;
                    ssptr++;
                }
            }
            float* ptr = bottom_top_blob.channel(q);
            float* ssptr = square_sum.channel(q);
            int i = 0;
#if __AVX__
            __m256 _bias = _mm256_set1_ps(bias);
            __m256 _ads = _mm256_set1_ps(alpha_div_size);
            __m256 _mb = _mm256_set1_ps(-beta);
            // pow256_ps is a project helper — presumably an 8-lane pow()
            // approximation mirroring the scalar pow() in the tail loop;
            // verify against its definition.
            for (; i + 7 < size; i += 8)
            {
                __m256 _p = _mm256_loadu_ps(ptr);
                __m256 _ssp = _mm256_loadu_ps(ssptr);
                _ssp = _mm256_mul_ps(_ssp, _ads);
                _ssp = _mm256_add_ps(_ssp, _bias);
                _ssp = pow256_ps(_ssp, _mb);
                _p = _mm256_mul_ps(_p, _ssp);
                _mm256_storeu_ps(ptr, _p);
                ssptr += 8;
                ptr += 8;
            }
#endif // __AVX__
            for (; i < size; i++)
            {
                *ptr = *ptr * pow(bias + alpha_div_size * *ssptr, -beta);
                ssptr++;
                ptr++;
            }
        }
    }
    else if (region_type == NormRegion_WITHIN_CHANNEL)
    {
        // Sum the squares over a local_size x local_size spatial window inside
        // each channel. The squared map is zero-padded so every output pixel
        // sees a full window.
        int outw = w;
        int outh = h;
        Mat square_blob_bordered = square_blob;
        int pad = local_size / 2;
        if (pad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            // BORDER_CONSTANT 0.f: padded squares contribute nothing to the sum.
            copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
            if (square_blob_bordered.empty())
                return -100;
            w = square_blob_bordered.w;
            h = square_blob_bordered.h;
        }
        const int maxk = local_size * local_size;
        const float alpha_div_size = alpha / maxk;
        // norm window offsets
        // Flattened element offsets (relative to the window's top-left) into
        // the bordered square map, so the inner loop is a single gather.
        std::vector<int> _space_ofs(maxk);
        int* space_ofs = &_space_ofs[0];
        {
            int p1 = 0;
            int p2 = 0;
            int gap = w - local_size; // row stride remainder after one window row
            for (int i = 0; i < local_size; i++)
            {
                for (int j = 0; j < local_size; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2++;
                }
                p2 += gap;
            }
        }
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);
            const Mat m = square_blob_bordered.channel(q);
            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    const float* sptr = m.row(i) + j;
                    float ss = 0.f;
                    for (int k = 0; k < maxk; k++)
                    {
                        float val = sptr[space_ofs[k]];
                        ss += val;
                    }
                    // out = in * pow(bias + alpha/maxk * windowed_sum, -beta)
                    ptr[j] = ptr[j] * pow(bias + alpha_div_size * ss, -beta);
                }
                ptr += outw;
            }
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x208, %rsp # imm = 0x208
movq %rdx, %r13
movq %rsi, %r12
movq %rdi, 0x8(%rsp)
movl 0x2c(%rsi), %esi
movl 0x30(%r12), %edx
movl 0x38(%r12), %r14d
movq 0x10(%r12), %rbx
leaq 0x80(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x10(%r13), %r9
movq %rsi, 0xd0(%rsp)
movq %rdx, 0x18(%rsp)
movl %r14d, %ecx
movq %rbx, %r8
callq 0x5b24c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, 0x80(%rsp)
je 0x22b7f3
movslq 0xb8(%rsp), %rax
imulq 0xc0(%rsp), %rax
testq %rax, %rax
je 0x22b7f3
movq 0x18(%rsp), %rax
movl %eax, %r15d
imull 0xd0(%rsp), %r15d
testl %r14d, %r14d
movq 0x8(%rsp), %r9
jle 0x22ac9c
movl %r15d, %eax
andl $-0x8, %eax
xorl %ecx, %ecx
movq 0x40(%r12), %rdx
imulq %rcx, %rdx
imulq 0x10(%r12), %rdx
addq (%r12), %rdx
movq 0xc0(%rsp), %rsi
imulq %rcx, %rsi
imulq 0x90(%rsp), %rsi
addq 0x80(%rsp), %rsi
cmpl $0x8, %r15d
jl 0x22ac6a
movl $0x7, %edi
vmovups (%rdx), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rsi)
addq $0x20, %rdx
addq $0x20, %rsi
addl $0x8, %edi
cmpl %r15d, %edi
jl 0x22ac49
movl %eax, %r8d
jmp 0x22ac6d
xorl %r8d, %r8d
movl %r15d, %edi
subl %r8d, %edi
jle 0x22ac90
xorl %r8d, %r8d
vmovss (%rdx,%r8,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rsi,%r8,4)
incq %r8
cmpl %r8d, %edi
jne 0x22ac78
incq %rcx
cmpq %r14, %rcx
jne 0x22ac0e
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd0(%r9,%rcx), %ecx
xorl %ebp, %ebp
testl %ecx, %ecx
movq %r14, 0x78(%rsp)
je 0x22ade1
cmpl $0x1, %ecx
jne 0x22b7f3
movq %r13, %rsi
movq 0x88(%rsp), %rcx
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0x90(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0x98(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
vmovups 0xa8(%rsp), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0xb8(%rsp), %edx
movl %edx, 0x58(%rsp)
movq 0xc0(%rsp), %rdx
movq %rdx, 0x60(%rsp)
testq %rcx, %rcx
je 0x22ad34
lock
incl (%rcx)
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%r9,%rcx), %r13d
cmpl $0x2, %r13d
jl 0x22b40d
shrl %r13d
vmovups (%rsi), %zmm0
leaq 0x1c0(%rsp), %r10
vmovups %zmm0, (%r10)
movq 0x10(%rsi), %rcx
movq %rcx, 0x8(%r10)
movq -0x18(%rax), %rax
movl %r13d, %ecx
notl %ecx
addl 0xd4(%r9,%rax), %ecx
leaq 0x80(%rsp), %rdi
leaq 0x20(%rsp), %rsi
vxorps %xmm0, %xmm0, %xmm0
movl %r13d, %edx
movl %r13d, %r8d
movl %ecx, %r9d
pushq %r10
pushq $0x0
vzeroupper
callq 0x5c159
addq $0x10, %rsp
cmpq $0x0, 0x20(%rsp)
je 0x22b79b
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x22b79b
movl 0x4c(%rsp), %ebp
movq 0x8(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%r9,%rcx), %r13d
jmp 0x22b417
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x10(%r13), %r9
movq 0xd0(%rsp), %rsi
movq 0x18(%rsp), %rdx
movl %r14d, %ecx
movq %rbx, %r8
vzeroupper
callq 0x5b24c
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x22b3d8
movq 0x60(%rsp), %rax
movslq 0x58(%rsp), %rcx
movq %rax, %rdx
imulq %rcx, %rdx
testq %rdx, %rdx
je 0x22b3d8
imull %ecx, %eax
testl %eax, %eax
jle 0x22ae60
movl %eax, %edx
shlq $0x2, %rdx
xorl %esi, %esi
callq 0x56070
testl %r14d, %r14d
movq 0x8(%rsp), %r8
jle 0x22b39b
movq (%r8), %r13
movq -0x18(%r13), %rax
vmovss 0xd8(%r8,%rax), %xmm0
vcvtsi2ssl 0xd4(%r8,%rax), %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm8
vbroadcastss %xmm8, %ymm20
movl %r15d, %r10d
andl $-0x8, %r10d
xorl %r11d, %r11d
vbroadcastss 0x1ec1ae(%rip), %xmm10 # 0x417054
vbroadcastss 0x1ec1d1(%rip), %ymm4 # 0x417080
vpbroadcastd 0x1e97d8(%rip), %ymm5 # 0x414690
vpbroadcastd 0x1ec1c3(%rip), %ymm6 # 0x417084
vpbroadcastd 0x1ec1be(%rip), %ymm7 # 0x417088
vbroadcastss 0x1ec1b9(%rip), %ymm9 # 0x41708c
vbroadcastss 0x1e9e3b(%rip), %ymm17 # 0x414d18
vbroadcastss 0x1ec1b9(%rip), %ymm21 # 0x4170a0
vbroadcastss 0x1ec1b3(%rip), %ymm22 # 0x4170a4
vbroadcastss 0x1ec1ad(%rip), %ymm23 # 0x4170a8
vbroadcastss 0x1ec1a7(%rip), %ymm24 # 0x4170ac
vbroadcastss 0x1ec1a2(%rip), %ymm2 # 0x4170b0
vbroadcastss 0x1ec150(%rip), %ymm27 # 0x417068
vbroadcastss 0x1e976e(%rip), %ymm28 # 0x414690
vbroadcastss 0x1ec138(%rip), %ymm29 # 0x417064
vmovups %ymm20, 0x140(%rsp)
movl %r10d, 0x18(%rsp)
movq %r12, 0x70(%rsp)
vmovaps %xmm8, 0xf0(%rsp)
movq -0x18(%r13), %rcx
movl 0xd4(%r8,%rcx), %eax
movl %eax, %edx
shrl $0x1f, %edx
addl %eax, %edx
sarl %edx
movl %r11d, %eax
subl %edx, %eax
addl %r11d, %edx
cmpl %edx, %eax
jg 0x22b011
cmpl %r14d, %eax
jae 0x22afec
movl %eax, %ecx
imulq 0xc0(%rsp), %rcx
imulq 0x90(%rsp), %rcx
addq 0x80(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r11, %rdx
imulq 0x30(%rsp), %rdx
addq 0x20(%rsp), %rdx
xorl %edi, %edi
cmpl $0x8, %r15d
jl 0x22afcd
movl $0x7, %esi
vmovups (%rdx), %ymm0
vaddps (%rcx), %ymm0, %ymm0
vmovups %ymm0, (%rdx)
addq $0x20, %rcx
addq $0x20, %rdx
addl $0x8, %esi
cmpl %r15d, %esi
jl 0x22afae
movl %r10d, %edi
movl %r15d, %esi
subl %edi, %esi
jle 0x22afec
xorl %edi, %edi
vmovss (%rdx,%rdi,4), %xmm0
vaddss (%rcx,%rdi,4), %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rdi,4)
incq %rdi
cmpl %edi, %esi
jne 0x22afd6
leal 0x1(%rax), %edx
movq -0x18(%r13), %rcx
movl 0xd4(%r8,%rcx), %esi
movl %esi, %edi
shrl $0x1f, %edi
addl %esi, %edi
sarl %edi
addl %r11d, %edi
cmpl %edi, %eax
movl %edx, %eax
jl 0x22af6c
movq 0x40(%r12), %rbp
imulq %r11, %rbp
imulq 0x10(%r12), %rbp
addq (%r12), %rbp
movq 0x60(%rsp), %r12
imulq %r11, %r12
imulq 0x30(%rsp), %r12
addq 0x20(%rsp), %r12
xorl %eax, %eax
cmpl $0x8, %r15d
movq %r11, 0x180(%rsp)
jl 0x22b28b
vbroadcastss 0xe0(%r8,%rcx), %ymm0
vmovups %ymm0, 0xd0(%rsp)
vmovss 0xdc(%r8,%rcx), %xmm1
vxorps %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %ymm0
vmovups %ymm0, 0x160(%rsp)
movl $0x7, %eax
vbroadcastss 0x1ebfce(%rip), %ymm31 # 0x417058
vbroadcastss 0x1ebfc9(%rip), %ymm11 # 0x41705c
vbroadcastss 0x1ebfc4(%rip), %ymm12 # 0x417060
vbroadcastss 0x1ebfc7(%rip), %ymm13 # 0x41706c
vbroadcastss 0x1ebfc2(%rip), %ymm14 # 0x417070
vbroadcastss 0x1ebfbd(%rip), %ymm15 # 0x417074
vbroadcastss 0x1ebfb7(%rip), %ymm16 # 0x417078
vbroadcastss 0x1ebfb1(%rip), %ymm18 # 0x41707c
vpbroadcastd 0x1e9c43(%rip), %ymm19 # 0x414d18
vmovaps %ymm21, %ymm26
vmovaps %ymm22, %ymm30
vmovaps %ymm23, %ymm25
vmovaps %ymm2, %ymm1
vmovups (%r12), %ymm2
vfmadd213ps 0xd0(%rsp), %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + mem
vmaxps %ymm4, %ymm2, %ymm3
vmovaps %ymm4, %ymm21
vpsrld $0x17, %ymm3, %ymm4
vpternlogd $0xec, %ymm6, %ymm5, %ymm3
vcmpltps %ymm9, %ymm3, %k1
vmovdqa64 %ymm5, %ymm22
vaddps 0x1ebf67(%rip){1to8}, %ymm3, %ymm5 # 0x417090
vaddps %ymm3, %ymm5, %ymm5 {%k1}
vbroadcastss 0x1ebf5c(%rip), %ymm3 # 0x417094
vfmadd213ps 0x1ebf56(%rip){1to8}, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + mem
vfmadd213ps 0x1ebf50(%rip){1to8}, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + mem
vfmadd213ps %ymm26, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm26
vfmadd213ps %ymm30, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm30
vmovdqa64 %ymm6, %ymm23
vmulps %ymm5, %ymm5, %ymm6
vfmadd213ps %ymm25, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm25
vfmadd213ps %ymm24, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm24
vfmadd213ps %ymm1, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm1
vfmadd213ps 0x1ebf37(%rip){1to8}, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + mem
vmovaps %ymm24, %ymm0
vmovaps %ymm9, %ymm24
vmovdqa %ymm7, %ymm9
vmulps %ymm5, %ymm6, %ymm7
vmulps %ymm3, %ymm7, %ymm3
vmovdqa %ymm9, %ymm7
vmovaps %ymm24, %ymm9
vmovaps %ymm0, %ymm24
vpaddd %ymm7, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm17, %ymm4, %ymm4 {%k1}
vfmadd231ps %ymm27, %ymm4, %ymm3 # ymm3 = (ymm4 * ymm27) + ymm3
vfmsub231ps %ymm6, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm6) - ymm3
vmovdqa64 %ymm23, %ymm6
vcmpleps 0x1ee150(%rip), %ymm2, %k1 # 0x419320
vsubps %ymm5, %ymm3, %ymm2
vfmsub231ps %ymm4, %ymm29, %ymm2 # ymm2 = (ymm29 * ymm4) - ymm2
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovaps %ymm0, %ymm2 {%k1}
vmulps 0x160(%rsp), %ymm2, %ymm2
vminps %ymm31, %ymm2, %ymm2
vmaxps %ymm2, %ymm11, %ymm2
vmovaps %ymm28, %ymm3
vfmadd231ps %ymm12, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm12) + ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %k1
vsubps %ymm17, %ymm4, %ymm4 {%k1}
vfmsub231ps %ymm29, %ymm4, %ymm2 # ymm2 = (ymm4 * ymm29) - ymm2
vfnmsub231ps %ymm27, %ymm4, %ymm2 # ymm2 = -(ymm4 * ymm27) - ymm2
vmovaps %ymm13, %ymm3
vfmadd213ps %ymm14, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm14
vmulps %ymm2, %ymm2, %ymm5
vfmadd213ps %ymm15, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm15
vfmadd213ps %ymm16, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm16
vfmadd213ps %ymm18, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm18
vfmadd213ps %ymm28, %ymm2, %ymm3 # ymm3 = (ymm2 * ymm3) + ymm28
vfmadd213ps %ymm2, %ymm5, %ymm3 # ymm3 = (ymm5 * ymm3) + ymm2
vmovdqa64 %ymm22, %ymm5
vcvttps2dq %ymm4, %ymm2
vmovaps %ymm21, %ymm4
vpslld $0x17, %ymm2, %ymm2
vpaddd %ymm19, %ymm2, %ymm2
vmulps (%rbp), %ymm2, %ymm2
vfmadd213ps %ymm2, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm2
vmovups %ymm2, (%rbp)
addq $0x20, %r12
addq $0x20, %rbp
addl $0x8, %eax
cmpl %r15d, %eax
jl 0x22b0eb
movl %r10d, %eax
movl %r15d, %r14d
subl %eax, %r14d
jle 0x22b2ea
xorl %ebx, %ebx
movq -0x18(%r13), %rax
vmovss (%r12,%rbx,4), %xmm0
vfmadd213ss 0xe0(%r8,%rax), %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + mem
vmovss 0xdc(%r8,%rax), %xmm1
vxorps %xmm1, %xmm10, %xmm1
vzeroupper
callq 0x560e0
vbroadcastss 0x1ebd8c(%rip), %xmm10 # 0x417054
vmovaps 0xf0(%rsp), %xmm8
movq 0x8(%rsp), %r8
vmulss (%rbp,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%rbp,%rbx,4)
incq %rbx
cmpl %ebx, %r14d
jne 0x22b295
movq 0x180(%rsp), %r11
incq %r11
movq 0x78(%rsp), %r14
cmpq %r14, %r11
movq 0x70(%rsp), %r12
vmovups 0x140(%rsp), %ymm20
movl 0x18(%rsp), %r10d
vbroadcastss 0x1ebd68(%rip), %ymm4 # 0x417080
vpbroadcastd 0x1e936f(%rip), %ymm5 # 0x414690
vpbroadcastd 0x1ebd5a(%rip), %ymm6 # 0x417084
vpbroadcastd 0x1ebd55(%rip), %ymm7 # 0x417088
vbroadcastss 0x1ebd50(%rip), %ymm9 # 0x41708c
vbroadcastss 0x1e99d2(%rip), %ymm17 # 0x414d18
vbroadcastss 0x1ebd50(%rip), %ymm21 # 0x4170a0
vbroadcastss 0x1ebd4a(%rip), %ymm22 # 0x4170a4
vbroadcastss 0x1ebd44(%rip), %ymm23 # 0x4170a8
vbroadcastss 0x1ebd3e(%rip), %ymm24 # 0x4170ac
vbroadcastss 0x1ebd39(%rip), %ymm2 # 0x4170b0
vbroadcastss 0x1ebce7(%rip), %ymm27 # 0x417068
vbroadcastss 0x1e9305(%rip), %ymm28 # 0x414690
vbroadcastss 0x1ebccf(%rip), %ymm29 # 0x417064
jne 0x22af47
movq 0x28(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22b7f3
lock
decl (%rax)
jne 0x22b7f3
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22b7cf
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x22b7f3
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22b7ee
lock
decl (%rax)
jne 0x22b7ee
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22b7c8
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22b7ee
movq 0xd0(%rsp), %rax
movl %eax, %ebp
movl %r13d, %esi
imull %esi, %esi
vmovss 0xd8(%r9,%rcx), %xmm0
vmovss %xmm0, 0xf0(%rsp)
leaq 0x1c0(%rsp), %rdi
leaq 0x17(%rsp), %rdx
movq %rsi, 0x180(%rsp)
movq %r9, %rbx
vzeroupper
callq 0x6aa92
movq 0x1c0(%rsp), %r15
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %edi
testl %edi, %edi
jle 0x22b4b0
movq %rbx, %r11
subl %edi, %ebp
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
testl %edi, %edi
jle 0x22b4a8
movslq %esi, %rdi
leaq (%r15,%rdi,4), %r9
movl %edx, %r10d
xorl %r8d, %r8d
leal (%r10,%r8), %edi
movl %edi, (%r9,%r8,4)
movq -0x18(%rax), %rdi
movl 0xd4(%r11,%rdi), %edi
incq %r8
cmpl %edi, %r8d
jl 0x22b486
addl %r8d, %esi
addl %r8d, %edx
addl %ebp, %edx
incl %ecx
cmpl %edi, %ecx
jl 0x22b475
movl %r13d, 0x160(%rsp)
testl %r14d, %r14d
jle 0x22b752
movslq 0xd0(%rsp), %rax
movq %rax, 0x138(%rsp)
movq 0x180(%rsp), %rsi
vcvtsi2ss %esi, %xmm1, %xmm0
movq (%r12), %rax
movq %rax, 0x120(%rsp)
movq 0x40(%r12), %rax
imulq 0x10(%r12), %rax
movq %rax, 0x118(%rsp)
movslq 0x4c(%rsp), %rcx
movq 0x20(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x30(%rsp), %rax
movq 0x60(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0x108(%rsp)
imulq %rax, %rcx
movq %rcx, 0x128(%rsp)
cmpl $0x1, %esi
adcl $0x0, %esi
movl %esi, %ebp
addl $0xf, %ebp
andl $-0x10, %ebp
leaq -0x1(%rsi), %rax
vpbroadcastq %rax, %zmm3
vmovss 0x1e97c8(%rip), %xmm1 # 0x414d18
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0x70(%rsp)
movq $0x0, 0x68(%rsp)
vpmovsxbq 0x1eb771(%rip), %zmm4 # 0x416cde
vpmovsxbq 0x1eb76f(%rip), %zmm5 # 0x416ce6
vmovdqu64 %zmm3, 0x180(%rsp)
cmpl $0x0, 0x18(%rsp)
jle 0x22b73a
movq 0x108(%rsp), %rcx
movq 0x68(%rsp), %rax
imulq %rax, %rcx
addq 0x110(%rsp), %rcx
movq %rcx, 0x130(%rsp)
movq 0x118(%rsp), %r12
imulq %rax, %r12
addq 0x120(%rsp), %r12
movq 0x8(%rsp), %rax
movq (%rax), %r13
movq $0x0, 0x140(%rsp)
cmpl $0x0, 0xd0(%rsp)
jle 0x22b710
movq 0x128(%rsp), %rbx
imulq 0x140(%rsp), %rbx
addq 0x130(%rsp), %rbx
xorl %r14d, %r14d
cmpl $0x0, 0x160(%rsp)
je 0x22b696
leaq (%rbx,%r14,4), %rax
vxorps %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
movq 0x8(%rsp), %rdx
vmovaps %zmm1, %zmm0
vpbroadcastq %rcx, %zmm1
vporq %zmm4, %zmm1, %zmm2
vporq %zmm5, %zmm1, %zmm1
vpcmpleuq %zmm3, %zmm1, %k0
vpcmpleuq %zmm3, %zmm2, %k1
kunpckbw %k0, %k1, %k1
vmovdqu32 (%r15,%rcx,4), %zmm1 {%k1} {z}
kmovq %k1, %k2
vpxor %xmm2, %xmm2, %xmm2
vgatherdps (%rax,%zmm1,4), %zmm2 {%k2}
vaddps %zmm0, %zmm2, %zmm1
addq $0x10, %rcx
cmpq %rcx, %rbp
jne 0x22b61a
vmovaps %zmm1, %zmm0 {%k1}
vextractf64x4 $0x1, %zmm0, %ymm1
vaddps %zmm1, %zmm0, %zmm0
vextractf128 $0x1, %ymm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0]
vaddps %xmm1, %xmm0, %xmm0
vhaddps %xmm0, %xmm0, %xmm0
jmp 0x22b69f
vxorps %xmm0, %xmm0, %xmm0
movq 0x8(%rsp), %rdx
vmulss 0xf0(%rsp), %xmm0, %xmm0
movq -0x18(%r13), %rax
vmovss 0x70(%rsp), %xmm1
vfmadd213ss 0xe0(%rdx,%rax), %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + mem
vmovss 0xdc(%rdx,%rax), %xmm1
vxorps 0x1eb985(%rip){1to4}, %xmm1, %xmm1 # 0x417054
vzeroupper
callq 0x560e0
vmulss (%r12,%r14,4), %xmm0, %xmm0
vmovss %xmm0, (%r12,%r14,4)
incq %r14
cmpq 0xd0(%rsp), %r14
vmovdqu64 0x180(%rsp), %zmm3
vpmovsxbq 0x1eb5de(%rip), %zmm4 # 0x416cde
vpmovsxbq 0x1eb5dc(%rip), %zmm5 # 0x416ce6
jne 0x22b5fd
movq 0x138(%rsp), %rax
leaq (%r12,%rax,4), %r12
movq 0x140(%rsp), %rcx
incq %rcx
movq %rcx, 0x140(%rsp)
cmpq 0x18(%rsp), %rcx
jne 0x22b5d3
movq 0x68(%rsp), %rcx
incq %rcx
movq %rcx, 0x68(%rsp)
cmpq 0x78(%rsp), %rcx
jne 0x22b57f
testq %r15, %r15
je 0x22b76d
movq 0x1d0(%rsp), %rsi
subq %r15, %rsi
movq %r15, %rdi
vzeroupper
callq 0x56270
movq 0x28(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22b7f3
lock
decl (%rax)
jne 0x22b7f3
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22b7c1
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x22b7f3
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22b7ee
lock
decl (%rax)
jne 0x22b7ee
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22b7e1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22b7ee
testq %rsi, %rsi
je 0x22b7f3
jmp 0x22b7d4
testq %rsi, %rsi
je 0x22b7ee
jmp 0x22b7e6
testq %rsi, %rsi
je 0x22b7f3
movq %rsi, %rdi
vzeroupper
callq 0x563b0
jmp 0x22b7f3
testq %rsi, %rsi
je 0x22b7ee
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x22b835
lock
decl (%rax)
jne 0x22b835
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
je 0x22b825
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x22b835
testq %rsi, %rsi
je 0x22b835
movq %rsi, %rdi
vzeroupper
callq 0x563b0
movl %ebp, %eax
addq $0x208, %rsp # imm = 0x208
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0x22b91a
jmp 0x22b91a
jmp 0x22b91a
jmp 0x22b91a
jmp 0x22b862
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22b8d6
lock
decl (%rax)
jne 0x22b8d6
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x22b88a
testq %rsi, %rsi
je 0x22b8d6
jmp 0x22b8bd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22b8d6
jmp 0x22b91a
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22b8d6
lock
decl (%rax)
jne 0x22b8d6
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x22b8c7
testq %rsi, %rsi
je 0x22b8d6
movq %rsi, %rdi
callq 0x563b0
jmp 0x22b8d6
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22b8d6
jmp 0x22b91a
jmp 0x22b91a
movq %rax, %rbx
movq 0x88(%rsp), %rax
testq %rax, %rax
je 0x22b912
lock
decl (%rax)
jne 0x22b912
movq 0x80(%rsp), %rsi
movq 0xa0(%rsp), %rdi
testq %rdi, %rdi
jne 0x22b90c
testq %rsi, %rsi
je 0x22b912
movq %rsi, %rdi
callq 0x563b0
jmp 0x22b912
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/lrn_x86_avx512.cpp
|
ncnn::LRN_x86_avx::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// In-place Local Response Normalization (LRN) forward pass.
//
// bottom_top_blob : feature map, normalized in place
// opt             : execution options (thread count, workspace allocator)
// Returns 0 on success, -100 when a workspace Mat cannot be allocated.
int LRN_x86_avx::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;
    // squared values with local_size padding
    // square_blob[q][i] = bottom_top_blob[q][i]^2 — computed once up front
    // and shared by both normalization region types below.
    Mat square_blob;
    square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
    if (square_blob.empty())
        return -100;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = bottom_top_blob.channel(q);
        float* outptr = square_blob.channel(q);
        int i = 0;
#if __AVX__
        // 8-lane vectorized squaring; the scalar loop below handles the tail.
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            __m256 _outp = _mm256_mul_ps(_p, _p);
            _mm256_storeu_ps(outptr, _outp);
            ptr += 8;
            outptr += 8;
        }
#endif // __AVX__
        for (; i < size; i++)
        {
            *outptr = *ptr * *ptr;
            ptr++;
            outptr++;
        }
    }
    if (region_type == NormRegion_ACROSS_CHANNELS)
    {
        // Sum the squares over a window of channels centered on q, then scale
        // each element: out = in * pow(bias + alpha/local_size * sum, -beta).
        Mat square_sum;
        square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
        if (square_sum.empty())
            return -100;
        square_sum.fill(0.f);
        const float alpha_div_size = alpha / local_size;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            // square sum
            // NOTE(review): the window [q - local_size/2, q + local_size/2]
            // holds exactly local_size channels only when local_size is odd;
            // confirm even local_size is not expected here.
            for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
            {
                if (p < 0 || p >= channels)
                    continue; // window clipped at the channel boundaries
                const float* sptr = square_blob.channel(p);
                float* ssptr = square_sum.channel(q);
                int i = 0;
#if __AVX__
                for (; i + 7 < size; i += 8)
                {
                    __m256 _sp = _mm256_loadu_ps(sptr);
                    __m256 _ssp = _mm256_loadu_ps(ssptr);
                    _ssp = _mm256_add_ps(_ssp, _sp);
                    _mm256_storeu_ps(ssptr, _ssp);
                    sptr += 8;
                    ssptr += 8;
                }
#endif // __AVX__
                for (; i < size; i++)
                {
                    *ssptr += *sptr;
                    sptr++;
                    ssptr++;
                }
            }
            float* ptr = bottom_top_blob.channel(q);
            float* ssptr = square_sum.channel(q);
            int i = 0;
#if __AVX__
            __m256 _bias = _mm256_set1_ps(bias);
            __m256 _ads = _mm256_set1_ps(alpha_div_size);
            __m256 _mb = _mm256_set1_ps(-beta);
            // pow256_ps is a project helper — presumably an 8-lane pow()
            // approximation mirroring the scalar pow() in the tail loop;
            // verify against its definition.
            for (; i + 7 < size; i += 8)
            {
                __m256 _p = _mm256_loadu_ps(ptr);
                __m256 _ssp = _mm256_loadu_ps(ssptr);
                _ssp = _mm256_mul_ps(_ssp, _ads);
                _ssp = _mm256_add_ps(_ssp, _bias);
                _ssp = pow256_ps(_ssp, _mb);
                _p = _mm256_mul_ps(_p, _ssp);
                _mm256_storeu_ps(ptr, _p);
                ssptr += 8;
                ptr += 8;
            }
#endif // __AVX__
            for (; i < size; i++)
            {
                *ptr = *ptr * pow(bias + alpha_div_size * *ssptr, -beta);
                ssptr++;
                ptr++;
            }
        }
    }
    else if (region_type == NormRegion_WITHIN_CHANNEL)
    {
        // Sum the squares over a local_size x local_size spatial window inside
        // each channel. The squared map is zero-padded so every output pixel
        // sees a full window.
        int outw = w;
        int outh = h;
        Mat square_blob_bordered = square_blob;
        int pad = local_size / 2;
        if (pad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            // BORDER_CONSTANT 0.f: padded squares contribute nothing to the sum.
            copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
            if (square_blob_bordered.empty())
                return -100;
            w = square_blob_bordered.w;
            h = square_blob_bordered.h;
        }
        const int maxk = local_size * local_size;
        const float alpha_div_size = alpha / maxk;
        // norm window offsets
        // Flattened element offsets (relative to the window's top-left) into
        // the bordered square map, so the inner loop is a single gather.
        std::vector<int> _space_ofs(maxk);
        int* space_ofs = &_space_ofs[0];
        {
            int p1 = 0;
            int p2 = 0;
            int gap = w - local_size; // row stride remainder after one window row
            for (int i = 0; i < local_size; i++)
            {
                for (int j = 0; j < local_size; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2++;
                }
                p2 += gap;
            }
        }
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);
            const Mat m = square_blob_bordered.channel(q);
            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    const float* sptr = m.row(i) + j;
                    float ss = 0.f;
                    for (int k = 0; k < maxk; k++)
                    {
                        float val = sptr[space_ofs[k]];
                        ss += val;
                    }
                    // out = in * pow(bias + alpha/maxk * windowed_sum, -beta)
                    ptr[j] = ptr[j] * pow(bias + alpha_div_size * ss, -beta);
                }
                ptr += outw;
            }
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1c8, %rsp # imm = 0x1C8
movq %rdx, %r15
movq %rsi, %r13
movq %rdi, 0x8(%rsp)
movl 0x2c(%rsi), %esi
movl 0x30(%r13), %edx
movl 0x38(%r13), %r14d
movq 0x10(%r13), %rbx
leaq 0x90(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x10(%r15), %r9
movq %rsi, 0xe0(%rsp)
movq %rdx, 0x140(%rsp)
movl %r14d, %ecx
movq %rbx, %r8
callq 0x5b24c
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, 0x90(%rsp)
je 0x22d214
movslq 0xc8(%rsp), %rax
imulq 0xd0(%rsp), %rax
testq %rax, %rax
je 0x22d214
movq 0x140(%rsp), %rax
movl %eax, %r12d
imull 0xe0(%rsp), %r12d
testl %r14d, %r14d
movq 0x8(%rsp), %r9
jle 0x22c7d5
movl %r12d, %eax
andl $-0x8, %eax
xorl %ecx, %ecx
movq 0x40(%r13), %rdx
imulq %rcx, %rdx
imulq 0x10(%r13), %rdx
addq (%r13), %rdx
movq 0xd0(%rsp), %rsi
imulq %rcx, %rsi
imulq 0xa0(%rsp), %rsi
addq 0x90(%rsp), %rsi
cmpl $0x8, %r12d
jl 0x22c7a3
movl $0x7, %edi
vmovups (%rdx), %ymm0
vmulps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rsi)
addq $0x20, %rdx
addq $0x20, %rsi
addl $0x8, %edi
cmpl %r12d, %edi
jl 0x22c782
movl %eax, %r8d
jmp 0x22c7a6
xorl %r8d, %r8d
movl %r12d, %edi
subl %r8d, %edi
jle 0x22c7c9
xorl %r8d, %r8d
vmovss (%rdx,%r8,4), %xmm0
vmulss %xmm0, %xmm0, %xmm0
vmovss %xmm0, (%rsi,%r8,4)
incq %r8
cmpl %r8d, %edi
jne 0x22c7b1
incq %rcx
cmpq %r14, %rcx
jne 0x22c749
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd0(%r9,%rcx), %ecx
testl %ecx, %ecx
movq %r14, 0x88(%rsp)
je 0x22c91c
cmpl $0x1, %ecx
jne 0x22cebc
movq 0x98(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq 0xa0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
movl 0xa8(%rsp), %edx
movl %edx, 0x38(%rsp)
movq 0xb0(%rsp), %rdx
movq %rdx, 0x40(%rsp)
vmovups 0xb8(%rsp), %xmm0
vmovups %xmm0, 0x48(%rsp)
movl 0xc8(%rsp), %edx
movl %edx, 0x58(%rsp)
movq 0xd0(%rsp), %rdx
movq %rdx, 0x60(%rsp)
testq %rcx, %rcx
je 0x22c86b
lock
incl (%rcx)
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%r9,%rcx), %edx
cmpl $0x2, %edx
jl 0x22cef8
shrl %edx
vmovups (%r15), %ymm0
vmovups 0x20(%r15), %ymm1
leaq 0x180(%rsp), %r10
vmovups %ymm1, 0x20(%r10)
vmovups %ymm0, (%r10)
movq 0x10(%r15), %rcx
movq %rcx, 0x8(%r10)
movq -0x18(%rax), %rax
movl %edx, %ecx
notl %ecx
addl 0xd4(%r9,%rax), %ecx
leaq 0x90(%rsp), %rdi
leaq 0x20(%rsp), %rsi
vxorps %xmm0, %xmm0, %xmm0
movl %edx, %r8d
movl %ecx, %r9d
pushq %r10
pushq $0x0
vzeroupper
callq 0x5c159
addq $0x10, %rsp
cmpq $0x0, 0x20(%rsp)
je 0x22d1bc
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x22d1bc
movl 0x4c(%rsp), %ebp
movq 0x8(%rsp), %r9
movq (%r9), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%r9,%rcx), %edx
jmp 0x22cf02
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
vmovups %xmm0, 0xc(%rdi)
vmovaps %xmm0, 0x20(%rdi)
vmovups %xmm0, 0x2c(%rdi)
movq 0x10(%r15), %r9
movq 0xe0(%rsp), %rsi
movq 0x140(%rsp), %rdx
movl %r14d, %ecx
movq %rbx, %r8
vzeroupper
callq 0x5b24c
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x22cec3
movq 0x60(%rsp), %rax
movslq 0x58(%rsp), %rcx
movq %rax, %rdx
imulq %rcx, %rdx
testq %rdx, %rdx
je 0x22cec3
imull %ecx, %eax
testl %eax, %eax
jle 0x22c99e
movl %eax, %edx
shlq $0x2, %rdx
xorl %esi, %esi
callq 0x56070
testl %r14d, %r14d
movq 0x8(%rsp), %r8
jle 0x22ce7f
movq (%r8), %r15
movq -0x18(%r15), %rax
vmovss 0xd8(%r8,%rax), %xmm0
vcvtsi2ssl 0xd4(%r8,%rax), %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm3
movl %r12d, %r10d
andl $-0x8, %r10d
xorl %r11d, %r11d
vbroadcastss 0x1ea68e(%rip), %ymm4 # 0x417080
vbroadcastss 0x1ea689(%rip), %ymm5 # 0x417084
vbroadcastss 0x1e7c8c(%rip), %ymm7 # 0x414690
vbroadcastss 0x1ea67f(%rip), %ymm14 # 0x41708c
vmovups %ymm3, 0x140(%rsp)
movl %r10d, 0x78(%rsp)
movq %r13, 0x18(%rsp)
movq -0x18(%r15), %rcx
movl 0xd4(%r8,%rcx), %eax
movl %eax, %edx
shrl $0x1f, %edx
addl %eax, %edx
sarl %edx
movl %r11d, %eax
subl %edx, %eax
addl %r11d, %edx
cmpl %edx, %eax
jg 0x22caea
cmpl %r14d, %eax
jae 0x22cac5
movl %eax, %ecx
imulq 0xd0(%rsp), %rcx
imulq 0xa0(%rsp), %rcx
addq 0x90(%rsp), %rcx
movq 0x60(%rsp), %rdx
imulq %r11, %rdx
imulq 0x30(%rsp), %rdx
addq 0x20(%rsp), %rdx
xorl %edi, %edi
cmpl $0x8, %r12d
jl 0x22caa6
movl $0x7, %esi
vmovups (%rdx), %ymm0
vaddps (%rcx), %ymm0, %ymm0
vmovups %ymm0, (%rdx)
addq $0x20, %rcx
addq $0x20, %rdx
addl $0x8, %esi
cmpl %r12d, %esi
jl 0x22ca87
movl %r10d, %edi
movl %r12d, %esi
subl %edi, %esi
jle 0x22cac5
xorl %edi, %edi
vmovss (%rdx,%rdi,4), %xmm0
vaddss (%rcx,%rdi,4), %xmm0, %xmm0
vmovss %xmm0, (%rdx,%rdi,4)
incq %rdi
cmpl %edi, %esi
jne 0x22caaf
leal 0x1(%rax), %edx
movq -0x18(%r15), %rcx
movl 0xd4(%r8,%rcx), %esi
movl %esi, %edi
shrl $0x1f, %edi
addl %esi, %edi
sarl %edi
addl %r11d, %edi
cmpl %edi, %eax
movl %edx, %eax
jl 0x22ca45
movq %r13, %rax
movq 0x40(%r13), %r13
imulq %r11, %r13
imulq 0x10(%rax), %r13
addq (%rax), %r13
movq 0x60(%rsp), %rbp
imulq %r11, %rbp
imulq 0x30(%rsp), %rbp
addq 0x20(%rsp), %rbp
xorl %eax, %eax
cmpl $0x8, %r12d
movq %r11, 0x80(%rsp)
jl 0x22cdcb
vbroadcastss 0xe0(%r8,%rcx), %ymm0
vmovups %ymm0, 0xe0(%rsp)
vbroadcastss 0xdc(%r8,%rcx), %xmm1
vbroadcastss 0x1ea509(%rip), %xmm0 # 0x417054
vxorps %xmm0, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm0
vmovups %ymm0, 0x160(%rsp)
movl $0x7, %eax
vbroadcastss 0x1eabf4(%rip), %ymm0 # 0x417760
vbroadcastss 0x1e81a3(%rip), %ymm1 # 0x414d18
vbroadcastss 0x1eabc2(%rip), %xmm9 # 0x417740
vbroadcastss 0x1e8191(%rip), %xmm11 # 0x414d18
vmulps (%rbp), %ymm3, %ymm2
vaddps 0xe0(%rsp), %ymm2, %ymm2
vmovaps %ymm4, %ymm13
vmaxps %ymm4, %ymm2, %ymm4
vmovaps %ymm3, %ymm12
vextractf128 $0x1, %ymm4, %xmm3
vmovaps %ymm5, %ymm15
vandps %ymm5, %ymm4, %ymm5
vorps %ymm7, %ymm5, %ymm5
vcmpleps %ymm5, %ymm14, %ymm6
vmovaps %ymm7, %ymm8
vpsrld $0x17, %xmm3, %xmm7
vandnps %ymm5, %ymm6, %ymm3
vbroadcastss 0x1ea4c2(%rip), %ymm10 # 0x417090
vaddps %ymm5, %ymm10, %ymm5
vaddps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm6, %xmm5
vpsubd %xmm5, %xmm7, %xmm5
vmovaps %ymm8, %ymm7
vpsrld $0x17, %xmm4, %xmm4
vpsubd %xmm6, %xmm4, %xmm4
vbroadcastss 0x1ea49e(%rip), %ymm6 # 0x417094
vmulps %ymm6, %ymm3, %ymm6
vbroadcastss 0x1ea495(%rip), %ymm8 # 0x417098
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1ea488(%rip), %ymm8 # 0x41709c
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1ea47b(%rip), %ymm8 # 0x4170a0
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1ea46e(%rip), %ymm8 # 0x4170a4
vaddps %ymm6, %ymm8, %ymm6
vpaddd %xmm4, %xmm9, %xmm4
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1ea45d(%rip), %ymm8 # 0x4170a8
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vpaddd %xmm5, %xmm9, %xmm5
vbroadcastss 0x1ea44c(%rip), %ymm8 # 0x4170ac
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1ea43f(%rip), %ymm8 # 0x4170b0
vaddps %ymm6, %ymm8, %ymm6
vinsertf128 $0x1, %xmm5, %ymm4, %ymm4
vmulps %ymm3, %ymm6, %ymm5
vbroadcastss 0x1ea42c(%rip), %ymm6 # 0x4170b4
vaddps %ymm6, %ymm5, %ymm5
vmulps %ymm3, %ymm5, %ymm5
vmulps %ymm3, %ymm3, %ymm6
vbroadcastss 0x1eaac7(%rip), %ymm8 # 0x417764
vaddps %ymm5, %ymm8, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vxorps %xmm6, %xmm6, %xmm6
vcvtdq2ps %ymm4, %ymm4
vcmpleps %ymm6, %ymm2, %ymm2
vmulps %ymm0, %ymm4, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vaddps %ymm5, %ymm3, %ymm3
vorps %ymm3, %ymm2, %ymm2
vmulps 0x160(%rsp), %ymm2, %ymm2
vbroadcastss 0x1ea384(%rip), %ymm3 # 0x417058
vminps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ea37b(%rip), %ymm3 # 0x41705c
vmaxps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ea372(%rip), %ymm3 # 0x417060
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm7, %ymm3, %ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %ymm3
vandps %ymm1, %ymm3, %ymm3
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm0, %ymm3, %ymm4
vsubps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1ea352(%rip), %ymm4 # 0x41706c
vmulps %ymm4, %ymm2, %ymm4
vbroadcastss 0x1ea349(%rip), %ymm5 # 0x417070
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm2, %ymm4, %ymm4
vbroadcastss 0x1ea33c(%rip), %ymm5 # 0x417074
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm2, %ymm4, %ymm4
vbroadcastss 0x1ea32f(%rip), %ymm5 # 0x417078
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm2, %ymm4, %ymm4
vbroadcastss 0x1ea322(%rip), %ymm5 # 0x41707c
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm2, %ymm2, %ymm5
vmulps %ymm2, %ymm4, %ymm4
vaddps %ymm7, %ymm4, %ymm4
vcvttps2dq %ymm3, %ymm3
vmulps %ymm4, %ymm5, %ymm4
vpslld $0x17, %xmm3, %xmm5
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm3, %xmm11, %xmm3
vpaddd %xmm5, %xmm11, %xmm5
vinsertf128 $0x1, %xmm3, %ymm5, %ymm3
vmovaps %ymm15, %ymm5
vaddps %ymm1, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmovaps %ymm13, %ymm4
vmulps (%r13), %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm2
vmovaps %ymm12, %ymm3
vmovups %ymm2, (%r13)
addq $0x20, %rbp
addq $0x20, %r13
addl $0x8, %eax
cmpl %r12d, %eax
jl 0x22cb87
movl %r10d, %eax
movl %r12d, %r14d
subl %eax, %r14d
jle 0x22ce2c
xorl %ebx, %ebx
movq -0x18(%r15), %rax
vmovaps 0x100(%rsp), %xmm0
vmulss (%rbp,%rbx,4), %xmm0, %xmm0
vaddss 0xe0(%r8,%rax), %xmm0, %xmm0
vmovss 0xdc(%r8,%rax), %xmm1
vbroadcastss 0x1ea24f(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
vzeroupper
callq 0x560e0
movq 0x8(%rsp), %r8
vmulss (%r13,%rbx,4), %xmm0, %xmm0
vmovss %xmm0, (%r13,%rbx,4)
incq %rbx
cmpl %ebx, %r14d
jne 0x22cdd5
movq 0x80(%rsp), %r11
incq %r11
movq 0x88(%rsp), %r14
cmpq %r14, %r11
movq 0x18(%rsp), %r13
vmovups 0x140(%rsp), %ymm3
movl 0x78(%rsp), %r10d
vbroadcastss 0x1ea222(%rip), %ymm4 # 0x417080
vbroadcastss 0x1ea21d(%rip), %ymm5 # 0x417084
vbroadcastss 0x1e7820(%rip), %ymm7 # 0x414690
vbroadcastss 0x1ea213(%rip), %ymm14 # 0x41708c
jne 0x22ca20
movq 0x28(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22d214
lock
decl (%rax)
jne 0x22d214
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22d1f0
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x22d214
xorl %ebp, %ebp
jmp 0x22d214
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22d20f
lock
decl (%rax)
jne 0x22d20f
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22d1e9
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22d20f
movq 0xe0(%rsp), %rax
movl %eax, %ebp
movl %edx, 0x160(%rsp)
movl %edx, %r15d
imull %r15d, %r15d
vmovss 0xd8(%r9,%rcx), %xmm0
vmovss %xmm0, 0x100(%rsp)
leaq 0x180(%rsp), %rdi
leaq 0x17(%rsp), %rdx
movq %r15, %rsi
movq %r9, %rbx
vzeroupper
callq 0x6aa92
movq 0x180(%rsp), %r12
movq (%rbx), %rax
movq -0x18(%rax), %rcx
movl 0xd4(%rbx,%rcx), %edi
testl %edi, %edi
jle 0x22cf9e
movq %rbx, %r11
subl %edi, %ebp
xorl %ecx, %ecx
xorl %edx, %edx
xorl %esi, %esi
testl %edi, %edi
jle 0x22cf96
movslq %esi, %rdi
leaq (%r12,%rdi,4), %r9
movl %edx, %r10d
xorl %r8d, %r8d
leal (%r10,%r8), %edi
movl %edi, (%r9,%r8,4)
movq -0x18(%rax), %rdi
movl 0xd4(%r11,%rdi), %edi
incq %r8
cmpl %edi, %r8d
jl 0x22cf74
addl %r8d, %esi
addl %r8d, %edx
addl %ebp, %edx
incl %ecx
cmpl %edi, %ecx
jl 0x22cf63
testl %r14d, %r14d
jle 0x22d179
movslq 0xe0(%rsp), %rax
movq %rax, 0x78(%rsp)
vcvtsi2ss %r15d, %xmm2, %xmm0
movq (%r13), %rax
movq %rax, 0x128(%rsp)
movq 0x40(%r13), %rax
imulq 0x10(%r13), %rax
movq %rax, 0x120(%rsp)
movslq 0x4c(%rsp), %rcx
movq 0x20(%rsp), %rax
movq %rax, 0x118(%rsp)
movq 0x30(%rsp), %rax
movq 0x60(%rsp), %rdx
imulq %rax, %rdx
movq %rdx, 0x110(%rsp)
imulq %rax, %rcx
movq %rcx, 0x130(%rsp)
cmpl $0x1, %r15d
adcl $0x0, %r15d
vmovss 0x1e7cfe(%rip), %xmm1 # 0x414d18
vdivss %xmm0, %xmm1, %xmm0
vmovss %xmm0, 0x80(%rsp)
movq $0x0, 0x70(%rsp)
cmpl $0x0, 0x140(%rsp)
jle 0x22d15e
movq 0x110(%rsp), %rcx
movq 0x70(%rsp), %rax
imulq %rax, %rcx
addq 0x118(%rsp), %rcx
movq %rcx, 0x138(%rsp)
movq 0x120(%rsp), %rbp
imulq %rax, %rbp
addq 0x128(%rsp), %rbp
movq 0x8(%rsp), %rax
movq (%rax), %r13
movq $0x0, 0x18(%rsp)
cmpl $0x0, 0xe0(%rsp)
jle 0x22d139
movq 0x130(%rsp), %rbx
imulq 0x18(%rsp), %rbx
addq 0x138(%rsp), %rbx
xorl %r14d, %r14d
cmpl $0x0, 0x160(%rsp)
je 0x22d0d7
leaq (%rbx,%r14,4), %rax
vxorps %xmm0, %xmm0, %xmm0
xorl %ecx, %ecx
movq 0x8(%rsp), %rsi
movslq (%r12,%rcx,4), %rdx
vaddss (%rax,%rdx,4), %xmm0, %xmm0
incq %rcx
cmpq %rcx, %r15
jne 0x22d0c4
jmp 0x22d0e0
vxorps %xmm0, %xmm0, %xmm0
movq 0x8(%rsp), %rsi
vmulss 0x100(%rsp), %xmm0, %xmm0
vmulss 0x80(%rsp), %xmm0, %xmm0
movq -0x18(%r13), %rax
vaddss 0xe0(%rsi,%rax), %xmm0, %xmm0
vmovss 0xdc(%rsi,%rax), %xmm1
vbroadcastss 0x1e9f43(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
callq 0x560e0
vmulss (%rbp,%r14,4), %xmm0, %xmm0
vmovss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq 0xe0(%rsp), %r14
jne 0x22d0ab
movq 0x78(%rsp), %rax
leaq (%rbp,%rax,4), %rbp
movq 0x18(%rsp), %rcx
incq %rcx
movq %rcx, 0x18(%rsp)
cmpq 0x140(%rsp), %rcx
jne 0x22d084
movq 0x70(%rsp), %rcx
incq %rcx
movq %rcx, 0x70(%rsp)
cmpq 0x88(%rsp), %rcx
jne 0x22d030
testq %r12, %r12
je 0x22d191
movq 0x190(%rsp), %rsi
subq %r12, %rsi
movq %r12, %rdi
callq 0x56270
movq 0x28(%rsp), %rax
testq %rax, %rax
movl $0x0, %ebp
je 0x22d214
lock
decl (%rax)
jne 0x22d214
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22d1e2
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22d214
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22d20f
lock
decl (%rax)
jne 0x22d20f
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22d202
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22d20f
testq %rsi, %rsi
je 0x22d214
jmp 0x22d1f5
testq %rsi, %rsi
je 0x22d20f
jmp 0x22d207
testq %rsi, %rsi
je 0x22d214
movq %rsi, %rdi
vzeroupper
callq 0x563b0
jmp 0x22d214
testq %rsi, %rsi
je 0x22d20f
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x22d256
lock
decl (%rax)
jne 0x22d256
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
je 0x22d246
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x22d256
testq %rsi, %rsi
je 0x22d256
movq %rsi, %rdi
vzeroupper
callq 0x563b0
movl %ebp, %eax
addq $0x1c8, %rsp # imm = 0x1C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0x22d33b
jmp 0x22d33b
jmp 0x22d33b
jmp 0x22d33b
jmp 0x22d283
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22d2f7
lock
decl (%rax)
jne 0x22d2f7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x22d2ab
testq %rsi, %rsi
je 0x22d2f7
jmp 0x22d2de
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22d2f7
jmp 0x22d33b
movq %rax, %rbx
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22d2f7
lock
decl (%rax)
jne 0x22d2f7
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
jne 0x22d2e8
testq %rsi, %rsi
je 0x22d2f7
movq %rsi, %rdi
callq 0x563b0
jmp 0x22d2f7
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22d2f7
jmp 0x22d33b
jmp 0x22d33b
movq %rax, %rbx
movq 0x98(%rsp), %rax
testq %rax, %rax
je 0x22d333
lock
decl (%rax)
jne 0x22d333
movq 0x90(%rsp), %rsi
movq 0xb0(%rsp), %rdi
testq %rdi, %rdi
jne 0x22d32d
testq %rsi, %rsi
je 0x22d333
movq %rsi, %rdi
callq 0x563b0
jmp 0x22d333
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/lrn_x86_avx.cpp
|
ncnn::MVN::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Mean-Variance Normalization.
//
// Computes top = bottom - mean, and, when normalize_variance is set,
// additionally scales by 1 / (sqrt(mean(y^2)) + eps) where y is the
// already mean-subtracted output.  Two layer parameters steer the
// statistics:
//   - across_channels:    pool the mean / squared mean over ALL channels
//                         instead of per channel;
//   - normalize_variance: also divide by the (regularized) std deviation.
//
// @param bottom_blob  input feature map, w x h x channels, float32
// @param top_blob     output feature map; created here with the same shape
// @param opt          execution options (allocators, thread count)
// @return 0 on success, -100 when any blob allocation fails
int MVN::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int size = w * h; // elements per channel
    top_blob.create(w, h, channels, elemsize, opt.blob_allocator);
    if (top_blob.empty())
        return -100;
    // prepare sum per channel (workspace blob: one float per channel)
    Mat sum(channels, elemsize, opt.workspace_allocator);
    if (sum.empty())
        return -100;
    // per-channel reduction: sum[q] = sum of all elements of channel q
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = bottom_blob.channel(q);
        float s = 0.f;
        for (int i = 0; i < size; i++)
        {
            s += ptr[i];
        }
        sum[q] = s;
    }
    if (across_channels)
    {
        // compute mean across channels (single scalar for the whole blob)
        float mean = 0.f;
        for (int q = 0; q < channels; q++)
        {
            mean += sum[q];
        }
        // NOTE(review): channels * size is a 32-bit int product — presumably
        // fine for typical blob sizes, but overflows for very large blobs.
        mean = mean / (channels * size);
        // subtract mean
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const float* ptr = bottom_blob.channel(q);
            float* outptr = top_blob.channel(q);
            for (int i = 0; i < size; i++)
            {
                outptr[i] = ptr[i] - mean;
            }
        }
    }
    else
    {
        // subtract mean (one mean per channel)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const float* ptr = bottom_blob.channel(q);
            float* outptr = top_blob.channel(q);
            float mean = sum[q] / size;
            for (int i = 0; i < size; i++)
            {
                outptr[i] = ptr[i] - mean;
            }
        }
    }
    if (normalize_variance)
    {
        // prepare squared sum per channel; top_blob already holds the
        // mean-subtracted values, so sqsum[q] is effectively size * variance
        Mat sqsum(channels, elemsize, opt.workspace_allocator);
        if (sqsum.empty())
            return -100;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const float* ptr = top_blob.channel(q);
            float s = 0.f;
            for (int i = 0; i < size; i++)
            {
                s += ptr[i] * ptr[i];
            }
            sqsum[q] = s;
        }
        if (across_channels)
        {
            // compute squared mean across channels (one scalar variance)
            float sqmean = 0.f;
            for (int q = 0; q < channels; q++)
            {
                sqmean += sqsum[q];
            }
            sqmean = sqmean / (channels * size);
            // normalize variance; eps keeps the divisor away from zero
            float norm_var = static_cast<float>(sqrt(sqmean) + eps);
            float norm_var_inv = 1.f / norm_var;
            // apply normalize_variance (in place on top_blob)
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* outptr = top_blob.channel(q);
                for (int i = 0; i < size; i++)
                {
                    outptr[i] = outptr[i] * norm_var_inv;
                }
            }
        }
        else
        {
            // apply normalize_variance with a per-channel scale factor
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                float* outptr = top_blob.channel(q);
                float sqmean = sqsum[q] / size;
                float norm_var = static_cast<float>(sqrt(sqmean) + eps);
                float norm_var_inv = 1.f / norm_var;
                for (int i = 0; i < size; i++)
                {
                    outptr[i] = outptr[i] * norm_var_inv;
                }
            }
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %rdx, %r15
movq %rsi, %rbp
movq %rdi, %r12
movl 0x2c(%rsi), %r13d
movl 0x30(%rsi), %ebx
movl 0x38(%rsi), %r14d
movq 0x10(%rsi), %r8
movq %rcx, 0x10(%rsp)
movq 0x8(%rcx), %r9
movq %rdx, %rdi
movl %r13d, %esi
movl %ebx, %edx
movl %r14d, %ecx
movq %r8, 0x18(%rsp)
callq 0x5b24c
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%r15)
je 0x22e16e
movslq 0x38(%r15), %rcx
imulq 0x40(%r15), %rcx
testq %rcx, %rcx
je 0x22e16e
movq %r15, 0x8(%rsp)
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x70(%rsp), %r15
movq $0x0, 0x40(%r15)
xorps %xmm0, %xmm0
movaps %xmm0, (%r15)
movups %xmm0, 0xc(%r15)
movaps %xmm0, 0x20(%r15)
movups %xmm0, 0x2c(%r15)
movq %r15, %rdi
movl %r14d, %esi
movq 0x18(%rsp), %rdx
callq 0x5b024
movq (%r15), %rcx
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
testq %rcx, %rcx
je 0x22e130
movslq 0xa8(%rsp), %rdx
imulq 0xb0(%rsp), %rdx
testq %rdx, %rdx
je 0x22e130
imull %r13d, %ebx
testl %r14d, %r14d
jle 0x22de7b
movq (%rbp), %rdx
movq 0x40(%rbp), %rsi
imulq 0x10(%rbp), %rsi
movl %ebx, %eax
xorl %edi, %edi
xorps %xmm0, %xmm0
testl %ebx, %ebx
jle 0x22ddd2
xorl %r8d, %r8d
addss (%rdx,%r8,4), %xmm0
incq %r8
cmpq %r8, %rax
jne 0x22ddc4
movss %xmm0, (%rcx,%rdi,4)
incq %rdi
addq %rsi, %rdx
cmpq %r14, %rdi
jne 0x22ddba
leaq 0xd4(%r12), %r15
cmpl $0x0, 0xd4(%r12)
je 0x22de85
testl %r14d, %r14d
jle 0x22def2
xorps %xmm0, %xmm0
xorl %edx, %edx
addss (%rcx,%rdx,4), %xmm0
incq %rdx
cmpq %rdx, %r14
jne 0x22de07
testl %r14d, %r14d
jle 0x22def2
movl %r14d, %ecx
imull %ebx, %ecx
cvtsi2ss %ecx, %xmm1
divss %xmm1, %xmm0
movq (%rbp), %rcx
movq 0x40(%rbp), %rdx
movq 0x8(%rsp), %rdi
movq 0x40(%rdi), %rsi
imulq 0x10(%rdi), %rsi
movq (%rdi), %rdi
imulq 0x10(%rbp), %rdx
xorl %r8d, %r8d
testl %ebx, %ebx
jle 0x22de6b
xorl %r9d, %r9d
movss (%rcx,%r9,4), %xmm1
subss %xmm0, %xmm1
movss %xmm1, (%rdi,%r9,4)
incq %r9
cmpq %r9, %rax
jne 0x22de53
incq %r8
addq %rsi, %rdi
addq %rdx, %rcx
cmpq %r14, %r8
jne 0x22de4c
jmp 0x22def2
leaq 0xd4(%r12), %r15
jmp 0x22def2
testl %r14d, %r14d
jle 0x22def2
movq (%rbp), %rdx
movq 0x40(%rbp), %rsi
movq 0x8(%rsp), %r9
movq (%r9), %rdi
movq 0x40(%r9), %r8
imulq 0x10(%r9), %r8
cvtsi2ss %ebx, %xmm1
movss 0x1e6e69(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
imulq 0x10(%rbp), %rsi
xorl %r9d, %r9d
testl %ebx, %ebx
jle 0x22dee4
movss (%rcx,%r9,4), %xmm1
mulss %xmm0, %xmm1
xorl %r10d, %r10d
movss (%rdx,%r10,4), %xmm2
subss %xmm1, %xmm2
movss %xmm2, (%rdi,%r10,4)
incq %r10
cmpq %r10, %rax
jne 0x22decc
incq %r9
addq %r8, %rdi
addq %rsi, %rdx
cmpq %r14, %r9
jne 0x22debb
xorl %eax, %eax
cmpl $0x0, 0xd0(%r12)
je 0x22e130
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
leaq 0x20(%rsp), %rdi
movq $0x0, 0x40(%rdi)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdi)
movups %xmm0, 0xc(%rdi)
movaps %xmm0, 0x20(%rdi)
movups %xmm0, 0x2c(%rdi)
movl %r14d, %esi
movq 0x18(%rsp), %rdx
callq 0x5b024
movq 0x20(%rsp), %rcx
testq %rcx, %rcx
je 0x22e043
movslq 0x58(%rsp), %rax
imulq 0x60(%rsp), %rax
testq %rax, %rax
je 0x22e043
testl %r14d, %r14d
jle 0x22e0f1
movq 0x8(%rsp), %rax
movq (%rax), %rdx
movq 0x40(%rax), %rsi
imulq 0x10(%rax), %rsi
movl %ebx, %eax
xorl %edi, %edi
xorps %xmm0, %xmm0
testl %ebx, %ebx
jle 0x22df98
xorl %r8d, %r8d
movss (%rdx,%r8,4), %xmm1
mulss %xmm1, %xmm1
addss %xmm1, %xmm0
incq %r8
cmpq %r8, %rax
jne 0x22df82
movss %xmm0, (%rcx,%rdi,4)
incq %rdi
addq %rsi, %rdx
cmpq %r14, %rdi
jne 0x22df78
cmpl $0x0, (%r15)
je 0x22e078
testl %r14d, %r14d
jle 0x22e0f1
xorps %xmm0, %xmm0
xorl %edx, %edx
addss (%rcx,%rdx,4), %xmm0
incq %rdx
cmpq %rdx, %r14
jne 0x22dfc0
testl %r14d, %r14d
jle 0x22e0f1
movl %r14d, %ecx
imull %ebx, %ecx
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
divss %xmm1, %xmm0
xorps %xmm1, %xmm1
sqrtss %xmm0, %xmm1
addss 0xd8(%r12), %xmm1
movq 0x8(%rsp), %rdx
movq 0x40(%rdx), %rcx
imulq 0x10(%rdx), %rcx
movq (%rdx), %rdx
movss 0x1e6d07(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
xorl %esi, %esi
testl %ebx, %ebx
jle 0x22e033
xorl %edi, %edi
movss (%rdx,%rdi,4), %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rdx,%rdi,4)
incq %rdi
cmpq %rdi, %rax
jne 0x22e01d
incq %rsi
addq %rcx, %rdx
cmpq %r14, %rsi
jne 0x22e017
jmp 0x22e0f1
movq 0x28(%rsp), %rax
testq %rax, %rax
je 0x22e12b
lock
decl (%rax)
jne 0x22e12b
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22e11e
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22e12b
testl %r14d, %r14d
jle 0x22e0f1
xorps %xmm2, %xmm2
cvtsi2ss %ebx, %xmm2
movq 0x8(%rsp), %rdi
movq (%rdi), %rdx
movq 0x40(%rdi), %rsi
movss 0x1e6c80(%rip), %xmm0 # 0x414d18
movaps %xmm0, %xmm1
divss %xmm2, %xmm1
imulq 0x10(%rdi), %rsi
xorl %edi, %edi
testl %ebx, %ebx
jle 0x22e0e6
movss (%rcx,%rdi,4), %xmm2
mulss %xmm1, %xmm2
xorps %xmm3, %xmm3
sqrtss %xmm2, %xmm3
addss 0xd8(%r12), %xmm3
movaps %xmm0, %xmm2
divss %xmm3, %xmm2
xorl %r8d, %r8d
movss (%rdx,%r8,4), %xmm3
mulss %xmm2, %xmm3
movss %xmm3, (%rdx,%r8,4)
incq %r8
cmpq %r8, %rax
jne 0x22e0ce
incq %rdi
addq %rsi, %rdx
cmpq %r14, %rdi
jne 0x22e0a6
movq 0x28(%rsp), %rcx
testq %rcx, %rcx
movl $0x0, %eax
je 0x22e130
lock
decl (%rcx)
jne 0x22e130
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdi
testq %rdi, %rdi
je 0x22e180
movq (%rdi), %rax
callq *0x18(%rax)
xorl %eax, %eax
jmp 0x22e130
testq %rsi, %rsi
je 0x22e12b
movq %rsi, %rdi
callq 0x563b0
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
movq 0x78(%rsp), %rcx
testq %rcx, %rcx
je 0x22e16e
lock
decl (%rcx)
jne 0x22e16e
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x22e15d
movq (%rdi), %rcx
movl %eax, %ebx
callq *0x18(%rcx)
movl %ebx, %eax
jmp 0x22e16e
testq %rsi, %rsi
je 0x22e16e
movq %rsi, %rdi
movl %eax, %ebx
callq 0x563b0
movl %ebx, %eax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testq %rsi, %rsi
je 0x22e130
movq %rsi, %rdi
callq 0x563b0
xorl %eax, %eax
jmp 0x22e130
jmp 0x22e195
jmp 0x22e195
movq %rax, %rdi
callq 0x598e3
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x22e1d6
lock
decl (%rax)
jne 0x22e1d6
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x22e1d0
testq %rsi, %rsi
je 0x22e1d6
movq %rsi, %rdi
callq 0x563b0
jmp 0x22e1d6
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
jmp 0x22e195
|
/ysh329[P]ncnn/src/layer/mvn.cpp
|
ncnn::Pooling::make_padding(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
// Applies the layer's padding policy to the input blob before pooling.
//
// Writes a border-padded copy of bottom_blob into bottom_blob_bordered
// via copy_make_border (BORDER_CONSTANT).  The constant fill depends on
// the pooling method: for MAX pooling it is the smallest representable
// value (-128 for 1-byte elements, i.e. int8 data; -FLT_MAX otherwise)
// so that padding can never win the max; for AVE pooling it is 0.
//
// pad_mode selects how the border extents are derived:
//   0 — full padding: pad_{left,right,top,bottom} plus extra tail padding
//       on the right/bottom so the kernel/stride tiling covers every
//       input element;
//   1 — valid padding: exactly pad_{left,right,top,bottom};
//   2 — SAME_UPPER (tensorflow SAME / onnx SAME_UPPER): computed pad,
//       larger half on the right/bottom;
//   3 — SAME_LOWER (onnx): computed pad, larger half on the left/top.
// For modes 2 and 3 no copy is made when the computed pad is <= 0 in
// both dimensions (bottom_blob_bordered then stays a shallow reference
// to bottom_blob from the initial assignment).
//
// The border copy is allocated from opt.workspace_allocator (opt_b
// redirects blob_allocator to it) since this blob is transient.
void Pooling::make_padding(const Mat& bottom_blob, Mat& bottom_blob_bordered, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    bottom_blob_bordered = bottom_blob;
    float pad_value = 0.f;
    if (pooling_type == PoolMethod_MAX)
    {
        // elemsize == 1 indicates 1-byte (int8) storage; use its minimum
        pad_value = bottom_blob.elemsize == 1 ? -128.f : -FLT_MAX;
    }
    else if (pooling_type == PoolMethod_AVE)
    {
        pad_value = 0.f;
    }
    int wtailpad = 0;
    int htailpad = 0;
    if (pad_mode == 0) // full padding
    {
        // remainder of the stride tiling; a nonzero tail means the last
        // window would fall short, so pad the difference on right/bottom
        int wtail = (w + pad_left + pad_right - kernel_w) % stride_w;
        int htail = (h + pad_top + pad_bottom - kernel_h) % stride_h;
        if (wtail != 0)
            wtailpad = stride_w - wtail;
        if (htail != 0)
            htailpad = stride_h - htail;
        Option opt_b = opt;
        opt_b.blob_allocator = opt.workspace_allocator;
        copy_make_border(bottom_blob, bottom_blob_bordered, pad_top, pad_bottom + htailpad, pad_left, pad_right + wtailpad, BORDER_CONSTANT, pad_value, opt_b);
    }
    else if (pad_mode == 1) // valid padding
    {
        Option opt_b = opt;
        opt_b.blob_allocator = opt.workspace_allocator;
        copy_make_border(bottom_blob, bottom_blob_bordered, pad_top, pad_bottom, pad_left, pad_right, BORDER_CONSTANT, pad_value, opt_b);
    }
    else if (pad_mode == 2) // tensorflow padding=SAME or onnx padding=SAME_UPPER
    {
        // total pad needed so output size is ceil(w / stride_w)
        int wpad = kernel_w + (w - 1) / stride_w * stride_w - w;
        int hpad = kernel_h + (h - 1) / stride_h * stride_h - h;
        if (wpad > 0 || hpad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            // hpad/2 on top, remainder (the larger half when odd) on bottom
            copy_make_border(bottom_blob, bottom_blob_bordered, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, BORDER_CONSTANT, pad_value, opt_b);
        }
    }
    else if (pad_mode == 3) // onnx padding=SAME_LOWER
    {
        int wpad = kernel_w + (w - 1) / stride_w * stride_w - w;
        int hpad = kernel_h + (h - 1) / stride_h * stride_h - h;
        if (wpad > 0 || hpad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            // mirror of SAME_UPPER: larger half goes on the top/left
            copy_make_border(bottom_blob, bottom_blob_bordered, hpad - hpad / 2, hpad / 2, wpad - wpad / 2, wpad / 2, BORDER_CONSTANT, pad_value, opt_b);
        }
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %r15
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r12
movl 0x2c(%rsi), %r13d
movl 0x30(%rsi), %ebp
cmpq %rsi, %rdx
je 0x22f170
movq 0x8(%r14), %rax
testq %rax, %rax
je 0x22f0ee
lock
incl (%rax)
movq 0x8(%rbx), %rax
testq %rax, %rax
je 0x22f11d
lock
decl (%rax)
jne 0x22f11d
movq (%rbx), %rsi
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x22f110
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x22f11d
testq %rsi, %rsi
je 0x22f11d
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x40(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
movups %xmm0, 0xc(%rbx)
movups %xmm0, 0x28(%rbx)
movl $0x0, 0x38(%rbx)
movups (%r14), %xmm0
movups %xmm0, (%rbx)
movq 0x10(%r14), %rax
movq %rax, 0x10(%rbx)
movl 0x18(%r14), %eax
movl %eax, 0x18(%rbx)
movq 0x20(%r14), %rax
movq %rax, 0x20(%rbx)
movups 0x28(%r14), %xmm0
movups %xmm0, 0x28(%rbx)
movl 0x38(%r14), %eax
movl %eax, 0x38(%rbx)
movq 0x40(%r14), %rax
movq %rax, 0x40(%rbx)
xorps %xmm0, %xmm0
cmpl $0x0, 0xd0(%r12)
jne 0x22f194
xorl %eax, %eax
cmpq $0x1, 0x10(%r14)
sete %al
leaq 0x1ed259(%rip), %rcx # 0x41c3e8
movss (%rcx,%rax,4), %xmm0
movl 0xf8(%r12), %eax
cmpq $0x3, %rax
ja 0x22f3db
leaq 0x1ed243(%rip), %rcx # 0x41c3f0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movl 0xe4(%r12), %r8d
addl %r8d, %r13d
movl 0xe8(%r12), %edi
addl %edi, %r13d
subl 0xd4(%r12), %r13d
movl 0xdc(%r12), %r9d
movl 0xe0(%r12), %ecx
movl %r13d, %eax
cltd
idivl %r9d
movl %edx, %esi
movl 0xec(%r12), %r10d
addl %r10d, %ebp
movl 0xf0(%r12), %r11d
addl %r11d, %ebp
subl 0xd8(%r12), %ebp
movl %ebp, %eax
cltd
idivl %ecx
subl %esi, %r9d
testl %esi, %esi
cmovel %esi, %r9d
subl %edx, %ecx
testl %edx, %edx
cmovel %edx, %ecx
movups (%r15), %xmm1
movups 0x10(%r15), %xmm2
movups 0x20(%r15), %xmm3
movups 0x30(%r15), %xmm4
movq %rsp, %rax
movaps %xmm4, 0x30(%rax)
movaps %xmm3, 0x20(%rax)
movaps %xmm2, 0x10(%rax)
movaps %xmm1, (%rax)
movq 0x10(%r15), %rdx
movq %rdx, 0x8(%rax)
addl %r11d, %ecx
addl %edi, %r9d
movq %r14, %rdi
movq %rbx, %rsi
movl %r10d, %edx
jmp 0x22f3cf
decl %r13d
movl %r13d, %eax
cltd
idivl 0xdc(%r12)
movl %edx, %r9d
notl %r9d
addl 0xd4(%r12), %r9d
decl %ebp
movl %ebp, %eax
cltd
idivl 0xe0(%r12)
movl %edx, %ecx
notl %ecx
addl 0xd8(%r12), %ecx
testl %r9d, %r9d
jg 0x22f2a4
testl %ecx, %ecx
jle 0x22f3db
movups (%r15), %xmm1
movups 0x10(%r15), %xmm2
movups 0x20(%r15), %xmm3
movups 0x30(%r15), %xmm4
movq %rsp, %rax
movaps %xmm4, 0x30(%rax)
movaps %xmm3, 0x20(%rax)
movaps %xmm2, 0x10(%rax)
movaps %xmm1, (%rax)
movq 0x10(%r15), %rdx
movq %rdx, 0x8(%rax)
movl %ecx, %edx
shrl $0x1f, %edx
addl %ecx, %edx
sarl %edx
subl %edx, %ecx
movl %r9d, %r8d
shrl $0x1f, %r8d
addl %r9d, %r8d
sarl %r8d
subl %r8d, %r9d
jmp 0x22f3c9
decl %r13d
movl %r13d, %eax
cltd
idivl 0xdc(%r12)
movl %edx, %r8d
notl %r8d
addl 0xd4(%r12), %r8d
decl %ebp
movl %ebp, %eax
cltd
idivl 0xe0(%r12)
notl %edx
addl 0xd8(%r12), %edx
testl %r8d, %r8d
jg 0x22f332
testl %edx, %edx
jle 0x22f3db
movups (%r15), %xmm1
movups 0x10(%r15), %xmm2
movups 0x20(%r15), %xmm3
movups 0x30(%r15), %xmm4
movq %rsp, %rax
movaps %xmm4, 0x30(%rax)
movaps %xmm3, 0x20(%rax)
movaps %xmm2, 0x10(%rax)
movaps %xmm1, (%rax)
movq 0x10(%r15), %rcx
movq %rcx, 0x8(%rax)
movl %edx, %ecx
shrl $0x1f, %ecx
addl %edx, %ecx
sarl %ecx
subl %ecx, %edx
movl %r8d, %r9d
shrl $0x1f, %r9d
addl %r8d, %r9d
sarl %r9d
subl %r9d, %r8d
jmp 0x22f3c9
movups (%r15), %xmm1
movups 0x10(%r15), %xmm2
movups 0x20(%r15), %xmm3
movups 0x30(%r15), %xmm4
movq %rsp, %rax
movaps %xmm4, 0x30(%rax)
movaps %xmm3, 0x20(%rax)
movaps %xmm2, 0x10(%rax)
movaps %xmm1, (%rax)
movq 0x10(%r15), %rcx
movq %rcx, 0x8(%rax)
movl 0xec(%r12), %edx
movl 0xf0(%r12), %ecx
movl 0xe4(%r12), %r8d
movl 0xe8(%r12), %r9d
movq %r14, %rdi
movq %rbx, %rsi
pushq %rax
pushq $0x0
callq 0x5c159
addq $0x10, %rsp
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/ysh329[P]ncnn/src/layer/pooling.cpp
|
ncnn::Pooling_x86::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Pooling_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
// max value in NxN window
// avg value in NxN window
if (adaptive_pooling)
{
return Pooling::forward(bottom_blob, top_blob, opt);
}
#if __SSE2__
int elempack = bottom_blob.elempack;
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
#if __AVX__
#if __AVX512F__
if (elempack == 16)
{
if (global_pooling)
{
top_blob.create(channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int size = w * h;
if (pooling_type == PoolMethod_MAX)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m512 _max = _mm512_loadu_ps(ptr);
for (int i = 0; i < size; i++)
{
__m512 _val = _mm512_loadu_ps(ptr);
_max = _mm512_max_ps(_max, _val);
ptr += 16;
}
float* outptr = top_blob;
_mm512_storeu_ps(outptr + q * 16, _max);
}
}
else if (pooling_type == PoolMethod_AVE)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m512 _sum = _mm512_set1_ps(0.f);
for (int i = 0; i < size; i++)
{
__m512 _val = _mm512_loadu_ps(ptr);
_sum = _mm512_add_ps(_sum, _val);
ptr += 16;
}
__m512 _inv_size = _mm512_set1_ps(1.f / size);
__m512 _avg = _mm512_mul_ps(_sum, _inv_size);
float* outptr = top_blob;
_mm512_storeu_ps(outptr + q * 16, _avg);
}
}
return 0;
}
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_w) / stride_w + 1;
int outh = (h - kernel_h) / stride_h + 1;
top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - kernel_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
if (pooling_type == PoolMethod_MAX)
{
if (kernel_w == 2 && kernel_h == 2 && stride_w == 2 && stride_h == 2)
{
pooling2x2s2_max_pack16_avx512(bottom_blob_bordered, top_blob, opt);
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && stride_w == 2 && stride_h == 2)
{
pooling3x3s2_max_pack16_avx512(bottom_blob_bordered, top_blob, opt);
return 0;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
__m512 _max = _mm512_loadu_ps(sptr);
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
_max = _mm512_max_ps(_max, _val);
}
_mm512_storeu_ps(outptr, _max);
outptr += 16;
}
}
}
}
else if (pooling_type == PoolMethod_AVE)
{
if (avgpool_count_include_pad == 0)
{
int wtailpad = 0;
int htailpad = 0;
if (pad_mode == 0) // full padding
{
wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
int sy0 = i * stride_h;
for (int j = 0; j < outw; j++)
{
int sx0 = j * stride_w;
__m512 _sum = _mm512_set1_ps(0.f);
int area = 0;
for (int ki = 0; ki < kernel_h; ki++)
{
int sy = sy0 + ki;
if (sy < pad_top)
continue;
if (sy >= h - pad_bottom - htailpad)
break;
for (int kj = 0; kj < kernel_w; kj++)
{
int sx = sx0 + kj;
if (sx < pad_left)
continue;
if (sx >= w - pad_right - wtailpad)
break;
__m512 _val = _mm512_loadu_ps(m.row(sy) + sx * 16);
_sum = _mm512_add_ps(_sum, _val);
area += 1;
}
}
__m512 _inv_area = _mm512_set1_ps(1.f / area);
__m512 _avg = _mm512_mul_ps(_sum, _inv_area);
_mm512_storeu_ps(outptr, _avg);
outptr += 16;
}
}
}
}
else // if (avgpool_count_include_pad == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
__m512 _inv_maxk = _mm512_set1_ps(1.f / maxk);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 16;
__m512 _sum = _mm512_set1_ps(0.f);
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(sptr + space_ofs[k] * 16);
_sum = _mm512_add_ps(_sum, _val);
}
__m512 _avg = _mm512_mul_ps(_sum, _inv_maxk);
_mm512_storeu_ps(outptr, _avg);
outptr += 16;
}
}
}
}
}
return 0;
}
#endif // __AVX512F__
// NCNN_LOGE("Pooling input %d x %d pad = %d %d %d %d ksize=%d %d stride=%d %d", w, h, pad_left, pad_right, pad_top, pad_bottom, kernel_w, kernel_h, stride_w, stride_h);
if (elempack == 8)
{
if (global_pooling)
{
top_blob.create(channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int size = w * h;
if (pooling_type == PoolMethod_MAX)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m256 _max = _mm256_loadu_ps(ptr);
for (int i = 0; i < size; i++)
{
__m256 _val = _mm256_loadu_ps(ptr);
_max = _mm256_max_ps(_max, _val);
ptr += 8;
}
float* outptr = top_blob;
_mm256_storeu_ps(outptr + q * 8, _max);
}
}
else if (pooling_type == PoolMethod_AVE)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m256 _sum = _mm256_set1_ps(0.f);
for (int i = 0; i < size; i++)
{
__m256 _val = _mm256_loadu_ps(ptr);
_sum = _mm256_add_ps(_sum, _val);
ptr += 8;
}
__m256 _inv_size = _mm256_set1_ps(1.f / size);
__m256 _avg = _mm256_mul_ps(_sum, _inv_size);
float* outptr = top_blob;
_mm256_storeu_ps(outptr + q * 8, _avg);
}
}
return 0;
}
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_w) / stride_w + 1;
int outh = (h - kernel_h) / stride_h + 1;
top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - kernel_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
if (pooling_type == PoolMethod_MAX)
{
if (kernel_w == 2 && kernel_h == 2 && stride_w == 2 && stride_h == 2)
{
pooling2x2s2_max_pack8_avx(bottom_blob_bordered, top_blob, opt);
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && stride_w == 2 && stride_h == 2)
{
pooling3x3s2_max_pack8_avx(bottom_blob_bordered, top_blob, opt);
return 0;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
__m256 _max = _mm256_loadu_ps(sptr);
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
_max = _mm256_max_ps(_max, _val);
}
_mm256_storeu_ps(outptr + j * 8, _max);
}
outptr += outw * 8;
}
}
}
else if (pooling_type == PoolMethod_AVE)
{
if (avgpool_count_include_pad == 0)
{
int wtailpad = 0;
int htailpad = 0;
if (pad_mode == 0) // full padding
{
wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
int sy0 = i * stride_h;
for (int j = 0; j < outw; j++)
{
int sx0 = j * stride_w;
__m256 _sum = _mm256_set1_ps(0.f);
int area = 0;
for (int ki = 0; ki < kernel_h; ki++)
{
int sy = sy0 + ki;
if (sy < pad_top)
continue;
if (sy >= h - pad_bottom - htailpad)
break;
for (int kj = 0; kj < kernel_w; kj++)
{
int sx = sx0 + kj;
if (sx < pad_left)
continue;
if (sx >= w - pad_right - wtailpad)
break;
__m256 _val = _mm256_loadu_ps(m.row(sy) + sx * 8);
_sum = _mm256_add_ps(_sum, _val);
area += 1;
}
}
__m256 _inv_area = _mm256_set1_ps(1.f / area);
__m256 _avg = _mm256_mul_ps(_sum, _inv_area);
_mm256_storeu_ps(outptr + j * 8, _avg);
}
outptr += outw * 8;
}
}
}
else // if (avgpool_count_include_pad == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
__m256 _inv_maxk = _mm256_set1_ps(1.f / maxk);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 8;
__m256 _sum = _mm256_set1_ps(0.f);
for (int k = 0; k < maxk; k++)
{
__m256 _val = _mm256_loadu_ps(sptr + space_ofs[k] * 8);
_sum = _mm256_add_ps(_sum, _val);
}
__m256 _avg = _mm256_mul_ps(_sum, _inv_maxk);
_mm256_storeu_ps(outptr + j * 8, _avg);
}
outptr += outw * 8;
}
}
}
}
return 0;
}
#endif // __AVX__
if (elempack == 4)
{
if (global_pooling)
{
top_blob.create(channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
int size = w * h;
if (pooling_type == PoolMethod_MAX)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m128 _max = _mm_loadu_ps(ptr);
for (int i = 0; i < size; i++)
{
__m128 _val = _mm_loadu_ps(ptr);
_max = _mm_max_ps(_max, _val);
ptr += 4;
}
float* outptr = top_blob;
_mm_storeu_ps(outptr + q * 4, _max);
}
}
else if (pooling_type == PoolMethod_AVE)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const float* ptr = bottom_blob.channel(q);
__m128 _sum = _mm_set1_ps(0.f);
for (int i = 0; i < size; i++)
{
__m128 _val = _mm_loadu_ps(ptr);
_sum = _mm_add_ps(_sum, _val);
ptr += 4;
}
__m128 _inv_size = _mm_set1_ps(1.f / size);
__m128 _avg = _mm_mul_ps(_sum, _inv_size);
float* outptr = top_blob;
_mm_storeu_ps(outptr + q * 4, _avg);
}
}
return 0;
}
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_w) / stride_w + 1;
int outh = (h - kernel_h) / stride_h + 1;
top_blob.create(outw, outh, channels, elemsize, elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
const int maxk = kernel_w * kernel_h;
// kernel offsets
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w - kernel_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2++;
}
p2 += gap;
}
}
if (pooling_type == PoolMethod_MAX)
{
if (kernel_w == 2 && kernel_h == 2 && stride_w == 2 && stride_h == 2)
{
pooling2x2s2_max_pack4_sse(bottom_blob_bordered, top_blob, opt);
return 0;
}
if (kernel_w == 3 && kernel_h == 3 && stride_w == 2 && stride_h == 2)
{
pooling3x3s2_max_pack4_sse(bottom_blob_bordered, top_blob, opt);
return 0;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
__m128 _max = _mm_loadu_ps(sptr);
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
_max = _mm_max_ps(_max, _val);
}
_mm_storeu_ps(outptr + j * 4, _max);
}
outptr += outw * 4;
}
}
}
else if (pooling_type == PoolMethod_AVE)
{
if (avgpool_count_include_pad == 0)
{
int wtailpad = 0;
int htailpad = 0;
if (pad_mode == 0) // full padding
{
wtailpad = bottom_blob_bordered.w - bottom_blob.w - pad_left - pad_right;
htailpad = bottom_blob_bordered.h - bottom_blob.h - pad_top - pad_bottom;
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
for (int i = 0; i < outh; i++)
{
int sy0 = i * stride_h;
for (int j = 0; j < outw; j++)
{
int sx0 = j * stride_w;
__m128 _sum = _mm_set1_ps(0.f);
int area = 0;
for (int ki = 0; ki < kernel_h; ki++)
{
int sy = sy0 + ki;
if (sy < pad_top)
continue;
if (sy >= h - pad_bottom - htailpad)
break;
for (int kj = 0; kj < kernel_w; kj++)
{
int sx = sx0 + kj;
if (sx < pad_left)
continue;
if (sx >= w - pad_right - wtailpad)
break;
__m128 _val = _mm_loadu_ps(m.row(sy) + sx * 4);
_sum = _mm_add_ps(_sum, _val);
area += 1;
}
}
__m128 _inv_area = _mm_set1_ps(1.f / area);
__m128 _avg = _mm_mul_ps(_sum, _inv_area);
_mm_storeu_ps(outptr + j * 4, _avg);
}
outptr += outw * 4;
}
}
}
else // if (avgpool_count_include_pad == 1)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob_bordered.channel(q);
float* outptr = top_blob.channel(q);
__m128 _inv_maxk = _mm_set1_ps(1.f / maxk);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
__m128 _sum = _mm_set1_ps(0.f);
for (int k = 0; k < maxk; k++)
{
__m128 _val = _mm_loadu_ps(sptr + space_ofs[k] * 4);
_sum = _mm_add_ps(_sum, _val);
}
__m128 _avg = _mm_mul_ps(_sum, _inv_maxk);
_mm_storeu_ps(outptr + j * 4, _avg);
}
outptr += outw * 4;
}
}
}
}
return 0;
}
#endif // __SSE2__
if (kernel_w != kernel_h || stride_w != stride_h)
{
return Pooling::forward(bottom_blob, top_blob, opt);
}
const int stride = stride_w;
if (pooling_type != PoolMethod_MAX || stride != 2 || global_pooling == 1)
{
return Pooling::forward(bottom_blob, top_blob, opt);
}
#if __AVX__
const int kernel_size = kernel_w;
if (kernel_size != 2)
{
return Pooling::forward(bottom_blob, top_blob, opt);
}
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int outw = (w - kernel_w) / stride_w + 1;
int outh = (h - kernel_h) / stride_h + 1;
top_blob.create(outw, outh, channels, elemsize, opt.blob_allocator);
if (top_blob.empty())
return -100;
if (kernel_size == 2)
pooling2x2s2_max_avx(bottom_blob_bordered, top_blob, opt);
return 0;
#else
return Pooling::forward(bottom_blob, top_blob, opt);
#endif
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x108, %rsp # imm = 0x108
movq %rcx, %r14
movq %rdx, %r9
movq %rsi, %rbp
movq %rdi, %r15
movq (%rdi), %rax
movq -0x18(%rax), %rax
addq %rax, %rdi
cmpl $0x0, 0x100(%r15,%rax)
je 0x22f523
movq %rbp, %rsi
movq %r9, %rdx
movq %r14, %rcx
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x22e368
cmpl $0x4, 0x18(%rbp)
jne 0x22f61c
movl 0x38(%rbp), %esi
movq 0x10(%rbp), %rbx
cmpl $0x0, 0xf4(%rdi)
je 0x22f662
movl 0x2c(%rbp), %r13d
movl 0x30(%rbp), %r12d
movq 0x8(%r14), %r8
movq %r9, %rdi
movq %rsi, %r14
movq %rbx, %rdx
movl $0x4, %ecx
movq %r9, %rbx
callq 0x5a03c
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x22fe5e
movq %rbx, %rsi
movslq 0x38(%rbx), %rcx
imulq 0x40(%rbx), %rcx
testq %rcx, %rcx
je 0x22fe5e
movq %r14, %rdi
imull %r13d, %r12d
movq (%r15), %rax
movq -0x18(%rax), %rax
movl 0xd0(%r15,%rax), %ecx
xorl %eax, %eax
testl %ecx, %ecx
je 0x22f96a
cmpl $0x1, %ecx
jne 0x22fe5e
testl %edi, %edi
jle 0x22fe5e
cvtsi2ss %r12d, %xmm1
movss 0x1e5753(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
xorl %eax, %eax
testl %r12d, %r12d
jle 0x22f5fb
movq 0x40(%rbp), %rcx
imulq %rax, %rcx
imulq 0x10(%rbp), %rcx
addq (%rbp), %rcx
xorps %xmm1, %xmm1
movl %r12d, %edx
movups (%rcx), %xmm2
addps %xmm2, %xmm1
addq $0x10, %rcx
decl %edx
jne 0x22f5eb
jmp 0x22f5fe
xorps %xmm1, %xmm1
mulps %xmm0, %xmm1
movq (%rsi), %rcx
movq %rax, %rdx
shlq $0x4, %rdx
movups %xmm1, (%rcx,%rdx)
incq %rax
cmpq %rdi, %rax
jne 0x22f5cf
jmp 0x22f9b8
movl 0xd4(%rdi), %eax
cmpl 0xd8(%rdi), %eax
jne 0x22f504
movl 0xdc(%rdi), %eax
cmpl 0xe0(%rdi), %eax
jne 0x22f504
cmpl $0x2, %eax
jne 0x22f504
cmpl $0x0, 0xd0(%rdi)
jne 0x22f504
cmpl $0x1, 0xf4(%rdi)
jmp 0x22f504
movq %rsi, 0x18(%rsp)
movq %r9, 0x30(%rsp)
leaq 0x60(%rsp), %rdx
movq $0x0, 0x40(%rdx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rdx)
movups %xmm0, 0xc(%rdx)
movaps %xmm0, 0x20(%rdx)
movups %xmm0, 0x2c(%rdx)
movq %rbp, %rsi
movq %r14, %rcx
callq 0x22f0b8
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, 0x60(%rsp)
je 0x22fe20
movslq 0x98(%rsp), %rcx
imulq 0xa0(%rsp), %rcx
testq %rcx, %rcx
je 0x22fe20
movl 0x8c(%rsp), %r13d
movl 0x90(%rsp), %edi
movq (%r15), %rax
movq -0x18(%rax), %rcx
movl %r13d, %eax
subl 0xd4(%r15,%rcx), %eax
cltd
idivl 0xdc(%r15,%rcx)
movl %eax, %esi
movl %edi, 0x10(%rsp)
movl %edi, %eax
subl 0xd8(%r15,%rcx), %eax
cltd
idivl 0xe0(%r15,%rcx)
movl %eax, %ecx
movq %rsi, 0x50(%rsp)
leal 0x1(%rsi), %r12d
movq %rcx, 0x38(%rsp)
leal 0x1(%rcx), %edx
movq 0x8(%r14), %rax
movq %rax, (%rsp)
movq 0x30(%rsp), %r14
movq %r14, %rdi
movl %r12d, %esi
movq 0x18(%rsp), %rcx
movq %rbx, %r8
movl $0x4, %r9d
callq 0x5a266
cmpq $0x0, (%r14)
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
je 0x22fe20
movslq 0x38(%r14), %rcx
imulq 0x40(%r14), %rcx
testq %rcx, %rcx
je 0x22fe20
movq (%r15), %rax
movq -0x18(%rax), %rax
movslq 0xd4(%r15,%rax), %rcx
movslq 0xd8(%r15,%rax), %r14
imulq %rcx, %r14
leaq 0xf0(%rsp), %rdi
leaq 0x2f(%rsp), %rdx
movq %r14, %rsi
callq 0x6aa92
movq 0xf0(%rsp), %rax
movq (%r15), %rcx
movq -0x18(%rcx), %rdx
movl 0xd8(%r15,%rdx), %r10d
testl %r10d, %r10d
jle 0x22f806
movl %r13d, %esi
subl 0xd4(%r15,%rdx), %esi
xorl %edi, %edi
xorl %r8d, %r8d
xorl %r9d, %r9d
cmpl $0x0, 0xd4(%r15,%rdx)
jle 0x22f7f4
movslq %r8d, %rdx
leaq (%rax,%rdx,4), %r11
movl %r9d, %ebx
xorl %r10d, %r10d
leal (%rbx,%r10), %edx
movl %edx, (%r11,%r10,4)
movq -0x18(%rcx), %rdx
incq %r10
cmpl 0xd4(%r15,%rdx), %r10d
jl 0x22f7d5
addl %r10d, %r8d
addl %r10d, %r9d
addl %esi, %r9d
incl %edi
movl 0xd8(%r15,%rdx), %r10d
cmpl %r10d, %edi
jl 0x22f7bd
addq %r15, %rdx
movl 0xd0(%rdx), %ecx
testl %ecx, %ecx
je 0x22f9bf
cmpl $0x1, %ecx
jne 0x22fe01
cmpl $0x0, 0xfc(%rdx)
je 0x22fb47
cmpl $0x0, 0x18(%rsp)
jle 0x22fe01
cvtsi2ss %r14d, %xmm1
movss 0x1e54d3(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
leal (,%r12,4), %ecx
movslq %ecx, %rcx
movq %rcx, 0x20(%rsp)
movl %r12d, %edx
movl %r14d, %esi
movq $0x0, 0x10(%rsp)
cmpl $0x0, 0x38(%rsp)
js 0x22f94d
movq 0xa0(%rsp), %r8
movq 0x10(%rsp), %r10
imulq %r10, %r8
movq 0x70(%rsp), %rdi
imulq %rdi, %r8
addq 0x60(%rsp), %r8
movq 0x30(%rsp), %rcx
movq 0x40(%rcx), %r9
imulq %r10, %r9
imulq 0x10(%rcx), %r9
addq (%rcx), %r9
movslq 0x8c(%rsp), %r10
movq (%r15), %r11
imulq %rdi, %r10
xorl %ebx, %ebx
cmpl $0x0, 0x50(%rsp)
js 0x22f935
movslq %ebx, %r12
xorl %r13d, %r13d
testl %r14d, %r14d
jle 0x22f91b
movq -0x18(%r11), %rdi
movslq 0xe0(%r15,%rdi), %rbp
imulq %r12, %rbp
imulq %r10, %rbp
addq %r8, %rbp
movl 0xdc(%r15,%rdi), %edi
imull %r13d, %edi
shll $0x2, %edi
movslq %edi, %rdi
leaq (%rbp,%rdi,4), %rbp
xorps %xmm1, %xmm1
xorl %edi, %edi
movslq (%rax,%rdi,4), %rcx
shlq $0x4, %rcx
movups (%rbp,%rcx), %xmm2
addps %xmm2, %xmm1
incq %rdi
cmpq %rdi, %rsi
jne 0x22f901
jmp 0x22f91e
xorps %xmm1, %xmm1
mulps %xmm0, %xmm1
movq %r13, %rdi
shlq $0x4, %rdi
movups %xmm1, (%r9,%rdi)
incq %r13
cmpq %rdx, %r13
jne 0x22f8c9
movq 0x20(%rsp), %rcx
leaq (%r9,%rcx,4), %r9
leal 0x1(%rbx), %edi
cmpl 0x38(%rsp), %ebx
movl %edi, %ebx
jne 0x22f8bc
movq 0x10(%rsp), %rdi
incq %rdi
movq %rdi, 0x10(%rsp)
cmpq 0x18(%rsp), %rdi
jne 0x22f86c
jmp 0x22fe01
testl %edi, %edi
jle 0x22fe5e
xorl %eax, %eax
movq (%rbp), %rcx
movq 0x40(%rbp), %rdx
imulq %rax, %rdx
imulq 0x10(%rbp), %rdx
movups (%rcx,%rdx), %xmm0
testl %r12d, %r12d
jle 0x22f9a2
addq %rdx, %rcx
movl %r12d, %edx
movups (%rcx), %xmm1
maxps %xmm1, %xmm0
addq $0x10, %rcx
decl %edx
jne 0x22f994
movq (%rsi), %rcx
movq %rax, %rdx
shlq $0x4, %rdx
movups %xmm0, (%rcx,%rdx)
incq %rax
cmpq %rdi, %rax
jne 0x22f974
xorl %eax, %eax
jmp 0x22fe5e
movl 0xd4(%rdx), %ecx
cmpl $0x2, %r10d
jne 0x22f9f6
cmpl $0x2, %ecx
jne 0x22f9f6
cmpl $0x2, 0xdc(%rdx)
jne 0x22fa27
cmpl $0x2, 0xe0(%rdx)
jne 0x22fa27
leaq 0x60(%rsp), %rdi
movq 0x30(%rsp), %rsi
callq 0x22febf
jmp 0x22fe01
cmpl $0x3, %r10d
jne 0x22fa27
cmpl $0x3, %ecx
jne 0x22fa27
cmpl $0x2, 0xdc(%rdx)
jne 0x22fa27
cmpl $0x2, 0xe0(%rdx)
jne 0x22fa27
leaq 0x60(%rsp), %rdi
movq 0x30(%rsp), %rsi
callq 0x22ff6c
jmp 0x22fe01
cmpl $0x0, 0x18(%rsp)
jle 0x22fe01
leal (,%r12,4), %ecx
movslq %ecx, %rcx
movq %rcx, 0x20(%rsp)
movl %r12d, %edx
movl %r14d, %esi
movq $0x0, 0x10(%rsp)
cmpl $0x0, 0x38(%rsp)
js 0x22fb2a
movq 0xa0(%rsp), %r8
movq 0x10(%rsp), %r10
imulq %r10, %r8
movq 0x70(%rsp), %rdi
imulq %rdi, %r8
addq 0x60(%rsp), %r8
movq 0x30(%rsp), %rcx
movq 0x40(%rcx), %r9
imulq %r10, %r9
imulq 0x10(%rcx), %r9
addq (%rcx), %r9
movslq 0x8c(%rsp), %r10
movq (%r15), %r11
imulq %rdi, %r10
xorl %ebx, %ebx
cmpl $0x0, 0x50(%rsp)
js 0x22fb12
movslq %ebx, %r12
xorl %r13d, %r13d
movq -0x18(%r11), %rbp
movslq 0xe0(%r15,%rbp), %rdi
imulq %r12, %rdi
imulq %r10, %rdi
addq %r8, %rdi
movl 0xdc(%r15,%rbp), %ebp
imull %r13d, %ebp
shll $0x2, %ebp
movslq %ebp, %rbp
movups (%rdi,%rbp,4), %xmm0
testl %r14d, %r14d
jle 0x22fafe
leaq (%rdi,%rbp,4), %rbp
xorl %edi, %edi
movslq (%rax,%rdi,4), %rcx
shlq $0x4, %rcx
movups (%rbp,%rcx), %xmm1
maxps %xmm1, %xmm0
incq %rdi
cmpq %rdi, %rsi
jne 0x22fae6
movq %r13, %rdi
shlq $0x4, %rdi
movups %xmm0, (%r9,%rdi)
incq %r13
cmpq %rdx, %r13
jne 0x22faae
movq 0x20(%rsp), %rcx
leaq (%r9,%rcx,4), %r9
leal 0x1(%rbx), %edi
cmpl 0x38(%rsp), %ebx
movl %edi, %ebx
jne 0x22faa1
movq 0x10(%rsp), %rdi
incq %rdi
movq %rdi, 0x10(%rsp)
cmpq 0x18(%rsp), %rdi
jne 0x22fa51
jmp 0x22fe01
xorl %r10d, %r10d
cmpl $0x0, 0xf8(%rdx)
movl $0x0, 0x20(%rsp)
jne 0x22fb8f
movl 0x2c(%rbp), %r10d
movl 0x30(%rbp), %eax
subl 0x8c(%rsp), %r10d
addl 0xe4(%rdx), %r10d
addl 0xe8(%rdx), %r10d
subl 0x90(%rsp), %eax
addl 0xec(%rdx), %eax
addl 0xf0(%rdx), %eax
movl %eax, 0x20(%rsp)
cmpl $0x0, 0x18(%rsp)
jle 0x22fe01
movq (%r15), %rax
movq %rax, 0xa8(%rsp)
movl 0x10(%rsp), %eax
addl %eax, 0x20(%rsp)
addl %r13d, %r10d
leal (,%r12,4), %ecx
movslq %ecx, %rax
movq %rax, 0xd0(%rsp)
movl %r12d, %eax
movq %rax, 0xe0(%rsp)
movq $0x0, 0x40(%rsp)
movss 0x1e5139(%rip), %xmm0 # 0x414d18
cmpl $0x0, 0x38(%rsp)
js 0x22fde9
movq 0x60(%rsp), %rax
movq %rax, 0xc8(%rsp)
movq 0x70(%rsp), %r14
movq 0x30(%rsp), %rax
movq 0x40(%rax), %rdx
movq 0x40(%rsp), %rcx
imulq %rcx, %rdx
imulq 0x10(%rax), %rdx
addq (%rax), %rdx
movq %rdx, 0x58(%rsp)
movslq 0x8c(%rsp), %rax
movq %r14, 0xc0(%rsp)
movq %rax, 0xb8(%rsp)
imulq %rax, %r14
movq 0xa0(%rsp), %rax
imulq %rcx, %rax
movq %rax, 0xb0(%rsp)
movq $0x0, 0x48(%rsp)
cmpl $0x0, 0x50(%rsp)
js 0x22fdba
movq 0xa8(%rsp), %rax
movq -0x18(%rax), %rcx
movl 0xe0(%r15,%rcx), %ecx
imull 0x48(%rsp), %ecx
movslq %ecx, %r9
movq 0xb8(%rsp), %rax
imulq %r9, %rax
addq 0xb0(%rsp), %rax
imulq 0xc0(%rsp), %rax
addq 0xc8(%rsp), %rax
movq %rax, 0xd8(%rsp)
xorl %edx, %edx
movq 0xa8(%rsp), %rax
movq -0x18(%rax), %rbp
movslq 0xd8(%r15,%rbp), %rsi
testq %rsi, %rsi
jle 0x22fd85
movslq 0xec(%r15,%rbp), %r12
movl 0xdc(%r15,%rbp), %ecx
movq %rdx, 0xe8(%rsp)
imull %edx, %ecx
movslq %ecx, %r13
movq %r13, 0x10(%rsp)
shlq $0x4, %r13
addq 0xd8(%rsp), %r13
xorps %xmm1, %xmm1
xorl %edi, %edi
xorl %edx, %edx
leaq (%rdi,%r9), %rcx
cmpq %r12, %rcx
jl 0x22fd69
movl 0x20(%rsp), %r8d
subl 0xf0(%r15,%rbp), %r8d
movslq %r8d, %r8
cmpq %r8, %rcx
jge 0x22fd74
movslq 0xd4(%r15,%rbp), %rbx
testq %rbx, %rbx
jle 0x22fd69
movslq 0xe4(%r15,%rbp), %r11
shlq $0x4, %rbx
movq 0x10(%rsp), %r8
xorl %ecx, %ecx
cmpq %r11, %r8
jl 0x22fd5d
movl %r10d, %eax
subl 0xe8(%r15,%rbp), %eax
cltq
cmpq %rax, %r8
jge 0x22fd69
movups (%r13,%rcx), %xmm2
addps %xmm2, %xmm1
incl %edx
addq $0x10, %rcx
incq %r8
cmpq %rcx, %rbx
jne 0x22fd3b
incq %rdi
addq %r14, %r13
cmpq %rsi, %rdi
jne 0x22fcfd
xorps %xmm2, %xmm2
cvtsi2ss %edx, %xmm2
movq 0xe8(%rsp), %rdx
jmp 0x22fd8b
xorps %xmm2, %xmm2
xorps %xmm1, %xmm1
movaps %xmm0, %xmm3
divss %xmm2, %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
mulps %xmm1, %xmm3
movq %rdx, %rax
shlq $0x4, %rax
movq 0x58(%rsp), %rcx
movups %xmm3, (%rcx,%rax)
incq %rdx
cmpq 0xe0(%rsp), %rdx
jne 0x22fcaa
movq 0xd0(%rsp), %rax
movq 0x58(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x58(%rsp)
movq 0x48(%rsp), %rax
leal 0x1(%rax), %ecx
cmpl 0x38(%rsp), %eax
movl %ecx, %eax
movq %rax, 0x48(%rsp)
jne 0x22fc54
movq 0x40(%rsp), %rcx
incq %rcx
movq %rcx, 0x40(%rsp)
cmpq 0x18(%rsp), %rcx
jne 0x22fbdf
movq 0xf0(%rsp), %rdi
testq %rdi, %rdi
je 0x22fe1e
movq 0x100(%rsp), %rsi
subq %rdi, %rsi
callq 0x56270
xorl %eax, %eax
movq 0x68(%rsp), %rcx
testq %rcx, %rcx
je 0x22fe5e
lock
decl (%rcx)
jne 0x22fe5e
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
je 0x22fe4d
movq (%rdi), %rcx
movl %eax, %ebx
callq *0x18(%rcx)
movl %ebx, %eax
jmp 0x22fe5e
testq %rsi, %rsi
je 0x22fe5e
movq %rsi, %rdi
movl %eax, %ebx
callq 0x563b0
movl %ebx, %eax
addq $0x108, %rsp # imm = 0x108
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x22fe76
jmp 0x22feb7
jmp 0x22fe76
movq %rax, %rbx
movq 0x68(%rsp), %rax
testq %rax, %rax
je 0x22feaf
lock
decl (%rax)
jne 0x22feaf
movq 0x60(%rsp), %rsi
movq 0x80(%rsp), %rdi
testq %rdi, %rdi
jne 0x22fea9
testq %rsi, %rsi
je 0x22feaf
movq %rsi, %rdi
callq 0x563b0
jmp 0x22feaf
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/x86/pooling_x86.cpp
|
virtual thunk to ncnn::Pooling_x86_avx512::create_pipeline(ncnn::Option const&)
|
// When adaptive pooling is requested, opt out of every optimized
// storage/packing capability so this layer receives plain fp32 tensors;
// otherwise leave the defaults untouched.
int Pooling_x86_avx512::create_pipeline(const Option& /*opt*/)
{
    if (!adaptive_pooling)
        return 0;

    support_packing = false;
    support_bf16_storage = false;
    support_fp16_storage = false;
    support_int8_storage = false;
    support_tensor_storage = false;
    return 0;
}
|
movq (%rdi), %rax
movq -0x30(%rax), %rcx
leaq (%rdi,%rcx), %rax
movq (%rdi,%rcx), %rcx
movq -0x18(%rcx), %rdx
cmpl $0x0, 0x100(%rdx,%rax)
je 0x23020a
xorl %esi, %esi
movb %sil, 0xb(%rax,%rdx)
movq -0x18(%rcx), %rdx
movb %sil, 0xc(%rax,%rdx)
movq -0x18(%rcx), %rdx
movb %sil, 0xd(%rax,%rdx)
movq -0x18(%rcx), %rdx
movb %sil, 0xe(%rax,%rdx)
movq -0x18(%rcx), %rcx
movb %sil, 0x10(%rax,%rcx)
xorl %eax, %eax
retq
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/pooling_x86_avx512.cpp
|
int ncnn::binary_op_6_11_16_25<ncnn::BinaryOp_x86_functor::binary_op_pow>(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&)
|
// Scalar-broadcast binary op (ncnn broadcast types 6/11/16/25):
// c[i] = op(a[i], b[0]) for every element of a. Only the first element of b
// participates; it is splatted across all SIMD lanes once per channel.
static int binary_op_6_11_16_25(const Mat& a, const Mat& b, Mat& c, const Option& opt)
{
    Op op; // stateless functor supplying func / func_pack4 / func_pack8 / func_pack16
    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    // flat per-channel element count, including every packed lane
    int size = w * h * d * elempack;
    // type 6 11 16 25
    c.create_like(a, opt.blob_allocator);
    if (c.empty())
        return -100; // ncnn convention: -100 signals allocation failure
#pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = a.channel(q);
        const float b0 = b[0]; // the single broadcast operand
        float* outptr = c.channel(q);
        int i = 0; // elements of this channel processed so far
#if __SSE2__
#if __AVX__
#if __AVX512F__
        __m512 _b0_avx512 = _mm512_set1_ps(b0);
        // 16-wide main loop
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            __m512 _outp = op.func_pack16(_p, _b0_avx512);
            _mm512_storeu_ps(outptr, _outp);
            ptr += 16;
            outptr += 16;
        }
#endif // __AVX512F__
        __m256 _b0_avx = _mm256_set1_ps(b0);
        // 8-wide loop picks up where the 16-wide loop left off
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            __m256 _outp = op.func_pack8(_p, _b0_avx);
            _mm256_storeu_ps(outptr, _outp);
            ptr += 8;
            outptr += 8;
        }
#endif // __AVX__
        __m128 _b0 = _mm_set1_ps(b0);
        for (; i + 3 < size; i += 4)
        {
            // NOTE(review): _mm_load_ps/_mm_store_ps require 16-byte
            // alignment, unlike the unaligned loads used by the wider paths
            // above — presumably Mat channel data is aligned; confirm.
            __m128 _p = _mm_load_ps(ptr);
            __m128 _outp = op.func_pack4(_p, _b0);
            _mm_store_ps(outptr, _outp);
            ptr += 4;
            outptr += 4;
        }
#endif // __SSE2__
        // scalar tail (handles all elements when no SIMD path is compiled in)
        for (; i < size; i++)
        {
            *outptr = op.func(*ptr, b0);
            ptr += 1;
            outptr += 1;
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %r13
movq %rsi, 0x20(%rsp)
movq %rdi, %rsi
movl 0x30(%rdi), %edx
movl 0x38(%rdi), %eax
movq %rax, 0x8(%rsp)
imull 0x2c(%rdi), %edx
imull 0x34(%rdi), %edx
imull 0x18(%rdi), %edx
movl %edx, 0x4(%rsp)
movq %r13, %rdi
movq %rsi, 0x18(%rsp)
movq %rcx, %rdx
callq 0x5b4ec
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%r13)
je 0x2af3cb
movslq 0x38(%r13), %rcx
imulq 0x40(%r13), %rcx
testq %rcx, %rcx
je 0x2af3cb
cmpl $0x0, 0x8(%rsp)
jle 0x2af3c9
xorl %ecx, %ecx
leaq 0x3(%rsp), %rbp
leaq 0x40(%rsp), %rbx
leaq 0x50(%rsp), %r14
movq %r13, 0x10(%rsp)
movq 0x18(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rcx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x40(%r13), %r12
imulq %rcx, %r12
imulq 0x10(%r13), %r12
addq (%r13), %r12
movq 0x20(%rsp), %rax
movq (%rax), %rax
movss (%rax), %xmm0
movaps %xmm0, 0x30(%rsp)
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x50(%rsp)
movl 0x4(%rsp), %edx
cmpl $0x4, %edx
movq %rcx, 0x28(%rsp)
jl 0x2af382
xorl %r13d, %r13d
movaps (%r15), %xmm0
movaps %xmm0, 0x40(%rsp)
movq %rbp, %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq 0x2af3da
movl 0x4(%rsp), %edx
movaps %xmm0, (%r12)
addq $0x10, %r15
addq $0x10, %r12
leal 0x4(%r13), %eax
addl $0x7, %r13d
cmpl %edx, %r13d
movl %eax, %r13d
jl 0x2af348
jmp 0x2af384
xorl %eax, %eax
movl %edx, %r13d
subl %eax, %r13d
jle 0x2af3ac
xorl %ebp, %ebp
movss (%r15,%rbp,4), %xmm0
movaps 0x30(%rsp), %xmm1
callq 0x560e0
movss %xmm0, (%r12,%rbp,4)
incq %rbp
cmpl %ebp, %r13d
jne 0x2af38e
movq 0x28(%rsp), %rcx
incq %rcx
cmpq 0x8(%rsp), %rcx
movq 0x10(%rsp), %r13
leaq 0x3(%rsp), %rbp
jne 0x2af2f7
xorl %eax, %eax
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/ysh329[P]ncnn/src/layer/x86/binaryop_x86.cpp
|
int ncnn::binary_op_7_13_19_29<ncnn::BinaryOp_x86_avx512_functor::binary_op_add>(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&)
|
// Elementwise binary op (ncnn broadcast types 7/13/19/29): a and b share an
// identical layout, so op(a[i], b[i]) is applied lane-for-lane into c, which
// is allocated to mirror a.
static int binary_op_7_13_19_29(const Mat& a, const Mat& b, Mat& c, const Option& opt)
{
    Op op;
    const int w = a.w;
    const int h = a.h;
    const int d = a.d;
    const int channels = a.c;
    const int elempack = a.elempack;
    const int size = w * h * d * elempack;
    // type 7 13 19 29
    c.create_like(a, opt.blob_allocator);
    if (c.empty())
        return -100;
#pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* pa = a.channel(q);
        const float* pb = b.channel(q);
        float* pc = c.channel(q);
        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        // 16 floats per iteration
        while (i + 15 < size)
        {
            __m512 _x = _mm512_loadu_ps(pa);
            __m512 _y = _mm512_loadu_ps(pb);
            _mm512_storeu_ps(pc, op.func_pack16(_x, _y));
            pa += 16;
            pb += 16;
            pc += 16;
            i += 16;
        }
#endif // __AVX512F__
        // 8 floats per iteration
        while (i + 7 < size)
        {
            __m256 _x = _mm256_loadu_ps(pa);
            __m256 _y = _mm256_loadu_ps(pb);
            _mm256_storeu_ps(pc, op.func_pack8(_x, _y));
            pa += 8;
            pb += 8;
            pc += 8;
            i += 8;
        }
#endif // __AVX__
        // 4 floats per iteration (aligned access, as in the reference code)
        while (i + 3 < size)
        {
            __m128 _x = _mm_load_ps(pa);
            __m128 _y = _mm_load_ps(pb);
            _mm_store_ps(pc, op.func_pack4(_x, _y));
            pa += 4;
            pb += 4;
            pc += 4;
            i += 4;
        }
#endif // __SSE2__
        // scalar tail
        while (i < size)
        {
            *pc++ = op.func(*pa++, *pb++);
            i++;
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
movl 0x30(%rdi), %ebp
movl 0x38(%rdi), %r12d
imull 0x2c(%rdi), %ebp
imull 0x34(%rdi), %ebp
imull 0x18(%rdi), %ebp
movq %rdx, %rdi
movq %r15, %rsi
movq %rcx, %rdx
callq 0x5b4ec
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x2cd619
movslq 0x38(%rbx), %rcx
imulq 0x40(%rbx), %rcx
testq %rcx, %rcx
je 0x2cd619
testl %r12d, %r12d
jle 0x2cd617
movl %ebp, %eax
andl $-0x10, %eax
xorl %ecx, %ecx
movq 0x40(%r15), %rdx
imulq %rcx, %rdx
imulq 0x10(%r15), %rdx
addq (%r15), %rdx
movq 0x40(%r14), %rsi
imulq %rcx, %rsi
imulq 0x10(%r14), %rsi
addq (%r14), %rsi
movq 0x40(%rbx), %rdi
imulq %rcx, %rdi
imulq 0x10(%rbx), %rdi
addq (%rbx), %rdi
cmpl $0x10, %ebp
jl 0x2cd57b
movl $0xf, %r8d
vmovups (%rsi), %zmm0
vaddps (%rdx), %zmm0, %zmm0
vmovups %zmm0, (%rdi)
addq $0x40, %rdx
addq $0x40, %rsi
addq $0x40, %rdi
addl $0x10, %r8d
cmpl %ebp, %r8d
jl 0x2cd54f
movl %eax, %r8d
jmp 0x2cd57e
xorl %r8d, %r8d
movl %r8d, %r9d
orl $0x7, %r9d
cmpl %ebp, %r9d
jge 0x2cd5b2
vmovups (%rsi), %ymm0
vaddps (%rdx), %ymm0, %ymm0
vmovups %ymm0, (%rdi)
addq $0x20, %rdx
addq $0x20, %rsi
addq $0x20, %rdi
leal 0x8(%r8), %r9d
addl $0xf, %r8d
cmpl %ebp, %r8d
movl %r9d, %r8d
jl 0x2cd58a
movl %r8d, %r9d
orl $0x3, %r9d
cmpl %ebp, %r9d
jge 0x2cd5e6
vmovaps (%rsi), %xmm0
vaddps (%rdx), %xmm0, %xmm0
vmovaps %xmm0, (%rdi)
addq $0x10, %rdx
addq $0x10, %rsi
addq $0x10, %rdi
leal 0x4(%r8), %r9d
addl $0x7, %r8d
cmpl %ebp, %r8d
movl %r9d, %r8d
jl 0x2cd5be
movl %ebp, %r9d
subl %r8d, %r9d
jle 0x2cd60b
xorl %r8d, %r8d
vmovss (%rsi,%r8,4), %xmm0
vaddss (%rdx,%r8,4), %xmm0, %xmm0
vmovss %xmm0, (%rdi,%r8,4)
incq %r8
cmpl %r8d, %r9d
jne 0x2cd5f1
incq %rcx
cmpq %r12, %rcx
jne 0x2cd514
xorl %eax, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/binaryop_x86_avx512.cpp
|
int ncnn::binary_op_6_11_16_25<ncnn::BinaryOp_x86_avx512_functor::binary_op_pow>(ncnn::Mat const&, ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&)
|
// Scalar-broadcast binary op (ncnn broadcast types 6/11/16/25), AVX-512
// functor instantiation: c[i] = op(a[i], b[0]) for every element of a. Only
// the first element of b participates; it is splatted across all lanes.
static int binary_op_6_11_16_25(const Mat& a, const Mat& b, Mat& c, const Option& opt)
{
    Op op; // stateless functor supplying func / func_pack4 / func_pack8 / func_pack16
    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    // flat per-channel element count, including every packed lane
    int size = w * h * d * elempack;
    // type 6 11 16 25
    c.create_like(a, opt.blob_allocator);
    if (c.empty())
        return -100; // ncnn convention: -100 signals allocation failure
#pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = a.channel(q);
        const float b0 = b[0]; // the single broadcast operand
        float* outptr = c.channel(q);
        int i = 0; // elements of this channel processed so far
#if __SSE2__
#if __AVX__
#if __AVX512F__
        __m512 _b0_avx512 = _mm512_set1_ps(b0);
        // 16-wide main loop
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            __m512 _outp = op.func_pack16(_p, _b0_avx512);
            _mm512_storeu_ps(outptr, _outp);
            ptr += 16;
            outptr += 16;
        }
#endif // __AVX512F__
        __m256 _b0_avx = _mm256_set1_ps(b0);
        // 8-wide loop picks up where the 16-wide loop left off
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            __m256 _outp = op.func_pack8(_p, _b0_avx);
            _mm256_storeu_ps(outptr, _outp);
            ptr += 8;
            outptr += 8;
        }
#endif // __AVX__
        __m128 _b0 = _mm_set1_ps(b0);
        for (; i + 3 < size; i += 4)
        {
            // NOTE(review): _mm_load_ps/_mm_store_ps require 16-byte
            // alignment, unlike the unaligned loads used by the wider paths
            // above — presumably Mat channel data is aligned; confirm.
            __m128 _p = _mm_load_ps(ptr);
            __m128 _outp = op.func_pack4(_p, _b0);
            _mm_store_ps(outptr, _outp);
            ptr += 4;
            outptr += 4;
        }
#endif // __SSE2__
        // scalar tail (handles all elements when no SIMD path is compiled in)
        for (; i < size; i++)
        {
            *outptr = op.func(*ptr, b0);
            ptr += 1;
            outptr += 1;
        }
    }
    return 0;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x180, %rsp # imm = 0x180
movq %rdx, %rbx
movq %rsi, 0x30(%rsp)
movq %rdi, %rsi
movl 0x30(%rdi), %r14d
movl 0x38(%rdi), %eax
movq %rax, 0x18(%rsp)
imull 0x2c(%rdi), %r14d
imull 0x34(%rdi), %r14d
imull 0x18(%rdi), %r14d
movq %rdx, %rdi
movq %rsi, 0x28(%rsp)
movq %rcx, %rdx
callq 0x5b4ec
movl $0xffffff9c, %eax # imm = 0xFFFFFF9C
cmpq $0x0, (%rbx)
je 0x2cf3a6
movslq 0x38(%rbx), %rcx
imulq 0x40(%rbx), %rcx
testq %rcx, %rcx
je 0x2cf3a6
cmpl $0x0, 0x18(%rsp)
jle 0x2cf3a4
xorl %ecx, %ecx
leaq 0x80(%rsp), %rsi
movq %rbx, 0x20(%rsp)
movl %r14d, 0x14(%rsp)
movq 0x28(%rsp), %rax
movq 0x40(%rax), %r15
imulq %rcx, %r15
imulq 0x10(%rax), %r15
addq (%rax), %r15
movq 0x40(%rbx), %r13
movq %rcx, 0x38(%rsp)
imulq %rcx, %r13
imulq 0x10(%rbx), %r13
addq (%rbx), %r13
movq 0x30(%rsp), %rax
movq (%rax), %rax
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xc0(%rsp)
vmovaps %zmm0, 0x100(%rsp)
cmpl $0x10, %r14d
jl 0x2cf27c
xorl %ebx, %ebx
movl 0x14(%rsp), %r14d
vmovups (%r15), %zmm0
vmovaps %zmm0, 0x80(%rsp)
leaq 0x13(%rsp), %rdi
leaq 0x100(%rsp), %rdx
callq 0x2cf608
leaq 0x80(%rsp), %rsi
vmovups %zmm0, (%r13)
addq $0x40, %r15
addq $0x40, %r13
leal 0x10(%rbx), %r12d
addl $0x1f, %ebx
cmpl %r14d, %ebx
movl %r12d, %ebx
jl 0x2cf234
jmp 0x2cf284
xorl %r12d, %r12d
movl 0x14(%rsp), %r14d
vbroadcastss 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x80(%rsp)
movl %r12d, %eax
orl $0x7, %eax
cmpl %r14d, %eax
jge 0x2cf2ea
leaq 0x40(%rsp), %r14
vmovups (%r15), %ymm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0x13(%rsp), %rdi
movq %r14, %rsi
leaq 0x80(%rsp), %rdx
callq 0x2cf7d6
vmovups %ymm0, (%r13)
addq $0x20, %r15
addq $0x20, %r13
leal 0x8(%r12), %ebx
addl $0xf, %r12d
cmpl 0x14(%rsp), %r12d
movl %ebx, %r12d
jl 0x2cf2a7
jmp 0x2cf2f2
movl %r12d, %ebx
leaq 0x40(%rsp), %r14
vbroadcastss 0xc0(%rsp), %xmm0
vmovaps %xmm0, 0x40(%rsp)
movl %ebx, %eax
orl $0x3, %eax
cmpl 0x14(%rsp), %eax
leaq 0x13(%rsp), %r12
jge 0x2cf34c
vmovaps (%r15), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq %r12, %rdi
leaq 0x70(%rsp), %rsi
movq %r14, %rdx
vzeroupper
callq 0x2cf978
vmovaps %xmm0, (%r13)
addq $0x10, %r15
addq $0x10, %r13
leal 0x4(%rbx), %eax
addl $0x7, %ebx
cmpl 0x14(%rsp), %ebx
movl %eax, %ebx
jl 0x2cf312
movl 0x14(%rsp), %eax
movl %eax, %r12d
subl %ebx, %r12d
jle 0x2cf37f
xorl %ebx, %ebx
vmovss (%r15,%rbx,4), %xmm0
vmovaps 0xc0(%rsp), %zmm1
vzeroupper
callq 0x560e0
vmovss %xmm0, (%r13,%rbx,4)
incq %rbx
cmpl %ebx, %r12d
jne 0x2cf35a
movq 0x38(%rsp), %rcx
incq %rcx
cmpq 0x18(%rsp), %rcx
movq 0x20(%rsp), %rbx
movl 0x14(%rsp), %r14d
leaq 0x80(%rsp), %rsi
jne 0x2cf1df
xorl %eax, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/ysh329[P]ncnn/build_O3/src/layer/x86/binaryop_x86_avx512.cpp
|
virtual thunk to ncnn::UnaryOp_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Dispatches the requested elementwise unary operation to the matching
// unary_op_inplace<> functor instantiation, mutating bottom_top_blob in
// place. Unrecognized op ids are a silent no-op returning 0, exactly like
// falling off the end of the original if-chain.
int UnaryOp_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    using namespace UnaryOp_x86_functor;

    switch (op_type)
    {
    case Operation_ABS:
        return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt);
    case Operation_NEG:
        return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt);
    case Operation_FLOOR:
        return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt);
    case Operation_CEIL:
        return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt);
    case Operation_SQUARE:
        return unary_op_inplace<unary_op_square>(bottom_top_blob, opt);
    case Operation_SQRT:
        return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt);
    case Operation_RSQRT:
        return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt);
    case Operation_EXP:
        return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt);
    case Operation_LOG:
        return unary_op_inplace<unary_op_log>(bottom_top_blob, opt);
    case Operation_SIN:
        return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt);
    case Operation_COS:
        return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt);
    case Operation_TAN:
        return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt);
    case Operation_ASIN:
        return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt);
    case Operation_ACOS:
        return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt);
    case Operation_ATAN:
        return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt);
    case Operation_RECIPROCAL:
        return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt);
    case Operation_TANH:
        return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt);
    default:
        return 0;
    }
}
|
pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x2fc5d0
xorl %eax, %eax
popq %rcx
retq
nop
|
/ysh329[P]ncnn/src/layer/x86/unaryop_x86.cpp
|
ncnn::convolutiondepthwise(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, int, int, int, int, int, int, int, int, ncnn::Mat const&, ncnn::Option const&)
|
// Reference (non-vectorized) depthwise / grouped convolution.
// Two cases are handled:
//   - true depthwise (inch == group == outch): one filter per channel;
//   - generic group convolution: each group maps inch_g input channels to
//     outch_g output channels.
// top_blob must already be allocated by the caller (its w/h/c define the
// output extent). The activation in activation_type/activation_params is
// applied per output element via activation_ss.
static int convolutiondepthwise(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data, const Mat& bias_data, int kernel_w, int kernel_h, int stride_w, int stride_h, int dilation_w, int dilation_h, int group, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int bias_term = bias_data.empty() ? 0 : 1; // bias applied only when bias_data is non-empty
    const int maxk = kernel_w * kernel_h; // taps per filter
    // kernel offsets
    // space_ofs[k] is the flat offset (in floats) of kernel tap k within a
    // row-major input channel, with dilation baked in, relative to the
    // window's top-left sample.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // advance from the end of one (dilated) kernel row to the start of
        // the next: dilation_h input rows down, minus the columns walked
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    // depth-wise
    if (inch == group && group == outch)
    {
#pragma omp parallel for num_threads(opt.num_threads)
        for (int g = 0; g < group; g++)
        {
            float* outptr = top_blob.channel(g);
            const float* kptr = (const float*)weight_data + maxk * g; // filter for channel g
            const Mat m = bottom_blob.channel(g);
            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = 0.f;
                    if (bias_term)
                        sum = bias_data[g];
                    // top-left input sample of this output position's window
                    const float* sptr = m.row(i * stride_h) + j * stride_w;
                    for (int k = 0; k < maxk; k++)
                    {
                        float val = sptr[space_ofs[k]];
                        // NOTE(review): shadows the outer 'w' (input width); harmless here
                        float w = kptr[k];
                        sum += val * w;
                    }
                    outptr[j] = activation_ss(sum, activation_type, activation_params);
                }
                outptr += outw;
            }
        }
    }
    else
    {
        // group convolution
        const int inch_g = inch / group;   // input channels per group
        const int outch_g = outch / group; // output channels per group
#ifdef _WIN32
#pragma omp parallel for num_threads(opt.num_threads)
#else
#pragma omp parallel for collapse(2) num_threads(opt.num_threads)
#endif
        for (int g = 0; g < group; g++)
        {
            for (int p = 0; p < outch_g; p++)
            {
                float* outptr = top_blob.channel(g * outch_g + p);
                // start of group g's weights: each group holds
                // outch_g filters of inch_g * maxk coefficients
                const float* weight_data_ptr = (const float*)weight_data + maxk * inch_g * outch_g * g;
                // shadowed variable for less openmp task args
                const int outw = top_blob.w;
                const int outh = top_blob.h;
                for (int i = 0; i < outh; i++)
                {
                    for (int j = 0; j < outw; j++)
                    {
                        float sum = 0.f;
                        if (bias_term)
                            sum = bias_data[outch_g * g + p];
                        // filter p of this group; advanced by maxk per input channel below
                        const float* kptr = weight_data_ptr + maxk * inch_g * p;
                        for (int q = 0; q < inch_g; q++)
                        {
                            const Mat m = bottom_blob.channel(inch_g * g + q);
                            const float* sptr = m.row(i * stride_h) + j * stride_w;
                            for (int k = 0; k < maxk; k++)
                            {
                                float val = sptr[space_ofs[k]];
                                // NOTE(review): shadows the outer 'w' (input width); harmless here
                                float w = kptr[k];
                                sum += val * w;
                            }
                            kptr += maxk;
                        }
                        outptr[j] = activation_ss(sum, activation_type, activation_params);
                    }
                    outptr += outw;
                }
            }
        }
    }
    return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x138, %rsp # imm = 0x138
movl %r9d, %ebx
movl %r8d, %r12d
movq %rsi, %r13
movq %rdi, 0x48(%rsp)
cmpq $0x0, (%rcx)
movq %rcx, 0xb8(%rsp)
movq %rdx, 0x10(%rsp)
je 0x30ae40
movslq 0x38(%rcx), %rax
imulq 0x40(%rcx), %rax
testq %rax, %rax
sete 0xb(%rsp)
jmp 0x30ae45
movb $0x1, 0xb(%rsp)
movq 0x48(%rsp), %rax
movl 0x2c(%rax), %ebp
movslq 0x2c(%r13), %rcx
movq %rcx, 0x68(%rsp)
movl 0x30(%r13), %ecx
movq %rcx, 0x78(%rsp)
movl 0x38(%rax), %r15d
movl 0x38(%r13), %eax
movl %eax, 0x28(%rsp)
movl %ebx, %eax
imull %r12d, %eax
movl %eax, 0xc(%rsp)
movslq %eax, %rsi
leaq 0x120(%rsp), %r14
leaq 0x37(%rsp), %rdx
movq %r14, %rdi
movq %rsi, 0x18(%rsp)
callq 0x6aa92
movq (%r14), %r14
testl %ebx, %ebx
jle 0x30aee1
imull 0x188(%rsp), %ebp
movl 0x180(%rsp), %eax
movl %eax, %ecx
imull %r12d, %ecx
subl %ecx, %ebp
movl %r12d, %ecx
xorl %edx, %edx
xorl %esi, %esi
xorl %edi, %edi
testl %r12d, %r12d
jle 0x30aed9
movslq %edx, %r8
leaq (%r14,%r8,4), %r9
xorl %r8d, %r8d
movl %esi, (%r9,%r8,4)
addl %eax, %esi
incq %r8
cmpl %r8d, %ecx
jne 0x30aec8
addl %r8d, %edx
addl %ebp, %esi
incl %edi
cmpl %ebx, %edi
jne 0x30aeb9
movl 0x198(%rsp), %eax
movq %rax, 0x40(%rsp)
movl 0x178(%rsp), %ecx
movl 0x170(%rsp), %esi
movl %r15d, %r8d
movl 0x190(%rsp), %ebx
xorl %ebx, %r8d
movl 0x28(%rsp), %edi
movl %edi, %edx
xorl %ebx, %edx
orl %r8d, %edx
jne 0x30b243
testl %ebx, %ebx
movb 0xb(%rsp), %dil
movl 0xc(%rsp), %r8d
movq 0x18(%rsp), %r9
movq 0x68(%rsp), %r11
jle 0x30b6b1
movq (%r13), %rax
movq %rax, 0x70(%rsp)
movq 0x40(%r13), %rax
imulq 0x10(%r13), %rax
movq %rax, 0xa0(%rsp)
movq 0x10(%rsp), %rax
movq (%rax), %r12
movq 0x48(%rsp), %r10
movslq 0x2c(%r10), %rax
movq (%r10), %rdx
movq %rdx, 0x98(%rsp)
movq 0x10(%r10), %rdx
movq 0x40(%r10), %r10
imulq %rdx, %r10
movq %r10, 0x48(%rsp)
imulq %rdx, %rax
movslq %esi, %r10
movslq %ecx, %rcx
imulq %rax, %rcx
movq %rcx, 0xc0(%rsp)
movl %ebx, %eax
movq %rax, 0x50(%rsp)
movl %r8d, %r13d
shlq $0x2, %r9
movq $0x0, 0x10(%rsp)
movq 0x40(%rsp), %rax
decl %eax
movq %rax, 0x40(%rsp)
movq %r9, 0x18(%rsp)
movq %r10, 0x38(%rsp)
cmpl $0x0, 0x78(%rsp)
movq 0x40(%rsp), %rsi
jle 0x30b223
movq 0x48(%rsp), %rcx
movq 0x10(%rsp), %rax
imulq %rax, %rcx
addq 0x98(%rsp), %rcx
movq %rcx, 0x58(%rsp)
movq 0xa0(%rsp), %r15
imulq %rax, %r15
addq 0x70(%rsp), %r15
movq $0x0, 0x60(%rsp)
testl %r11d, %r11d
jle 0x30b207
movq 0xb8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x28(%rsp)
movq 0x60(%rsp), %rbx
imulq 0xc0(%rsp), %rbx
addq 0x58(%rsp), %rbx
movq 0x1a0(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x20(%rsp)
xorl %ebp, %ebp
xorps %xmm4, %xmm4
testb %dil, %dil
jne 0x30b059
movq 0x10(%rsp), %rax
movq 0x28(%rsp), %rcx
movss (%rcx,%rax,4), %xmm4
testl %r8d, %r8d
jle 0x30b086
movq %rbp, %rax
imulq %r10, %rax
leaq (%rbx,%rax,4), %rax
xorl %ecx, %ecx
movslq (%r14,%rcx,4), %rdx
movss (%r12,%rcx,4), %xmm0
mulss (%rax,%rdx,4), %xmm0
addss %xmm0, %xmm4
incq %rcx
cmpq %rcx, %r13
jne 0x30b06b
cmpl $0x5, %esi
ja 0x30b0a3
leaq 0x114b26(%rip), %rcx # 0x41fbb8
movslq (%rcx,%rsi,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x10bde5(%rip), %xmm4 # 0x416e88
movaps %xmm4, %xmm0
jmp 0x30b1f5
movaps %xmm4, %xmm0
movaps %xmm4, 0xd0(%rsp)
callq 0x563e0
addss 0x109c55(%rip), %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x38(%rsp), %r10
movq 0x68(%rsp), %r11
movq 0x40(%rsp), %rsi
movq 0x18(%rsp), %r9
movl 0xc(%rsp), %r8d
movb 0xb(%rsp), %dil
mulss 0xd0(%rsp), %xmm0
jmp 0x30b1f5
movq 0x20(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x30b1f2
jmp 0x30b1f5
movss 0x10bf38(%rip), %xmm2 # 0x417058
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x1091c2(%rip), %xmm0 # 0x4142f0
cmpltss 0x10bf25(%rip), %xmm4 # 0x41705c
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x563e0
movq 0x38(%rsp), %r10
movq 0x68(%rsp), %r11
movq 0x40(%rsp), %rsi
movq 0x18(%rsp), %r9
movl 0xc(%rsp), %r8d
movb 0xb(%rsp), %dil
movaps %xmm0, %xmm1
movss 0x109ba4(%rip), %xmm0 # 0x414d18
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x30b1f5
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x109b87(%rip), %xmm2 # 0x414d18
andps %xmm2, %xmm1
movq 0x20(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x30b1f5
movq 0x20(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x10912f(%rip), %xmm3 # 0x4142f0
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x30b1f5
movss 0x109b43(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x30b0a3
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq %r11, %rbp
jne 0x30b042
leaq (%r15,%r11,4), %r15
movq 0x60(%rsp), %rcx
incq %rcx
movq %rcx, 0x60(%rsp)
cmpq 0x78(%rsp), %rcx
jne 0x30b004
movq 0x10(%rsp), %rcx
incq %rcx
addq %r9, %r12
movq %rcx, 0x10(%rsp)
cmpq 0x50(%rsp), %rcx
jne 0x30afbf
jmp 0x30b6b1
movl %r15d, %eax
cltd
idivl %ebx
movl %eax, 0x20(%rsp)
testl %ebx, %ebx
movb 0xb(%rsp), %r9b
movl 0xc(%rsp), %r10d
movq 0x18(%rsp), %r11
jle 0x30b6b1
movl %edi, %eax
cltd
idivl %ebx
movq (%r13), %rdx
movq %rdx, 0x108(%rsp)
movq 0x40(%r13), %rdx
imulq 0x10(%r13), %rdx
movq %rdx, 0x100(%rsp)
movq 0x10(%rsp), %rdx
movq (%rdx), %rdx
movq %rdx, 0xf0(%rsp)
movl 0x20(%rsp), %edx
movl %edx, %edi
imull %r11d, %edi
movl 0x2c(%r13), %r8d
movl 0x30(%r13), %r15d
movq %r15, 0x70(%rsp)
movq %r8, 0x10(%rsp)
movslq %r8d, %r8
movq %r8, 0xa0(%rsp)
movslq %esi, %rsi
movq %rsi, 0xd0(%rsp)
movslq %ecx, %rcx
movq %rcx, 0x98(%rsp)
movslq %edi, %rcx
movq %rax, 0xb0(%rsp)
cltq
movl %ebx, %esi
movq %rsi, 0xf8(%rsp)
movl %edx, %r12d
movl %r11d, %ebp
movq %rax, 0xe8(%rsp)
imull %edx, %eax
imull %r11d, %eax
movl %eax, 0xac(%rsp)
shlq $0x2, %rcx
movq %rcx, 0x118(%rsp)
shlq $0x2, %r11
movq $0x0, 0x88(%rsp)
movq $0x0, 0x80(%rsp)
movq %r11, 0x18(%rsp)
cmpl $0x0, 0xb0(%rsp)
jle 0x30b679
movslq 0x88(%rsp), %rax
movq 0xf0(%rsp), %rcx
leaq (%rcx,%rax,4), %rax
movq %rax, 0x38(%rsp)
movq 0x80(%rsp), %rcx
movq %rcx, %rax
imulq 0xe8(%rsp), %rax
movq %rax, 0x110(%rsp)
movl 0x20(%rsp), %eax
movl %eax, %r13d
imull %ecx, %r13d
movq $0x0, 0x90(%rsp)
cmpl $0x0, 0x70(%rsp)
jle 0x30b646
movq 0x110(%rsp), %rax
movq 0x90(%rsp), %rcx
addq %rax, %rcx
movq 0x100(%rsp), %rdx
movq %rcx, 0x78(%rsp)
imulq %rcx, %rdx
addq 0x108(%rsp), %rdx
movq %rdx, 0x28(%rsp)
movq $0x0, 0x50(%rsp)
cmpl $0x0, 0x10(%rsp)
jle 0x30b618
movq 0xb8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x60(%rsp)
movq 0x48(%rsp), %rdx
movslq 0x2c(%rdx), %rax
movq 0x10(%rdx), %rcx
movq 0x40(%rdx), %rbx
imulq %rcx, %rbx
movq 0x50(%rsp), %rsi
imulq 0x98(%rsp), %rsi
imulq %rax, %rsi
imulq %rcx, %rsi
addq (%rdx), %rsi
movq %rsi, 0x68(%rsp)
movq 0x1a0(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x58(%rsp)
xorl %r15d, %r15d
xorps %xmm4, %xmm4
testb %r9b, %r9b
jne 0x30b44a
movq 0x78(%rsp), %rax
movq 0x60(%rsp), %rcx
movss (%rcx,%rax,4), %xmm4
cmpl $0x0, 0x20(%rsp)
jle 0x30b4a5
movq %r15, %rax
imulq 0xd0(%rsp), %rax
movq 0x68(%rsp), %rcx
leaq (%rcx,%rax,4), %rax
movq 0x38(%rsp), %rcx
xorl %edx, %edx
testl %r10d, %r10d
jle 0x30b49a
leaq (%rdx,%r13), %rsi
imulq %rbx, %rsi
addq %rax, %rsi
xorl %edi, %edi
movslq (%r14,%rdi,4), %r8
movss (%rcx,%rdi,4), %xmm0
mulss (%rsi,%r8,4), %xmm0
addss %xmm0, %xmm4
incq %rdi
cmpq %rdi, %rbp
jne 0x30b47f
incq %rdx
addq %r11, %rcx
cmpq %r12, %rdx
jne 0x30b46d
movl 0x198(%rsp), %eax
decl %eax
cmpl $0x5, %eax
ja 0x30b4cb
leaq 0x1146e6(%rip), %rcx # 0x41fba0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
maxss 0x10b9bd(%rip), %xmm4 # 0x416e88
movaps %xmm4, %xmm0
jmp 0x30b5ff
movaps %xmm4, %xmm0
movaps %xmm4, 0xc0(%rsp)
callq 0x563e0
addss 0x10982d(%rip), %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x18(%rsp), %r11
movl 0xc(%rsp), %r10d
movb 0xb(%rsp), %r9b
mulss 0xc0(%rsp), %xmm0
jmp 0x30b5ff
movq 0x58(%rsp), %rax
maxss (%rax), %xmm4
movss 0x4(%rax), %xmm1
ucomiss %xmm1, %xmm4
movaps %xmm4, %xmm0
ja 0x30b5fc
jmp 0x30b5ff
movss 0x10bb1f(%rip), %xmm2 # 0x417058
minss %xmm2, %xmm4
movaps %xmm4, %xmm0
xorps 0x108da9(%rip), %xmm0 # 0x4142f0
cmpltss 0x10bb0c(%rip), %xmm4 # 0x41705c
movaps %xmm4, %xmm1
andnps %xmm0, %xmm1
andps %xmm2, %xmm4
orps %xmm1, %xmm4
movaps %xmm4, %xmm0
callq 0x563e0
movq 0x18(%rsp), %r11
movl 0xc(%rsp), %r10d
movb 0xb(%rsp), %r9b
movaps %xmm0, %xmm1
movss 0x10979a(%rip), %xmm0 # 0x414d18
addss %xmm0, %xmm1
divss %xmm1, %xmm0
jmp 0x30b5ff
xorps %xmm0, %xmm0
cmpltss %xmm4, %xmm0
movaps %xmm0, %xmm1
movss 0x10977d(%rip), %xmm2 # 0x414d18
andps %xmm2, %xmm1
movq 0x58(%rsp), %rax
movss (%rax), %xmm2
andnps %xmm2, %xmm0
orps %xmm1, %xmm0
mulss %xmm4, %xmm0
jmp 0x30b5ff
movq 0x58(%rsp), %rax
movss (%rax), %xmm1
movss 0x4(%rax), %xmm2
movaps %xmm2, %xmm3
xorps 0x108d25(%rip), %xmm3 # 0x4142f0
divss %xmm1, %xmm3
xorps %xmm0, %xmm0
ucomiss %xmm3, %xmm4
jb 0x30b5ff
movss 0x109739(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
addss %xmm0, %xmm3
ucomiss %xmm3, %xmm4
ja 0x30b4cb
mulss %xmm4, %xmm1
addss %xmm2, %xmm1
mulss %xmm4, %xmm1
movaps %xmm1, %xmm0
movq 0x28(%rsp), %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
cmpq 0x10(%rsp), %r15
jne 0x30b433
movq 0xa0(%rsp), %rax
movq 0x28(%rsp), %rcx
leaq (%rcx,%rax,4), %rcx
movq %rcx, 0x28(%rsp)
movq 0x50(%rsp), %rcx
incq %rcx
movq %rcx, 0x50(%rsp)
cmpq 0x70(%rsp), %rcx
jne 0x30b3d2
movq 0x90(%rsp), %rcx
incq %rcx
movq 0x38(%rsp), %rax
addq 0x118(%rsp), %rax
movq %rax, 0x38(%rsp)
movq %rcx, 0x90(%rsp)
cmpq 0xb0(%rsp), %rcx
jne 0x30b38d
movq 0x80(%rsp), %rcx
incq %rcx
movq 0x88(%rsp), %rax
addl 0xac(%rsp), %eax
movq %rax, 0x88(%rsp)
movq %rcx, 0x80(%rsp)
cmpq 0xf8(%rsp), %rcx
jne 0x30b333
testq %r14, %r14
je 0x30b6c9
movq 0x130(%rsp), %rsi
subq %r14, %rsi
movq %r14, %rdi
callq 0x56270
addq $0x138, %rsp # imm = 0x138
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/ysh329[P]ncnn/src/layer/convolutiondepthwise.cpp
|
virtual thunk to ncnn::ConvolutionDepthWise_x86_avx512::destroy_pipeline(ncnn::Option const&)
|
int ConvolutionDepthWise_x86_avx512::destroy_pipeline(const Option& opt)
{
if (activation)
{
activation->destroy_pipeline(opt);
delete activation;
activation = 0;
}
for (int i = 0; i < (int)group_ops.size(); i++)
{
group_ops[i]->destroy_pipeline(opt);
delete group_ops[i];
}
group_ops.clear();
return 0;
}
|
pushq %rax
movq (%rdi), %rax
addq -0x38(%rax), %rdi
callq 0x315d16
xorl %eax, %eax
popq %rcx
retq
nop
|
/ysh329[P]ncnn/build_O3/src/layer/x86/convolutiondepthwise_x86_avx512.cpp
|
ncnn::convdw5x5s1_pack8_avx(ncnn::Mat const&, ncnn::Mat&, ncnn::Mat const&, ncnn::Mat const&, ncnn::Option const&)
|
// Depth-wise 5x5 convolution, stride 1, on 8-lane packed float blobs (AVX).
// Each channel group stores 8 interleaved floats, so every scalar kernel tap
// becomes one 256-bit fused multiply-accumulate over the 8 lanes.
//
// bottom_blob : padded input, one channel per group, elempack of 8
// top_blob    : pre-allocated output, same group count, elempack of 8
// kernel      : per-group weights; kernel.row(g) holds 25 taps * 8 lanes
// _bias       : optional per-group bias (8 floats each); may be empty
// opt         : threading options (num_threads drives the OpenMP loop)
static void convdw5x5s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    // Groups are independent, so they parallelize trivially.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Seed every output pixel with the bias (or zero when bias is absent).
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_setzero_ps();

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // Five consecutive input rows feed one output row (5x5 window, stride 1).
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                // Row 0 of the window: 5 taps against r0.
                __m256 _r00 = _mm256_load_ps(r0);
                __m256 _r01 = _mm256_load_ps(r0 + 8);
                __m256 _r02 = _mm256_load_ps(r0 + 16);
                __m256 _r03 = _mm256_load_ps(r0 + 24);
                __m256 _r04 = _mm256_load_ps(r0 + 32);

                __m256 _k00 = _mm256_load_ps(k0);
                __m256 _k01 = _mm256_load_ps(k0 + 8);
                __m256 _k02 = _mm256_load_ps(k0 + 16);
                __m256 _k03 = _mm256_load_ps(k0 + 24);
                __m256 _k04 = _mm256_load_ps(k0 + 32);
                k0 += 40;

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0);

                // Row 1 of the window.
                __m256 _r10 = _mm256_load_ps(r1);
                __m256 _r11 = _mm256_load_ps(r1 + 8);
                __m256 _r12 = _mm256_load_ps(r1 + 16);
                __m256 _r13 = _mm256_load_ps(r1 + 24);
                __m256 _r14 = _mm256_load_ps(r1 + 32);

                __m256 _k10 = _mm256_load_ps(k0);
                __m256 _k11 = _mm256_load_ps(k0 + 8);
                __m256 _k12 = _mm256_load_ps(k0 + 16);
                __m256 _k13 = _mm256_load_ps(k0 + 24);
                __m256 _k14 = _mm256_load_ps(k0 + 32);
                k0 += 40;

                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0);

                // Row 2 of the window.
                __m256 _r20 = _mm256_load_ps(r2);
                __m256 _r21 = _mm256_load_ps(r2 + 8);
                __m256 _r22 = _mm256_load_ps(r2 + 16);
                __m256 _r23 = _mm256_load_ps(r2 + 24);
                __m256 _r24 = _mm256_load_ps(r2 + 32);

                __m256 _k20 = _mm256_load_ps(k0);
                __m256 _k21 = _mm256_load_ps(k0 + 8);
                __m256 _k22 = _mm256_load_ps(k0 + 16);
                __m256 _k23 = _mm256_load_ps(k0 + 24);
                __m256 _k24 = _mm256_load_ps(k0 + 32);
                k0 += 40;

                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k23, _r23, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k24, _r24, _sum0);

                // Row 3 of the window.
                __m256 _r30 = _mm256_load_ps(r3);
                __m256 _r31 = _mm256_load_ps(r3 + 8);
                __m256 _r32 = _mm256_load_ps(r3 + 16);
                __m256 _r33 = _mm256_load_ps(r3 + 24);
                __m256 _r34 = _mm256_load_ps(r3 + 32);

                __m256 _k30 = _mm256_load_ps(k0);
                __m256 _k31 = _mm256_load_ps(k0 + 8);
                __m256 _k32 = _mm256_load_ps(k0 + 16);
                __m256 _k33 = _mm256_load_ps(k0 + 24);
                __m256 _k34 = _mm256_load_ps(k0 + 32);
                k0 += 40;

                _sum0 = _mm256_comp_fmadd_ps(_k30, _r30, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k31, _r31, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k32, _r32, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k33, _r33, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k34, _r34, _sum0);

                // Row 4 of the window.
                __m256 _r40 = _mm256_load_ps(r4);
                __m256 _r41 = _mm256_load_ps(r4 + 8);
                __m256 _r42 = _mm256_load_ps(r4 + 16);
                __m256 _r43 = _mm256_load_ps(r4 + 24);
                __m256 _r44 = _mm256_load_ps(r4 + 32);

                __m256 _k40 = _mm256_load_ps(k0);
                __m256 _k41 = _mm256_load_ps(k0 + 8);
                __m256 _k42 = _mm256_load_ps(k0 + 16);
                __m256 _k43 = _mm256_load_ps(k0 + 24);
                __m256 _k44 = _mm256_load_ps(k0 + 32);
                // Rewind the kernel pointer (4 * 40 floats) back to the start
                // of this group's weights for the next output pixel.
                k0 -= 160;

                _sum0 = _mm256_comp_fmadd_ps(_k40, _r40, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k41, _r41, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k42, _r42, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k43, _r43, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k44, _r44, _sum0);

                _mm256_store_ps(outptr0, _sum0);

                // Stride 1: advance every input row and the output by one
                // packed pixel (8 floats).
                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                r4 += 8;
                outptr0 += 8;
            }

            // Skip the 4 extra boundary pixels of the padded row
            // (kernel_w - 1 taps, 8 lanes each) to reach the next input row.
            r0 += 4 * 8;
            r1 += 4 * 8;
            r2 += 4 * 8;
            r3 += 4 * 8;
            r4 += 4 * 8;
        }
    }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %rdx, -0x18(%rsp)
movq %rsi, -0x28(%rsp)
movq %rdi, -0x20(%rsp)
movslq 0x38(%rdi), %rax
movq %rax, -0x8(%rsp)
testq %rax, %rax
jle 0x31bfa1
movq -0x28(%rsp), %rax
movl 0x2c(%rax), %esi
movl 0x30(%rax), %r9d
xorl %edi, %edi
movl $0x80, %r11d
movq %rcx, -0x10(%rsp)
testq %rcx, %rcx
je 0x31bd29
movq %rdi, %rax
shlq $0x5, %rax
vmovups (%rcx,%rax), %ymm0
jmp 0x31bd2d
vxorps %xmm0, %xmm0, %xmm0
testl %r9d, %r9d
jle 0x31bf8e
movq -0x28(%rsp), %rax
movq 0x40(%rax), %rbx
imulq %rdi, %rbx
imulq 0x10(%rax), %rbx
addq (%rax), %rbx
movq -0x18(%rsp), %rax
movslq 0x2c(%rax), %r14
imulq %rdi, %r14
imulq 0x10(%rax), %r14
movq (%rax), %r15
movq -0x20(%rsp), %rcx
movq 0x40(%rcx), %r12
imulq %rdi, %r12
movq 0x10(%rcx), %rax
imulq %rax, %r12
addq (%rcx), %r12
movslq 0x2c(%rcx), %r13
imulq %rax, %r13
leaq (%r12,%r13,4), %rbp
leaq (,%r13,2), %rax
addq %r13, %rax
addq %r12, %rax
leaq (%r12,%r13,2), %r10
addq %r12, %r13
xorl %ecx, %ecx
testl %esi, %esi
jle 0x31bf74
movl %esi, %r8d
xorl %edx, %edx
vmovaps (%r15,%r14), %ymm1
vmovaps 0x20(%r15,%r14), %ymm2
vmovaps 0x40(%r15,%r14), %ymm3
vmovaps 0x60(%r15,%r14), %ymm4
vmovaps 0x80(%r15,%r14), %ymm5
vfmadd132ps (%r12,%rdx), %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vfmadd231ps 0x20(%r12,%rdx), %ymm2, %ymm1 # ymm1 = (ymm2 * mem) + ymm1
vfmadd231ps 0x40(%r12,%rdx), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x60(%r12,%rdx), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x80(%r12,%rdx), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vmovaps 0xa0(%r15,%r14), %ymm2
vmovaps 0xc0(%r15,%r14), %ymm3
vmovaps 0xe0(%r15,%r14), %ymm4
vmovaps 0x100(%r15,%r14), %ymm5
vmovaps 0x120(%r15,%r14), %ymm6
vfmadd132ps (%r13,%rdx), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%r13,%rdx), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%r13,%rdx), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%r13,%rdx), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x80(%r13,%rdx), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps 0x140(%r15,%r14), %ymm1
vmovaps 0x160(%r15,%r14), %ymm3
vmovaps 0x180(%r15,%r14), %ymm4
vmovaps 0x1a0(%r15,%r14), %ymm5
vmovaps 0x1c0(%r15,%r14), %ymm6
vfmadd132ps (%r10,%rdx), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%r10,%rdx), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%r10,%rdx), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%r10,%rdx), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%r10,%rdx), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps 0x1e0(%r15,%r14), %ymm2
vmovaps 0x200(%r15,%r14), %ymm3
vmovaps 0x220(%r15,%r14), %ymm4
vmovaps 0x240(%r15,%r14), %ymm5
vmovaps 0x260(%r15,%r14), %ymm6
vfmadd132ps (%rax,%rdx), %ymm1, %ymm2 # ymm2 = (ymm2 * mem) + ymm1
vfmadd231ps 0x20(%rax,%rdx), %ymm3, %ymm2 # ymm2 = (ymm3 * mem) + ymm2
vfmadd231ps 0x40(%rax,%rdx), %ymm4, %ymm2 # ymm2 = (ymm4 * mem) + ymm2
vfmadd231ps 0x60(%rax,%rdx), %ymm5, %ymm2 # ymm2 = (ymm5 * mem) + ymm2
vfmadd231ps 0x80(%rax,%rdx), %ymm6, %ymm2 # ymm2 = (ymm6 * mem) + ymm2
vmovaps 0x280(%r15,%r14), %ymm1
vmovaps 0x2a0(%r15,%r14), %ymm3
vmovaps 0x2c0(%r15,%r14), %ymm4
vmovaps 0x2e0(%r15,%r14), %ymm5
vmovaps 0x300(%r15,%r14), %ymm6
vfmadd132ps (%rbp,%rdx), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vfmadd231ps 0x20(%rbp,%rdx), %ymm3, %ymm1 # ymm1 = (ymm3 * mem) + ymm1
vfmadd231ps 0x40(%rbp,%rdx), %ymm4, %ymm1 # ymm1 = (ymm4 * mem) + ymm1
vfmadd231ps 0x60(%rbp,%rdx), %ymm5, %ymm1 # ymm1 = (ymm5 * mem) + ymm1
vfmadd231ps 0x80(%rbp,%rdx), %ymm6, %ymm1 # ymm1 = (ymm6 * mem) + ymm1
vmovaps %ymm1, (%rbx,%rdx)
addq $0x20, %rdx
decl %r8d
jne 0x31bda8
addq %rdx, %rbx
addq %rdx, %r12
addq %rdx, %r13
addq %rdx, %r10
addq %rdx, %rax
addq %rdx, %rbp
addq %r11, %r12
addq %r11, %r13
addq %r11, %r10
addq %r11, %rax
addq %r11, %rbp
incl %ecx
cmpl %r9d, %ecx
jne 0x31bd9b
incq %rdi
cmpq -0x8(%rsp), %rdi
movq -0x10(%rsp), %rcx
jne 0x31bd16
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/ysh329[P]ncnn/src/layer/x86/convolutiondepthwise_5x5_pack8.h
|
ncnn::InstanceNorm::load_model(ncnn::ModelBin const&)
|
int InstanceNorm::load_model(const ModelBin& mb)
{
    // Without affine there are no learnable parameters to read.
    if (!affine)
        return 0;

    // Read the per-channel scale first; each mb.load advances the model
    // stream, so the order (gamma, then beta) must be preserved.
    gamma_data = mb.load(channels, 1);
    if (gamma_data.empty())
        return -100;

    // Then the per-channel shift.
    beta_data = mb.load(channels, 1);
    return beta_data.empty() ? -100 : 0;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x50, %rsp
xorl %ebx, %ebx
cmpl $0x0, 0xd8(%rdi)
je 0x369609
movq %rsi, %r15
movq %rdi, %r14
movl 0xd0(%rdi), %edx
movq (%rsi), %rax
leaq 0x8(%rsp), %r12
movq %r12, %rdi
movl $0x1, %ecx
callq *0x10(%rax)
leaq 0xe0(%r14), %r13
movq 0x8(%r12), %rax
cmpq %r12, %r13
je 0x36949e
testq %rax, %rax
je 0x369406
lock
incl (%rax)
movq 0xe8(%r14), %rax
testq %rax, %rax
je 0x36943f
lock
decl (%rax)
jne 0x36943f
movq 0xe0(%r14), %rsi
movq 0x100(%r14), %rdi
testq %rdi, %rdi
je 0x369432
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36943f
testq %rsi, %rsi
je 0x36943f
movq %rsi, %rdi
callq 0x563b0
movq 0x8(%rsp), %rax
movq %rax, 0xe0(%r14)
movq 0x10(%rsp), %rax
movq %rax, 0xe8(%r14)
movq 0x18(%rsp), %rcx
movq %rcx, 0xf0(%r14)
movl 0x20(%rsp), %ecx
movl %ecx, 0xf8(%r14)
movq 0x28(%rsp), %rcx
movq %rcx, 0x100(%r14)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x108(%r14)
movl 0x40(%rsp), %ecx
movl %ecx, 0x118(%r14)
movq 0x48(%rsp), %rcx
movq %rcx, 0x120(%r14)
testq %rax, %rax
je 0x3694cc
lock
decl (%rax)
jne 0x3694cc
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3694bf
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3694cc
testq %rsi, %rsi
je 0x3694cc
movq %rsi, %rdi
callq 0x563b0
cmpq $0x0, (%r13)
je 0x369604
movslq 0x118(%r14), %rax
imulq 0x120(%r14), %rax
testq %rax, %rax
je 0x369604
movl 0xd0(%r14), %edx
movq (%r15), %rax
movq %r12, %rdi
movq %r15, %rsi
movl $0x1, %ecx
callq *0x10(%rax)
leaq 0x128(%r14), %r15
movq 0x10(%rsp), %rax
cmpq %r12, %r15
je 0x3695bc
testq %rax, %rax
je 0x369524
lock
incl (%rax)
movq 0x130(%r14), %rax
testq %rax, %rax
je 0x36955d
lock
decl (%rax)
jne 0x36955d
movq 0x128(%r14), %rsi
movq 0x148(%r14), %rdi
testq %rdi, %rdi
je 0x369550
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36955d
testq %rsi, %rsi
je 0x36955d
movq %rsi, %rdi
callq 0x563b0
movq 0x8(%rsp), %rax
movq %rax, 0x128(%r14)
movq 0x10(%rsp), %rax
movq %rax, 0x130(%r14)
movq 0x18(%rsp), %rcx
movq %rcx, 0x138(%r14)
movl 0x20(%rsp), %ecx
movl %ecx, 0x140(%r14)
movq 0x28(%rsp), %rcx
movq %rcx, 0x148(%r14)
movups 0x30(%rsp), %xmm0
movups %xmm0, 0x150(%r14)
movl 0x40(%rsp), %ecx
movl %ecx, 0x160(%r14)
movq 0x48(%rsp), %rcx
movq %rcx, 0x168(%r14)
testq %rax, %rax
je 0x3695ea
lock
decl (%rax)
jne 0x3695ea
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x3695dd
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x3695ea
testq %rsi, %rsi
je 0x3695ea
movq %rsi, %rdi
callq 0x563b0
cmpq $0x0, (%r15)
je 0x369604
movslq 0x160(%r14), %rax
imulq 0x168(%r14), %rax
testq %rax, %rax
jne 0x369609
movl $0xffffff9c, %ebx # imm = 0xFFFFFF9C
movl %ebx, %eax
addq $0x50, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x36967c
lock
decl (%rax)
jne 0x36967c
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
je 0x369667
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x36967c
jmp 0x369686
jmp 0x369686
movq %rax, %rbx
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x36967c
lock
decl (%rax)
jne 0x36967c
movq 0x8(%rsp), %rsi
movq 0x28(%rsp), %rdi
testq %rdi, %rdi
jne 0x369676
testq %rsi, %rsi
je 0x36967c
movq %rsi, %rdi
callq 0x563b0
jmp 0x36967c
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
jmp 0x369686
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/instancenorm.cpp
|
ncnn::InstanceNorm::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Normalize every channel of the blob in place to zero mean / unit variance,
// then (when affine) apply the learned per-channel scale and shift.
int InstanceNorm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    // x = (x - mean) / (sqrt(var + eps)) * gamma + beta

    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int c = bottom_top_blob.c;
    int size = w * h; // elements per channel

    // Channels are normalized independently, so the loop parallelizes.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < c; q++)
    {
        float* ptr = bottom_top_blob.channel(q);

        // mean and var
        float sum = 0.f;
        float sqsum = 0.f;
        for (int i = 0; i < size; i++)
        {
            sum += ptr[i];
            //sqsum += ptr[i] * ptr[i];
        }
        float mean = sum / size;
        // Second pass: accumulate squared deviations from the mean. This
        // two-pass form avoids the cancellation of E[x^2] - mean^2 (see the
        // commented-out one-pass variant below).
        float tmp = 0.f;
        for (int i = 0; i < size; i++)
        {
            tmp = ptr[i] - mean;
            sqsum += tmp * tmp;
        }
        float var = sqsum / size;
        // the var maybe minus due to accuracy
        //float var = sqsum / size - mean * mean;

        // Fold normalization and the optional affine transform into a single
        // per-channel scale (a) and shift (b), so the final pass is one FMA.
        float a;
        float b;
        if (affine)
        {
            float gamma = gamma_data[q];
            float beta = beta_data[q];

            a = static_cast<float>(gamma / (sqrt(var + eps)));
            b = -mean * a + beta;
        }
        else
        {
            a = static_cast<float>(1.f / (sqrt(var + eps)));
            b = -mean * a;
        }

        for (int i = 0; i < size; i++)
        {
            ptr[i] = ptr[i] * a + b;
        }
    }

    return 0;
}
|
movslq 0x38(%rsi), %rax
testq %rax, %rax
jle 0x3697cd
pushq %rbx
movl 0x30(%rsi), %ecx
imull 0x2c(%rsi), %ecx
movq (%rsi), %rdx
cvtsi2ss %ecx, %xmm1
movq 0x40(%rsi), %r8
movl 0xd8(%rdi), %r9d
movq 0xe0(%rdi), %r10
movq 0x128(%rdi), %r11
movss 0xab64d(%rip), %xmm0 # 0x414d18
divss %xmm1, %xmm0
imulq 0x10(%rsi), %r8
xorl %esi, %esi
movss 0xb2ee6(%rip), %xmm1 # 0x41c5c4
movss 0xae07e(%rip), %xmm2 # 0x417764
movaps 0xaac03(%rip), %xmm3 # 0x4142f0
xorps %xmm4, %xmm4
testl %ecx, %ecx
jle 0x36972b
xorl %ebx, %ebx
addss (%rdx,%rbx,4), %xmm4
incq %rbx
cmpq %rbx, %rcx
jne 0x3696f6
mulss %xmm0, %xmm4
testl %ecx, %ecx
jle 0x36972b
xorps %xmm5, %xmm5
xorl %ebx, %ebx
movss (%rdx,%rbx,4), %xmm6
subss %xmm4, %xmm6
mulss %xmm6, %xmm6
addss %xmm6, %xmm5
incq %rbx
cmpq %rbx, %rcx
jne 0x369710
jmp 0x36972e
xorps %xmm5, %xmm5
mulss %xmm0, %xmm5
testl %r9d, %r9d
je 0x369770
movss (%r11,%rsi,4), %xmm7
addss 0xd4(%rdi), %xmm5
xorps %xmm6, %xmm6
rsqrtss %xmm5, %xmm6
mulss %xmm6, %xmm5
mulss %xmm6, %xmm5
addss %xmm1, %xmm5
mulss %xmm2, %xmm6
mulss %xmm5, %xmm6
mulss (%r10,%rsi,4), %xmm6
mulss %xmm6, %xmm4
subss %xmm4, %xmm7
jmp 0x36979d
addss 0xd4(%rdi), %xmm5
xorps %xmm6, %xmm6
rsqrtss %xmm5, %xmm6
mulss %xmm6, %xmm5
mulss %xmm6, %xmm5
addss %xmm1, %xmm5
mulss %xmm2, %xmm6
mulss %xmm5, %xmm6
xorps %xmm3, %xmm4
mulss %xmm6, %xmm4
movaps %xmm4, %xmm7
testl %ecx, %ecx
jle 0x3697bd
xorl %ebx, %ebx
movss (%rdx,%rbx,4), %xmm4
mulss %xmm6, %xmm4
addss %xmm7, %xmm4
movss %xmm4, (%rdx,%rbx,4)
incq %rbx
cmpq %rbx, %rcx
jne 0x3697a3
incq %rsi
addq %r8, %rdx
cmpq %rax, %rsi
jne 0x3696ed
popq %rbx
xorl %eax, %eax
retq
|
/ysh329[P]ncnn/src/layer/instancenorm.cpp
|
virtual thunk to ncnn::Clip_x86::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
|
// Clamp every element of the blob into [min, max] in place.
// The per-channel body is a SIMD width ladder: the widest vector loop the
// build supports (AVX-512 -> AVX -> SSE2) consumes as many elements as it
// can, and the scalar tail loop finishes the remainder.
int Clip_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int d = bottom_top_blob.d;
    int channels = bottom_top_blob.c;
    int elempack = bottom_top_blob.elempack;
    // Total scalar floats per channel, regardless of packing layout.
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = bottom_top_blob.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        __m512 _min_avx512 = _mm512_set1_ps(min);
        __m512 _max_avx512 = _mm512_set1_ps(max);
        // 16 floats per iteration.
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = _mm512_max_ps(_p, _min_avx512);
            _p = _mm512_min_ps(_p, _max_avx512);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        __m256 _min_avx = _mm256_set1_ps(min);
        __m256 _max_avx = _mm256_set1_ps(max);
        // 8 floats per iteration.
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = _mm256_max_ps(_p, _min_avx);
            _p = _mm256_min_ps(_p, _max_avx);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        __m128 _min = _mm_set1_ps(min);
        __m128 _max = _mm_set1_ps(max);
        // 4 floats per iteration.
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = _mm_max_ps(_p, _min);
            _p = _mm_min_ps(_p, _max);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        // Scalar tail for the last size % 4 elements (or all of them when
        // built without SSE2).
        for (; i < size; i++)
        {
            if (*ptr < min)
                *ptr = min;

            if (*ptr > max)
                *ptr = max;

            ptr++;
        }
    }

    return 0;
}
|
pushq %rax
movq (%rdi), %rax
addq -0x58(%rax), %rdi
callq 0x36991c
xorl %eax, %eax
popq %rcx
retq
nopl (%rax)
|
/ysh329[P]ncnn/src/layer/x86/clip_x86.cpp
|
ncnn::Convolution1D_x86_avx::forward(ncnn::Mat const&, ncnn::Mat&, ncnn::Option const&) const
|
int Convolution1D_x86_avx::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
#if __AVX512F__
if (elempack == 16)
{
Mat tmp;
convert_packing(bottom_blob, tmp, 8, opt);
Mat tmpout;
forward(tmp, tmpout, opt);
convert_packing(tmpout, top_blob, 16, opt);
return 0;
}
#endif // __AVX512F__
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
Mat bottom_blob_bordered;
make_padding(bottom_blob, bottom_blob_bordered, opt);
if (bottom_blob_bordered.empty())
return -100;
w = bottom_blob_bordered.w;
h = bottom_blob_bordered.h;
int out_elempack = 1;
#if __SSE2__
if (opt.use_packing_layout)
{
#if __AVX__
out_elempack = num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
#else
out_elempack = num_output % 4 == 0 ? 4 : 1;
#endif
}
#endif // __SSE2__
size_t out_elemsize = elemsize / elempack * out_elempack;
const int outw = (w - kernel_extent_w) / stride_w + 1;
const int outh = num_output / out_elempack;
top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
if (top_blob.empty())
return -100;
#if __SSE2__
#if __AVX__
if (elempack == 8 && out_elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + p * 8);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 8;
for (int k = 0; k < kernel_w; k++)
{
__m256 _val0 = _mm256_broadcast_ss(sptr);
__m256 _val1 = _mm256_broadcast_ss(sptr + 1);
__m256 _val2 = _mm256_broadcast_ss(sptr + 2);
__m256 _val3 = _mm256_broadcast_ss(sptr + 3);
__m256 _val4 = _mm256_broadcast_ss(sptr + 4);
__m256 _val5 = _mm256_broadcast_ss(sptr + 5);
__m256 _val6 = _mm256_broadcast_ss(sptr + 6);
__m256 _val7 = _mm256_broadcast_ss(sptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_mm256_comp_fmadd_ps8(_sum,
_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7,
_w0, _w1, _w2, _w3, _w4, _w5, _w6, _w7);
sptr += dilation_w * 8;
kptr += 64;
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
if (elempack == 1 && out_elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps(((const float*)bias_data) + p * 8);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;
for (int k = 0; k < kernel_w; k++)
{
__m256 _val = _mm256_set1_ps(sptr[0]);
__m256 _w = _mm256_loadu_ps(kptr);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
sptr += dilation_w;
kptr += 8;
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
if (elempack == 4 && out_elempack == 8)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m256 _sum = _mm256_set1_ps(0.f);
if (bias_term)
{
_sum = _mm256_loadu_ps((const float*)bias_data + p * 8);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;
for (int k = 0; k < kernel_w; k++)
{
__m256 _val0 = _mm256_broadcast_ss(sptr);
__m256 _val1 = _mm256_broadcast_ss(sptr + 1);
__m256 _val2 = _mm256_broadcast_ss(sptr + 2);
__m256 _val3 = _mm256_broadcast_ss(sptr + 3);
__m256 _w0 = _mm256_loadu_ps(kptr);
_sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
_sum = _mm256_comp_fmadd_ps(_val1, _w1, _sum);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
_sum = _mm256_comp_fmadd_ps(_val2, _w2, _sum);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
_sum = _mm256_comp_fmadd_ps(_val3, _w3, _sum);
sptr += dilation_w * 4;
kptr += 32;
}
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
if (elempack == 8 && out_elempack == 1)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[p];
}
const float* kptr = weight_data_packed.channel(p);
__m256 _sum8 = _mm256_set1_ps(0);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 8;
for (int k = 0; k < kernel_w; k++) // 29.23
{
__m256 _val = _mm256_loadu_ps(sptr);
__m256 _w = _mm256_loadu_ps(kptr);
__m256 _s8 = _mm256_mul_ps(_val, _w);
_sum8 = _mm256_add_ps(_sum8, _s8);
sptr += dilation_w * 8;
kptr += 8;
}
}
sum += _mm256_reduce_add_ps(_sum8); // dot
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
}
}
}
if (elempack == 8 && out_elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + p * 4);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 8;
for (int k = 0; k < kernel_w; k++)
{
__m128 _val0 = _mm_broadcast_ss(sptr);
__m128 _val1 = _mm_broadcast_ss(sptr + 1);
__m128 _val2 = _mm_broadcast_ss(sptr + 2);
__m128 _val3 = _mm_broadcast_ss(sptr + 3);
__m128 _val4 = _mm_broadcast_ss(sptr + 4);
__m128 _val5 = _mm_broadcast_ss(sptr + 5);
__m128 _val6 = _mm_broadcast_ss(sptr + 6);
__m128 _val7 = _mm_broadcast_ss(sptr + 7);
__m128 _w0 = _mm_loadu_ps(kptr);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
__m128 _w1 = _mm_loadu_ps(kptr + 4);
_sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
__m128 _w2 = _mm_loadu_ps(kptr + 8);
_sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
__m128 _w3 = _mm_loadu_ps(kptr + 12);
_sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
__m128 _w4 = _mm_loadu_ps(kptr + 16);
_sum = _mm_comp_fmadd_ps(_val4, _w4, _sum);
__m128 _w5 = _mm_loadu_ps(kptr + 20);
_sum = _mm_comp_fmadd_ps(_val5, _w5, _sum);
__m128 _w6 = _mm_loadu_ps(kptr + 24);
_sum = _mm_comp_fmadd_ps(_val6, _w6, _sum);
__m128 _w7 = _mm_loadu_ps(kptr + 28);
_sum = _mm_comp_fmadd_ps(_val7, _w7, _sum);
sptr += dilation_w * 8;
kptr += 32;
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
#endif
if (elempack == 4 && out_elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + p * 4);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;
for (int k = 0; k < kernel_w; k++)
{
__m128 _val0 = _mm_set1_ps(sptr[0]);
__m128 _val1 = _mm_set1_ps(sptr[1]);
__m128 _val2 = _mm_set1_ps(sptr[2]);
__m128 _val3 = _mm_set1_ps(sptr[3]);
__m128 _w0 = _mm_loadu_ps(kptr);
_sum = _mm_add_ps(_mm_mul_ps(_val0, _w0), _sum);
__m128 _w1 = _mm_loadu_ps(kptr + 4);
_sum = _mm_add_ps(_mm_mul_ps(_val1, _w1), _sum);
__m128 _w2 = _mm_loadu_ps(kptr + 8);
_sum = _mm_add_ps(_mm_mul_ps(_val2, _w2), _sum);
__m128 _w3 = _mm_loadu_ps(kptr + 12);
_sum = _mm_add_ps(_mm_mul_ps(_val3, _w3), _sum);
sptr += dilation_w * 4;
kptr += 16;
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
if (elempack == 1 && out_elempack == 4)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
__m128 _sum = _mm_set1_ps(0.f);
if (bias_term)
{
_sum = _mm_loadu_ps((const float*)bias_data + p * 4);
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;
for (int k = 0; k < kernel_w; k++)
{
__m128 _val = _mm_set1_ps(sptr[0]);
__m128 _w = _mm_loadu_ps(kptr);
_sum = _mm_add_ps(_mm_mul_ps(_val, _w), _sum);
sptr += dilation_w;
kptr += 4;
}
}
_sum = activation_sse(_sum, activation_type, activation_params);
_mm_storeu_ps(outptr, _sum);
outptr += 4;
}
}
}
}
if (elempack == 4 && out_elempack == 1)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[p];
}
const float* kptr = weight_data_packed.channel(p);
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w * 4;
for (int k = 0; k < kernel_w; k++)
{
__m128 _val = _mm_loadu_ps(sptr);
__m128 _w = _mm_loadu_ps(kptr);
__m128 _s4 = _mm_mul_ps(_val, _w);
sum += _mm_reduce_add_ps(_s4); // dot
sptr += dilation_w * 4;
kptr += 4;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
}
}
}
#endif // __SSE2__
if (elempack == 1 && out_elempack == 1)
{
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outh; p++)
{
float* outptr = top_blob.row(p);
for (int j = 0; j < outw; j++)
{
float sum = 0.f;
if (bias_term)
{
sum = bias_data[p];
}
const float* kptr = (const float*)weight_data + kernel_w * h * p;
for (int q = 0; q < h; q++)
{
const float* sptr = bottom_blob_bordered.row(q) + j * stride_w;
for (int k = 0; k < kernel_w; k++)
{
float val = sptr[0];
float wt = kptr[0];
sum += val * wt;
sptr += dilation_w;
kptr += 1;
}
}
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
}
}
}
return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq %rcx, %r12
movq %rdx, %r13
movq %rdi, %rbx
movq 0x10(%rsi), %r14
movslq 0x18(%rsi), %rax
movq %rax, 0x58(%rsp)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rdi,%rax), %r15d
decl %r15d
imull 0xd8(%rdi,%rax), %r15d
addq %rax, %rdi
leaq 0x70(%rsp), %rdx
movq $0x0, 0x40(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
vmovups %xmm0, 0xc(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovups %xmm0, 0x2c(%rdx)
callq 0x3ef6a2
movl $0xffffff9c, %ebp # imm = 0xFFFFFF9C
cmpq $0x0, 0x70(%rsp)
je 0x3fc2fc
movslq 0xa8(%rsp), %rax
imulq 0xb0(%rsp), %rax
testq %rax, %rax
je 0x3fc2fc
movq %r13, 0x48(%rsp)
notl %r15d
movl 0x9c(%rsp), %esi
movq (%rbx), %rax
movq -0x18(%rax), %r9
movl 0xd0(%rbx,%r9), %r10d
movl $0x1, %r8d
cmpb $0x1, 0x27(%r12)
jne 0x3f937a
xorl %eax, %eax
testb $0x3, %r10b
sete %al
testb $0x7, %r10b
leal 0x1(%rax,%rax,2), %eax
movl $0x8, %r8d
cmovnel %eax, %r8d
movl 0xa0(%rsp), %r13d
movq %r14, %rax
xorl %edx, %edx
divq 0x58(%rsp)
movq %rax, %rdi
movl %r8d, %ecx
addl %r15d, %esi
movl %esi, %eax
cltd
idivl 0xdc(%rbx,%r9)
imulq %rdi, %rcx
movq %rax, 0x38(%rsp)
leal 0x1(%rax), %esi
movl %r10d, %eax
cltd
idivl %r8d
movq 0x8(%r12), %r9
movq 0x48(%rsp), %r14
movq %r14, %rdi
movl %esi, 0x60(%rsp)
movl %eax, 0x14(%rsp)
movl %eax, %edx
movl %r8d, 0x64(%rsp)
callq 0x5a14a
cmpq $0x0, (%r14)
je 0x3fc2fc
movslq 0x38(%r14), %rax
imulq 0x40(%r14), %rax
testq %rax, %rax
je 0x3fc2fc
cmpl $0x8, 0x58(%rsp)
setne %al
cmpl $0x8, 0x64(%rsp)
sete %cl
cmpl $0x0, 0x14(%rsp)
setg %dl
andb %cl, %dl
xorb $0x1, %dl
movb %dl, 0x50(%rsp)
orb %dl, %al
jne 0x3f9adf
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
vxorps %xmm11, %xmm11, %xmm11
vbroadcastss 0x1b24f(%rip), %ymm6 # 0x414690
vbroadcastss 0x1b8ce(%rip), %ymm1 # 0x414d18
vbroadcastss 0x1dc19(%rip), %ymm12 # 0x41706c
vbroadcastss 0x1dc14(%rip), %ymm13 # 0x417070
cmpl $0x0, 0x38(%rsp)
js 0x3f9ac7
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %r8
movq 0x8(%rsp), %rcx
imulq %rcx, %r8
imulq 0x10(%rax), %r8
addq (%rax), %r8
movq (%rbx), %rax
movq %rax, 0x18(%rsp)
shlq $0x5, %rcx
movq %rcx, 0x28(%rsp)
xorl %r11d, %r11d
xorl %r12d, %r12d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0xec(%rbx,%r14)
je 0x3f94c0
movq 0x190(%rbx,%r14), %rax
movq 0x28(%rsp), %rcx
vmovups (%rax,%rcx), %ymm14
jmp 0x3f94c5
vxorps %xmm14, %xmm14, %xmm14
testl %r13d, %r13d
jle 0x3f95e9
movq 0x48(%rbx), %r15
imulq 0x8(%rsp), %r15
imulq 0x18(%rbx), %r15
addq 0x8(%rbx), %r15
movq 0x70(%rsp), %rdx
movslq 0x9c(%rsp), %rbp
movq 0x18(%rsp), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rbx,%rax), %ecx
movl 0xdc(%rbx,%rax), %esi
imull %r11d, %esi
movslq %esi, %rsi
leaq (%rdx,%rsi,4), %rsi
addq $0x1c, %rsi
imulq 0x80(%rsp), %rbp
xorl %r10d, %r10d
testl %ecx, %ecx
jle 0x3f95da
movl 0xd8(%rbx,%rax), %edx
shll $0x3, %edx
movslq %edx, %rdi
shlq $0x2, %rdi
movl %ecx, %edx
movq %rsi, %r9
vbroadcastss -0x1c(%r9), %ymm2
vbroadcastss -0x18(%r9), %ymm3
vbroadcastss -0x14(%r9), %ymm4
vbroadcastss -0x10(%r9), %ymm7
vbroadcastss -0xc(%r9), %ymm8
vbroadcastss -0x8(%r9), %ymm15
vbroadcastss -0x4(%r9), %ymm9
vbroadcastss (%r9), %ymm10
vmulps (%r15), %ymm2, %ymm2
vaddps %ymm2, %ymm14, %ymm2
vmulps 0x20(%r15), %ymm3, %ymm3
vmulps 0x40(%r15), %ymm4, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x60(%r15), %ymm7, %ymm3
vmulps 0x80(%r15), %ymm8, %ymm4
vmulps 0xa0(%r15), %ymm15, %ymm7
vaddps %ymm4, %ymm3, %ymm3
vaddps %ymm7, %ymm3, %ymm3
vmulps 0xc0(%r15), %ymm9, %ymm4
vaddps %ymm3, %ymm2, %ymm2
vmulps 0xe0(%r15), %ymm10, %ymm3
vaddps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm2, %ymm14
addq $0x100, %r15 # imm = 0x100
addq %rdi, %r9
decl %edx
jne 0x3f953e
incq %r10
addq %rbp, %rsi
cmpq %r13, %r10
jne 0x3f9520
movl 0xf4(%rbx,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3f9aa7
leaq 0x2a555(%rip), %rcx # 0x423b58
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps %ymm11, %ymm14, %ymm14
jmp 0x3f9aa7
vbroadcastss 0x1da39(%rip), %ymm9 # 0x417058
vminps %ymm9, %ymm14, %ymm2
vbroadcastss 0x1da2f(%rip), %ymm10 # 0x41705c
vmaxps %ymm2, %ymm10, %ymm2
vbroadcastss 0x1da26(%rip), %ymm11 # 0x417060
vmulps %ymm2, %ymm11, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vroundps $0x1, %ymm7, %ymm15
vcmpltps %ymm15, %ymm7, %ymm7
vandps %ymm1, %ymm7, %ymm7
vsubps %ymm7, %ymm15, %ymm7
vbroadcastss 0x1e101(%rip), %ymm9 # 0x417760
vmulps %ymm7, %ymm9, %ymm15
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm15
vmulps %ymm2, %ymm12, %ymm3
vaddps %ymm3, %ymm13, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1d9f3(%rip), %ymm0 # 0x417074
vaddps %ymm0, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1d9e6(%rip), %ymm5 # 0x417078
vaddps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vmovaps %ymm1, %ymm0
vbroadcastss 0x1d9d5(%rip), %ymm1 # 0x41707c
vaddps %ymm1, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vaddps %ymm6, %ymm3, %ymm3
vmulps %ymm3, %ymm15, %ymm3
vaddps %ymm0, %ymm2, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vcvttps2dq %ymm7, %ymm3
vpslld $0x17, %xmm3, %xmm7
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vbroadcastss 0x1b63c(%rip), %xmm5 # 0x414d18
vpaddd %xmm5, %xmm3, %xmm3
vpaddd %xmm5, %xmm7, %xmm7
vinsertf128 $0x1, %xmm3, %ymm7, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm0, %ymm2, %ymm7
vbroadcastss 0x1d985(%rip), %ymm2 # 0x417080
vmaxps %ymm2, %ymm7, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpsrld $0x17, %xmm3, %xmm3
vbroadcastss 0x1d971(%rip), %ymm4 # 0x417084
vandps %ymm4, %ymm2, %ymm15
vorps %ymm6, %ymm15, %ymm15
vbroadcastss 0x1d968(%rip), %ymm4 # 0x41708c
vcmpleps %ymm15, %ymm4, %ymm4
vandnps %ymm15, %ymm4, %ymm8
vbroadcastss 0x1d958(%rip), %ymm10 # 0x417090
vaddps %ymm10, %ymm15, %ymm15
vaddps %ymm8, %ymm15, %ymm15
vextractf128 $0x1, %ymm4, %xmm8
vpsubd %xmm8, %xmm3, %xmm3
vpsrld $0x17, %xmm2, %xmm2
vbroadcastss 0x1dfe5(%rip), %xmm8 # 0x417740
vpaddd %xmm3, %xmm8, %xmm3
vpsubd %xmm4, %xmm2, %xmm2
vpaddd %xmm2, %xmm8, %xmm2
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vmulps %ymm15, %ymm15, %ymm3
vbroadcastss 0x1d919(%rip), %ymm4 # 0x417094
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d910(%rip), %ymm8 # 0x417098
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d903(%rip), %ymm8 # 0x41709c
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8f6(%rip), %ymm8 # 0x4170a0
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8e9(%rip), %ymm8 # 0x4170a4
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8dc(%rip), %ymm8 # 0x4170a8
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8cf(%rip), %ymm8 # 0x4170ac
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8c2(%rip), %ymm8 # 0x4170b0
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d8b5(%rip), %ymm8 # 0x4170b4
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1df54(%rip), %ymm8 # 0x417764
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vcmpleps 0x1faff(%rip), %ymm7, %ymm4 # 0x419320
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vaddps %ymm2, %ymm15, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1f5aa(%rip), %ymm3 # 0x418de4
vmulps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1defd(%rip), %ymm3 # 0x417744
vblendvps %ymm4, %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d802(%rip), %ymm3 # 0x417058
vminps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d7f9(%rip), %ymm3 # 0x41705c
vmaxps %ymm3, %ymm2, %ymm2
vmulps %ymm2, %ymm11, %ymm3
vxorps %xmm11, %xmm11, %xmm11
vaddps %ymm6, %ymm3, %ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %ymm3
vandps %ymm0, %ymm3, %ymm3
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm9, %ymm4
vsubps %ymm4, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm4
vmulps %ymm2, %ymm12, %ymm7
vaddps %ymm7, %ymm13, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d7cc(%rip), %ymm8 # 0x417074
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d7bf(%rip), %ymm8 # 0x417078
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vaddps %ymm1, %ymm7, %ymm7
vmovaps %ymm0, %ymm1
vmulps %ymm2, %ymm7, %ymm7
vaddps %ymm6, %ymm7, %ymm7
vmulps %ymm7, %ymm4, %ymm4
vaddps %ymm0, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vcvttps2dq %ymm3, %ymm3
vpslld $0x17, %xmm3, %xmm4
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm5, %xmm3, %xmm3
vpaddd %xmm5, %xmm4, %xmm4
vinsertf128 $0x1, %xmm3, %ymm4, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm0, %ymm2, %ymm2
vrcpps %ymm2, %ymm3
vaddps %ymm3, %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1f4d0(%rip), %ymm0 # 0x418dec
vsubps %ymm2, %ymm0, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vaddps %ymm4, %ymm10, %ymm3
vaddps %ymm2, %ymm3, %ymm2
jmp 0x3f9aa3
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm7
vbroadcastss 0x4(%rax), %ymm15
vmaxps %ymm7, %ymm14, %ymm7
vminps %ymm7, %ymm15, %ymm14
jmp 0x3f9aa7
vbroadcastss 0x1d6fa(%rip), %ymm2 # 0x417054
vxorps %ymm2, %ymm14, %ymm7
vbroadcastss 0x1d6f1(%rip), %ymm2 # 0x417058
vminps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d6e8(%rip), %ymm2 # 0x41705c
vmaxps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d6df(%rip), %ymm2 # 0x417060
vmulps %ymm2, %ymm7, %ymm14
vaddps %ymm6, %ymm14, %ymm14
vroundps $0x1, %ymm14, %ymm15
vcmpltps %ymm15, %ymm14, %ymm14
vandps %ymm1, %ymm14, %ymm14
vsubps %ymm14, %ymm15, %ymm14
vbroadcastss 0x1fbdd(%rip), %ymm2 # 0x419584
vmulps %ymm2, %ymm14, %ymm15
vaddps %ymm7, %ymm15, %ymm7
vmulps %ymm7, %ymm7, %ymm15
vbroadcastss 0x1d6b0(%rip), %ymm2 # 0x41706c
vmulps %ymm2, %ymm7, %ymm2
vbroadcastss 0x1d6a7(%rip), %ymm3 # 0x417070
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d69a(%rip), %ymm0 # 0x417074
vaddps %ymm0, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d68d(%rip), %ymm0 # 0x417078
vaddps %ymm0, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d680(%rip), %ymm0 # 0x41707c
vaddps %ymm0, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm1, %ymm7, %ymm7
vaddps %ymm2, %ymm7, %ymm2
vcvttps2dq %ymm14, %ymm7
vpslld $0x17, %xmm7, %xmm14
vextractf128 $0x1, %ymm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vbroadcastss 0x1b2e6(%rip), %xmm0 # 0x414d18
vpaddd %xmm0, %xmm7, %xmm7
vpaddd %xmm0, %xmm14, %xmm14
vinsertf128 $0x1, %xmm7, %ymm14, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm1, %ymm2, %ymm2
vrcpps %ymm2, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vsubps %ymm2, %ymm1, %ymm2
vmulps %ymm2, %ymm7, %ymm2
vaddps %ymm2, %ymm7, %ymm14
jmp 0x3f9aa7
movq 0xf8(%rbx,%r14), %rax
vmaxps %ymm11, %ymm14, %ymm7
vminps %ymm11, %ymm14, %ymm14
vbroadcastss (%rax), %ymm15
vmulps %ymm14, %ymm15, %ymm14
vaddps %ymm7, %ymm14, %ymm14
jmp 0x3f9aa7
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm2
vbroadcastss 0x4(%rax), %ymm3
vmulps %ymm2, %ymm14, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm11, %ymm2
vminps %ymm1, %ymm2, %ymm2
vmulps %ymm2, %ymm14, %ymm14
vmovups %ymm14, (%r8)
addq $0x20, %r8
leal 0x1(%r12), %eax
addl $0x8, %r11d
cmpl 0x38(%rsp), %r12d
movl %eax, %r12d
jne 0x3f9498
movq 0x8(%rsp), %rcx
incq %rcx
movq %rcx, 0x8(%rsp)
cmpq 0x40(%rsp), %rcx
jne 0x3f945c
cmpl $0x1, 0x58(%rsp)
setne %al
orb 0x50(%rsp), %al
jne 0x3fa137
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x28(%rsp)
xorl %esi, %esi
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x1ab7f(%rip), %ymm12 # 0x414690
vbroadcastss 0x1b1fe(%rip), %ymm6 # 0x414d18
vbroadcastss 0x1d535(%rip), %ymm9 # 0x417058
vbroadcastss 0x1d530(%rip), %ymm10 # 0x41705c
vbroadcastss 0x1d52b(%rip), %ymm11 # 0x417060
cmpl $0x0, 0x38(%rsp)
js 0x3fa129
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %r8
imulq %rsi, %r8
imulq 0x10(%rax), %r8
addq (%rax), %r8
movq (%rbx), %rax
movq %rax, 0x8(%rsp)
movq %rsi, %rax
shlq $0x5, %rax
movq %rax, 0x18(%rsp)
xorl %r11d, %r11d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0xec(%rbx,%r14)
je 0x3f9b94
movq 0x190(%rbx,%r14), %rax
movq 0x18(%rsp), %rcx
vmovups (%rax,%rcx), %ymm14
jmp 0x3f9b99
vxorps %xmm14, %xmm14, %xmm14
testl %r13d, %r13d
jle 0x3f9c29
movq 0x48(%rbx), %rax
imulq %rsi, %rax
imulq 0x18(%rbx), %rax
addq 0x8(%rbx), %rax
movslq 0x9c(%rsp), %r15
movq 0x8(%rsp), %rcx
movq -0x18(%rcx), %r12
movl 0xdc(%rbx,%r12), %ecx
imull %r11d, %ecx
movslq %ecx, %rcx
shlq $0x2, %rcx
addq 0x70(%rsp), %rcx
movl 0xd4(%rbx,%r12), %edi
imulq 0x80(%rsp), %r15
xorl %r9d, %r9d
testl %edi, %edi
jle 0x3f9c1e
movslq 0xd8(%rbx,%r12), %r10
shlq $0x2, %r10
movl %edi, %edx
movq %rcx, %rbp
vbroadcastss (%rbp), %ymm2
vmulps (%rax), %ymm2, %ymm2
vaddps %ymm2, %ymm14, %ymm14
addq $0x20, %rax
addq %r10, %rbp
decl %edx
jne 0x3f9c05
incq %r9
addq %r15, %rcx
cmpq %r13, %r9
jne 0x3f9bf0
movl 0xf4(%rbx,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3fa10e
leaq 0x29f2d(%rip), %rcx # 0x423b70
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps %ymm0, %ymm14, %ymm14
jmp 0x3fa10e
vminps %ymm9, %ymm14, %ymm2
vmaxps %ymm2, %ymm10, %ymm2
vmulps %ymm2, %ymm11, %ymm7
vmovaps %ymm12, %ymm4
vaddps %ymm7, %ymm12, %ymm7
vroundps $0x1, %ymm7, %ymm15
vcmpltps %ymm15, %ymm7, %ymm7
vandps %ymm6, %ymm7, %ymm7
vsubps %ymm7, %ymm15, %ymm7
vbroadcastss 0x1dad9(%rip), %ymm0 # 0x417760
vmulps %ymm0, %ymm7, %ymm15
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm15
vbroadcastss 0x1d3cf(%rip), %ymm12 # 0x41706c
vmulps %ymm2, %ymm12, %ymm3
vbroadcastss 0x1d3c6(%rip), %ymm13 # 0x417070
vaddps %ymm3, %ymm13, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1d3b9(%rip), %ymm8 # 0x417074
vaddps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1d3ac(%rip), %ymm1 # 0x417078
vaddps %ymm1, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vmovaps %ymm6, %ymm1
vmovaps %ymm4, %ymm6
vbroadcastss 0x1d397(%rip), %ymm5 # 0x41707c
vaddps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmulps %ymm3, %ymm15, %ymm3
vaddps %ymm1, %ymm2, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vcvttps2dq %ymm7, %ymm3
vpslld $0x17, %xmm3, %xmm7
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vbroadcastss 0x1affe(%rip), %xmm5 # 0x414d18
vpaddd %xmm5, %xmm3, %xmm3
vpaddd %xmm5, %xmm7, %xmm7
vinsertf128 $0x1, %xmm3, %ymm7, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm1, %ymm2, %ymm7
vbroadcastss 0x1d347(%rip), %ymm0 # 0x417080
vmaxps %ymm0, %ymm7, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpsrld $0x17, %xmm3, %xmm3
vbroadcastss 0x1d333(%rip), %ymm0 # 0x417084
vandps %ymm0, %ymm2, %ymm15
vorps %ymm4, %ymm15, %ymm15
vbroadcastss 0x1d32a(%rip), %ymm0 # 0x41708c
vcmpleps %ymm15, %ymm0, %ymm4
vmovaps %ymm8, %ymm0
vandnps %ymm15, %ymm4, %ymm8
vbroadcastss 0x1d316(%rip), %ymm5 # 0x417090
vaddps %ymm5, %ymm15, %ymm15
vaddps %ymm8, %ymm15, %ymm15
vextractf128 $0x1, %ymm4, %xmm8
vpsubd %xmm8, %xmm3, %xmm3
vmovaps %ymm12, %ymm8
vmovaps %ymm13, %ymm12
vmovaps %ymm0, %ymm13
vxorps %xmm5, %xmm5, %xmm5
vpsrld $0x17, %xmm2, %xmm2
vbroadcastss 0x1d992(%rip), %xmm0 # 0x417740
vpaddd %xmm0, %xmm3, %xmm3
vpsubd %xmm4, %xmm2, %xmm2
vpaddd %xmm0, %xmm2, %xmm2
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vmulps %ymm15, %ymm15, %ymm3
vbroadcastss 0x1d2c6(%rip), %ymm0 # 0x417094
vmulps %ymm0, %ymm15, %ymm4
vbroadcastss 0x1d2bd(%rip), %ymm0 # 0x417098
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d2b0(%rip), %ymm0 # 0x41709c
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d2a3(%rip), %ymm0 # 0x4170a0
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d296(%rip), %ymm0 # 0x4170a4
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d289(%rip), %ymm0 # 0x4170a8
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d27c(%rip), %ymm0 # 0x4170ac
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d26f(%rip), %ymm0 # 0x4170b0
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d262(%rip), %ymm0 # 0x4170b4
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d901(%rip), %ymm0 # 0x417764
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vxorps %xmm0, %xmm0, %xmm0
vcmpleps %ymm5, %ymm7, %ymm4
vcvtdq2ps %ymm2, %ymm2
vbroadcastss 0x1d8df(%rip), %ymm5 # 0x417760
vmulps %ymm5, %ymm2, %ymm2
vaddps %ymm2, %ymm15, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ef4e(%rip), %ymm3 # 0x418de4
vmulps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d8a1(%rip), %ymm3 # 0x417744
vblendvps %ymm4, %ymm3, %ymm2, %ymm2
vminps %ymm2, %ymm9, %ymm2
vmaxps %ymm2, %ymm10, %ymm2
vmulps %ymm2, %ymm11, %ymm3
vaddps %ymm6, %ymm3, %ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %ymm3
vandps %ymm1, %ymm3, %ymm3
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm5, %ymm3, %ymm4
vsubps %ymm4, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm4
vmulps %ymm2, %ymm8, %ymm7
vaddps %ymm7, %ymm12, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vaddps %ymm7, %ymm13, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d183(%rip), %ymm5 # 0x417078
vaddps %ymm5, %ymm7, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1d176(%rip), %ymm5 # 0x41707c
vaddps %ymm5, %ymm7, %ymm7
vmovaps %ymm6, %ymm8
vmovaps %ymm1, %ymm6
vmulps %ymm2, %ymm7, %ymm7
vmovaps %ymm8, %ymm12
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm7, %ymm4, %ymm4
vaddps %ymm1, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vcvttps2dq %ymm3, %ymm3
vpslld $0x17, %xmm3, %xmm4
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vbroadcastss 0x1add0(%rip), %xmm1 # 0x414d18
vpaddd %xmm1, %xmm3, %xmm3
vpaddd %xmm1, %xmm4, %xmm4
vinsertf128 $0x1, %xmm3, %ymm4, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm6, %ymm2, %ymm2
vrcpps %ymm2, %ymm3
vaddps %ymm3, %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1ee79(%rip), %ymm1 # 0x418dec
vsubps %ymm2, %ymm1, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vbroadcastss 0x1d10c(%rip), %ymm1 # 0x417090
vaddps %ymm1, %ymm4, %ymm3
vaddps %ymm2, %ymm3, %ymm2
jmp 0x3fa10a
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm7
vbroadcastss 0x4(%rax), %ymm15
vmaxps %ymm7, %ymm14, %ymm7
vminps %ymm7, %ymm15, %ymm14
jmp 0x3fa10e
vbroadcastss 0x1d09a(%rip), %ymm2 # 0x417054
vxorps %ymm2, %ymm14, %ymm7
vbroadcastss 0x1d091(%rip), %ymm1 # 0x417058
vminps %ymm1, %ymm7, %ymm7
vbroadcastss 0x1d088(%rip), %ymm1 # 0x41705c
vmaxps %ymm1, %ymm7, %ymm7
vbroadcastss 0x1d07f(%rip), %ymm1 # 0x417060
vmulps %ymm1, %ymm7, %ymm14
vaddps %ymm12, %ymm14, %ymm14
vroundps $0x1, %ymm14, %ymm15
vcmpltps %ymm15, %ymm14, %ymm14
vandps %ymm6, %ymm14, %ymm14
vsubps %ymm14, %ymm15, %ymm14
vbroadcastss 0x1f57c(%rip), %ymm2 # 0x419584
vmulps %ymm2, %ymm14, %ymm15
vaddps %ymm7, %ymm15, %ymm7
vmulps %ymm7, %ymm7, %ymm15
vbroadcastss 0x1d04f(%rip), %ymm1 # 0x41706c
vmulps %ymm1, %ymm7, %ymm2
vbroadcastss 0x1d046(%rip), %ymm1 # 0x417070
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d039(%rip), %ymm1 # 0x417074
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d02c(%rip), %ymm1 # 0x417078
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1d01f(%rip), %ymm1 # 0x41707c
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm6, %ymm7, %ymm7
vaddps %ymm2, %ymm7, %ymm2
vcvttps2dq %ymm14, %ymm7
vpslld $0x17, %xmm7, %xmm14
vextractf128 $0x1, %ymm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vbroadcastss 0x1ac85(%rip), %xmm1 # 0x414d18
vpaddd %xmm1, %xmm7, %xmm7
vpaddd %xmm1, %xmm14, %xmm14
vinsertf128 $0x1, %xmm7, %ymm14, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm6, %ymm2, %ymm2
vrcpps %ymm2, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm7, %ymm2
vaddps %ymm2, %ymm7, %ymm14
jmp 0x3fa10e
movq 0xf8(%rbx,%r14), %rax
vxorps %xmm1, %xmm1, %xmm1
vmaxps %ymm1, %ymm14, %ymm7
vxorps %xmm0, %xmm0, %xmm0
vminps %ymm1, %ymm14, %ymm14
vbroadcastss (%rax), %ymm15
vmulps %ymm14, %ymm15, %ymm14
vaddps %ymm7, %ymm14, %ymm14
jmp 0x3fa10e
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm2
vbroadcastss 0x4(%rax), %ymm3
vmulps %ymm2, %ymm14, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vmaxps %ymm0, %ymm2, %ymm2
vminps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm14, %ymm14
vmovups %ymm14, (%r8)
addq $0x20, %r8
leal 0x1(%r11), %eax
cmpl 0x38(%rsp), %r11d
movl %eax, %r11d
jne 0x3f9b6c
incq %rsi
cmpq 0x28(%rsp), %rsi
jne 0x3f9b35
cmpl $0x4, 0x58(%rsp)
setne %al
orb %al, 0x50(%rsp)
jne 0x3fa7e1
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x1a520(%rip), %ymm12 # 0x414690
vbroadcastss 0x1ab9f(%rip), %ymm6 # 0x414d18
vbroadcastss 0x1ced6(%rip), %ymm9 # 0x417058
vbroadcastss 0x1ced1(%rip), %ymm10 # 0x41705c
vbroadcastss 0x1cecc(%rip), %ymm11 # 0x417060
cmpl $0x0, 0x38(%rsp)
js 0x3fa7c9
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rdi
movq 0x8(%rsp), %rcx
imulq %rcx, %rdi
imulq 0x10(%rax), %rdi
addq (%rax), %rdi
movq (%rbx), %rax
movq %rax, 0x18(%rsp)
shlq $0x5, %rcx
movq %rcx, 0x28(%rsp)
xorl %r10d, %r10d
xorl %r11d, %r11d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0xec(%rbx,%r14)
je 0x3fa1f8
movq 0x190(%rbx,%r14), %rax
movq 0x28(%rsp), %rcx
vmovups (%rax,%rcx), %ymm14
jmp 0x3fa1fd
vxorps %xmm14, %xmm14, %xmm14
testl %r13d, %r13d
jle 0x3fa2c6
movq 0x48(%rbx), %r15
imulq 0x8(%rsp), %r15
imulq 0x18(%rbx), %r15
addq 0x8(%rbx), %r15
movq 0x70(%rsp), %rcx
movslq 0x9c(%rsp), %r12
movq 0x18(%rsp), %rax
movq -0x18(%rax), %rbp
movl 0xd4(%rbx,%rbp), %eax
movl 0xdc(%rbx,%rbp), %edx
imull %r10d, %edx
movslq %edx, %rdx
leaq (%rcx,%rdx,4), %rdx
addq $0xc, %rdx
imulq 0x80(%rsp), %r12
xorl %r9d, %r9d
testl %eax, %eax
jle 0x3fa2bb
movl 0xd8(%rbx,%rbp), %ecx
shll $0x2, %ecx
movslq %ecx, %rsi
shlq $0x2, %rsi
movl %eax, %ecx
movq %rdx, %r8
vbroadcastss -0xc(%r8), %ymm2
vbroadcastss -0x8(%r8), %ymm3
vbroadcastss -0x4(%r8), %ymm4
vbroadcastss (%r8), %ymm7
vmulps (%r15), %ymm2, %ymm2
vaddps %ymm2, %ymm14, %ymm2
vmulps 0x20(%r15), %ymm3, %ymm3
vmulps 0x40(%r15), %ymm4, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps 0x60(%r15), %ymm7, %ymm3
vaddps %ymm3, %ymm2, %ymm14
subq $-0x80, %r15
addq %rsi, %r8
decl %ecx
jne 0x3fa272
incq %r9
addq %r12, %rdx
cmpq %r13, %r9
jne 0x3fa258
movl 0xf4(%rbx,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3fa7ab
leaq 0x298a8(%rip), %rcx # 0x423b88
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps %ymm0, %ymm14, %ymm14
jmp 0x3fa7ab
vminps %ymm9, %ymm14, %ymm2
vmaxps %ymm2, %ymm10, %ymm2
vmulps %ymm2, %ymm11, %ymm7
vmovaps %ymm12, %ymm4
vaddps %ymm7, %ymm12, %ymm7
vroundps $0x1, %ymm7, %ymm15
vcmpltps %ymm15, %ymm7, %ymm7
vandps %ymm6, %ymm7, %ymm7
vsubps %ymm7, %ymm15, %ymm7
vbroadcastss 0x1d43c(%rip), %ymm0 # 0x417760
vmulps %ymm0, %ymm7, %ymm15
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm15
vbroadcastss 0x1cd32(%rip), %ymm12 # 0x41706c
vmulps %ymm2, %ymm12, %ymm3
vbroadcastss 0x1cd29(%rip), %ymm13 # 0x417070
vaddps %ymm3, %ymm13, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1cd1c(%rip), %ymm8 # 0x417074
vaddps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vbroadcastss 0x1cd0f(%rip), %ymm1 # 0x417078
vaddps %ymm1, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vmovaps %ymm6, %ymm1
vmovaps %ymm4, %ymm6
vbroadcastss 0x1ccfa(%rip), %ymm5 # 0x41707c
vaddps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm3, %ymm3
vaddps %ymm4, %ymm3, %ymm3
vmulps %ymm3, %ymm15, %ymm3
vaddps %ymm1, %ymm2, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vcvttps2dq %ymm7, %ymm3
vpslld $0x17, %xmm3, %xmm7
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vbroadcastss 0x1a961(%rip), %xmm5 # 0x414d18
vpaddd %xmm5, %xmm3, %xmm3
vpaddd %xmm5, %xmm7, %xmm7
vinsertf128 $0x1, %xmm3, %ymm7, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm1, %ymm2, %ymm7
vbroadcastss 0x1ccaa(%rip), %ymm0 # 0x417080
vmaxps %ymm0, %ymm7, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpsrld $0x17, %xmm3, %xmm3
vbroadcastss 0x1cc96(%rip), %ymm0 # 0x417084
vandps %ymm0, %ymm2, %ymm15
vorps %ymm4, %ymm15, %ymm15
vbroadcastss 0x1cc8d(%rip), %ymm0 # 0x41708c
vcmpleps %ymm15, %ymm0, %ymm4
vmovaps %ymm8, %ymm0
vandnps %ymm15, %ymm4, %ymm8
vbroadcastss 0x1cc79(%rip), %ymm5 # 0x417090
vaddps %ymm5, %ymm15, %ymm15
vaddps %ymm8, %ymm15, %ymm15
vextractf128 $0x1, %ymm4, %xmm8
vpsubd %xmm8, %xmm3, %xmm3
vmovaps %ymm12, %ymm8
vmovaps %ymm13, %ymm12
vmovaps %ymm0, %ymm13
vxorps %xmm5, %xmm5, %xmm5
vpsrld $0x17, %xmm2, %xmm2
vbroadcastss 0x1d2f5(%rip), %xmm0 # 0x417740
vpaddd %xmm0, %xmm3, %xmm3
vpsubd %xmm4, %xmm2, %xmm2
vpaddd %xmm0, %xmm2, %xmm2
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vmulps %ymm15, %ymm15, %ymm3
vbroadcastss 0x1cc29(%rip), %ymm0 # 0x417094
vmulps %ymm0, %ymm15, %ymm4
vbroadcastss 0x1cc20(%rip), %ymm0 # 0x417098
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cc13(%rip), %ymm0 # 0x41709c
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cc06(%rip), %ymm0 # 0x4170a0
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cbf9(%rip), %ymm0 # 0x4170a4
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cbec(%rip), %ymm0 # 0x4170a8
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cbdf(%rip), %ymm0 # 0x4170ac
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cbd2(%rip), %ymm0 # 0x4170b0
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1cbc5(%rip), %ymm0 # 0x4170b4
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vbroadcastss 0x1d264(%rip), %ymm0 # 0x417764
vaddps %ymm0, %ymm4, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vxorps %xmm0, %xmm0, %xmm0
vcmpleps %ymm5, %ymm7, %ymm4
vcvtdq2ps %ymm2, %ymm2
vbroadcastss 0x1d242(%rip), %ymm5 # 0x417760
vmulps %ymm5, %ymm2, %ymm2
vaddps %ymm2, %ymm15, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1e8b1(%rip), %ymm3 # 0x418de4
vmulps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d204(%rip), %ymm3 # 0x417744
vblendvps %ymm4, %ymm3, %ymm2, %ymm2
vminps %ymm2, %ymm9, %ymm2
vmaxps %ymm2, %ymm10, %ymm2
vmulps %ymm2, %ymm11, %ymm3
vaddps %ymm6, %ymm3, %ymm3
vroundps $0x1, %ymm3, %ymm4
vcmpltps %ymm4, %ymm3, %ymm3
vandps %ymm1, %ymm3, %ymm3
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm5, %ymm3, %ymm4
vsubps %ymm4, %ymm2, %ymm2
vmulps %ymm2, %ymm2, %ymm4
vmulps %ymm2, %ymm8, %ymm7
vaddps %ymm7, %ymm12, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vaddps %ymm7, %ymm13, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1cae6(%rip), %ymm5 # 0x417078
vaddps %ymm5, %ymm7, %ymm7
vmulps %ymm2, %ymm7, %ymm7
vbroadcastss 0x1cad9(%rip), %ymm5 # 0x41707c
vaddps %ymm5, %ymm7, %ymm7
vmovaps %ymm6, %ymm8
vmovaps %ymm1, %ymm6
vmulps %ymm2, %ymm7, %ymm7
vmovaps %ymm8, %ymm12
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm7, %ymm4, %ymm4
vaddps %ymm1, %ymm2, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vcvttps2dq %ymm3, %ymm3
vpslld $0x17, %xmm3, %xmm4
vextractf128 $0x1, %ymm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vbroadcastss 0x1a733(%rip), %xmm1 # 0x414d18
vpaddd %xmm1, %xmm3, %xmm3
vpaddd %xmm1, %xmm4, %xmm4
vinsertf128 $0x1, %xmm3, %ymm4, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vaddps %ymm6, %ymm2, %ymm2
vrcpps %ymm2, %ymm3
vaddps %ymm3, %ymm3, %ymm4
vmulps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1e7dc(%rip), %ymm1 # 0x418dec
vsubps %ymm2, %ymm1, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vbroadcastss 0x1ca6f(%rip), %ymm1 # 0x417090
vaddps %ymm1, %ymm4, %ymm3
vaddps %ymm2, %ymm3, %ymm2
jmp 0x3fa7a7
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm7
vbroadcastss 0x4(%rax), %ymm15
vmaxps %ymm7, %ymm14, %ymm7
vminps %ymm7, %ymm15, %ymm14
jmp 0x3fa7ab
vbroadcastss 0x1c9fd(%rip), %ymm2 # 0x417054
vxorps %ymm2, %ymm14, %ymm7
vbroadcastss 0x1c9f4(%rip), %ymm1 # 0x417058
vminps %ymm1, %ymm7, %ymm7
vbroadcastss 0x1c9eb(%rip), %ymm1 # 0x41705c
vmaxps %ymm1, %ymm7, %ymm7
vbroadcastss 0x1c9e2(%rip), %ymm1 # 0x417060
vmulps %ymm1, %ymm7, %ymm14
vaddps %ymm12, %ymm14, %ymm14
vroundps $0x1, %ymm14, %ymm15
vcmpltps %ymm15, %ymm14, %ymm14
vandps %ymm6, %ymm14, %ymm14
vsubps %ymm14, %ymm15, %ymm14
vbroadcastss 0x1eedf(%rip), %ymm2 # 0x419584
vmulps %ymm2, %ymm14, %ymm15
vaddps %ymm7, %ymm15, %ymm7
vmulps %ymm7, %ymm7, %ymm15
vbroadcastss 0x1c9b2(%rip), %ymm1 # 0x41706c
vmulps %ymm1, %ymm7, %ymm2
vbroadcastss 0x1c9a9(%rip), %ymm1 # 0x417070
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1c99c(%rip), %ymm1 # 0x417074
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1c98f(%rip), %ymm1 # 0x417078
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vbroadcastss 0x1c982(%rip), %ymm1 # 0x41707c
vaddps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm6, %ymm7, %ymm7
vaddps %ymm2, %ymm7, %ymm2
vcvttps2dq %ymm14, %ymm7
vpslld $0x17, %xmm7, %xmm14
vextractf128 $0x1, %ymm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vbroadcastss 0x1a5e8(%rip), %xmm1 # 0x414d18
vpaddd %xmm1, %xmm7, %xmm7
vpaddd %xmm1, %xmm14, %xmm14
vinsertf128 $0x1, %xmm7, %ymm14, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vaddps %ymm6, %ymm2, %ymm2
vrcpps %ymm2, %ymm7
vmulps %ymm7, %ymm2, %ymm2
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm7, %ymm2
vaddps %ymm2, %ymm7, %ymm14
jmp 0x3fa7ab
movq 0xf8(%rbx,%r14), %rax
vxorps %xmm1, %xmm1, %xmm1
vmaxps %ymm1, %ymm14, %ymm7
vxorps %xmm0, %xmm0, %xmm0
vminps %ymm1, %ymm14, %ymm14
vbroadcastss (%rax), %ymm15
vmulps %ymm14, %ymm15, %ymm14
vaddps %ymm7, %ymm14, %ymm14
jmp 0x3fa7ab
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %ymm2
vbroadcastss 0x4(%rax), %ymm3
vmulps %ymm2, %ymm14, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vmaxps %ymm0, %ymm2, %ymm2
vminps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm14, %ymm14
vmovups %ymm14, (%rdi)
addq $0x20, %rdi
leal 0x1(%r11), %eax
addl $0x4, %r10d
cmpl 0x38(%rsp), %r11d
movl %eax, %r11d
jne 0x3fa1d0
movq 0x8(%rsp), %rcx
incq %rcx
movq %rcx, 0x8(%rsp)
cmpq 0x40(%rsp), %rcx
jne 0x3fa194
cmpl $0x0, 0x14(%rsp)
setg %al
cmpl $0x8, 0x58(%rsp)
setne %cl
cmpl $0x1, 0x64(%rsp)
sete %dl
andb %al, %dl
xorb $0x1, %dl
movb %dl, 0x27(%rsp)
orb %dl, %cl
jne 0x3fab12
movq 0x48(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0xd8(%rsp)
movslq 0x2c(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0xd0(%rsp)
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movq 0x48(%rbx), %rax
imulq 0x18(%rbx), %rax
movq %rax, 0xc8(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0xc0(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0xe0(%rsp)
movl 0x60(%rsp), %eax
movq %rax, 0x8(%rsp)
movq $0x0, 0x18(%rsp)
cmpl $0x0, 0x38(%rsp)
js 0x3faaf7
movq 0xd0(%rsp), %r14
movq 0x18(%rsp), %rax
imulq %rax, %r14
addq 0xd8(%rsp), %r14
movq 0xc8(%rsp), %rcx
imulq %rax, %rcx
addq 0xc0(%rsp), %rcx
movq %rcx, 0x28(%rsp)
movslq 0x9c(%rsp), %r12
imulq 0x80(%rsp), %r12
movq 0x70(%rsp), %rax
movq %rax, 0x40(%rsp)
movq (%rbx), %rax
movq %rax, 0x50(%rsp)
xorl %ebp, %ebp
xorl %r15d, %r15d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xec(%rbx,%rax)
je 0x3fa8fa
movq 0x190(%rbx,%rax), %rcx
movq 0x18(%rsp), %rdx
vmovss (%rcx,%rdx,4), %xmm0
jmp 0x3fa8fe
vxorps %xmm0, %xmm0, %xmm0
testl %r13d, %r13d
jle 0x3fa978
movq 0x50(%rsp), %rcx
movq -0x18(%rcx), %rcx
movl 0xd4(%rbx,%rcx), %edx
movl 0xdc(%rbx,%rcx), %esi
imull %ebp, %esi
movslq %esi, %rsi
movq 0x40(%rsp), %rdi
leaq (%rdi,%rsi,4), %rsi
vxorps %xmm1, %xmm1, %xmm1
xorl %edi, %edi
movq 0x28(%rsp), %r8
testl %edx, %edx
jle 0x3fa96b
movl 0xd8(%rbx,%rcx), %r9d
shll $0x3, %r9d
movslq %r9d, %r9
shlq $0x2, %r9
movq %rsi, %r10
movl %edx, %r11d
vmovups (%r8), %ymm2
vmulps (%r10), %ymm2, %ymm2
vaddps %ymm1, %ymm2, %ymm1
addq $0x20, %r8
addq %r9, %r10
decl %r11d
jne 0x3fa951
incq %rdi
addq %r12, %rsi
cmpq %r13, %rdi
jne 0x3fa934
jmp 0x3fa97c
vxorps %xmm1, %xmm1, %xmm1
vextractf128 $0x1, %ymm1, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vaddps %xmm1, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vaddss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm4
movl 0xf4(%rbx,%rax), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x3faad0
leaq 0x291ec(%rip), %rdx # 0x423ba0
movslq (%rdx,%rcx,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
vmaxss 0x1c4c3(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x3faad4
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x68(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x1a334(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
vmulss 0x68(%rsp), %xmm0, %xmm0
jmp 0x3faad4
movq 0xf8(%rbx,%rax), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x3faad4
vmovaps %xmm1, %xmm0
jmp 0x3faad4
vminss 0x1c633(%rip), %xmm4, %xmm0 # 0x417058
vbroadcastss 0x1c626(%rip), %xmm1 # 0x417054
vxorps %xmm1, %xmm0, %xmm1
vcmpltss 0x1c621(%rip), %xmm0, %xmm0 # 0x41705c
vbroadcastss 0x1c614(%rip), %xmm2 # 0x417058
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vzeroupper
callq 0x563e0
vmovss 0x1a2be(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x3faad4
movq 0xf8(%rbx,%rax), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm4, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x1a296(%rip), %xmm2 # 0x414d18
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm4, %xmm0, %xmm0
jmp 0x3faad4
movq 0xf8(%rbx,%rax), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vbroadcastss 0x1c5ac(%rip), %xmm0 # 0x417054
vxorps %xmm0, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
jb 0x3faad4
vmovss 0x1a256(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x3faaed
vmovaps %xmm4, %xmm0
vmovss %xmm0, (%r14,%r15,4)
incq %r15
addl $0x8, %ebp
cmpq 0x8(%rsp), %r15
jne 0x3fa8d3
jmp 0x3faaf7
vmulss %xmm4, %xmm1, %xmm0
vaddss %xmm2, %xmm0, %xmm0
jmp 0x3faa88
movq 0x18(%rsp), %rcx
incq %rcx
movq %rcx, 0x18(%rsp)
cmpq 0xe0(%rsp), %rcx
jne 0x3fa86e
cmpl $0x0, 0x14(%rsp)
setg %al
cmpl $0x8, 0x58(%rsp)
setne %cl
cmpl $0x4, 0x64(%rsp)
sete %dl
andb %al, %dl
xorb $0x1, %dl
movb %dl, 0x50(%rsp)
orb %dl, %cl
jne 0x3fb181
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
vxorps %xmm10, %xmm10, %xmm10
vbroadcastss 0x19b2d(%rip), %xmm5 # 0x414690
vbroadcastss 0x1a1ac(%rip), %xmm6 # 0x414d18
vbroadcastss 0x1c4fb(%rip), %xmm11 # 0x417070
vbroadcastss 0x1c4f6(%rip), %xmm12 # 0x417074
vbroadcastss 0x1c4f1(%rip), %xmm13 # 0x417078
cmpl $0x0, 0x38(%rsp)
js 0x3fb169
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %r8
movq 0x8(%rsp), %rcx
imulq %rcx, %r8
imulq 0x10(%rax), %r8
addq (%rax), %r8
movq (%rbx), %rax
movq %rax, 0x18(%rsp)
shlq $0x4, %rcx
movq %rcx, 0x28(%rsp)
xorl %r11d, %r11d
xorl %r12d, %r12d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0xec(%rbx,%r14)
je 0x3fabeb
movq 0x190(%rbx,%r14), %rax
movq 0x28(%rsp), %rcx
vmovups (%rax,%rcx), %xmm1
jmp 0x3fabef
vxorps %xmm1, %xmm1, %xmm1
testl %r13d, %r13d
jle 0x3fad04
movq 0x48(%rbx), %r15
imulq 0x8(%rsp), %r15
imulq 0x18(%rbx), %r15
addq 0x8(%rbx), %r15
movq 0x70(%rsp), %rcx
movslq 0x9c(%rsp), %rbp
movq 0x18(%rsp), %rax
movq -0x18(%rax), %rax
movl 0xd4(%rbx,%rax), %esi
movl 0xdc(%rbx,%rax), %edx
imull %r11d, %edx
movslq %edx, %rdx
leaq (%rcx,%rdx,4), %rcx
addq $0x1c, %rcx
imulq 0x80(%rsp), %rbp
xorl %r10d, %r10d
testl %esi, %esi
jle 0x3facf5
movl 0xd8(%rbx,%rax), %edx
shll $0x3, %edx
movslq %edx, %rdi
shlq $0x2, %rdi
movq %rcx, %r9
movl %esi, %edx
vbroadcastss -0x1c(%r9), %xmm2
vbroadcastss -0x18(%r9), %xmm3
vbroadcastss -0x14(%r9), %xmm7
vbroadcastss -0x10(%r9), %xmm14
vbroadcastss -0xc(%r9), %xmm15
vbroadcastss -0x8(%r9), %xmm4
vbroadcastss -0x4(%r9), %xmm8
vbroadcastss (%r9), %xmm9
vmulps (%r15), %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps 0x10(%r15), %xmm3, %xmm2
vmulps 0x20(%r15), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x30(%r15), %xmm14, %xmm2
vmulps 0x40(%r15), %xmm15, %xmm3
vmulps 0x50(%r15), %xmm4, %xmm4
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps 0x60(%r15), %xmm8, %xmm3
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x70(%r15), %xmm9, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm1, %xmm1
subq $-0x80, %r15
addq %rdi, %r9
decl %edx
jne 0x3fac68
incq %r10
addq %rbp, %rcx
cmpq %r13, %r10
jne 0x3fac4a
movl 0xf4(%rbx,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3fb149
leaq 0x28e9a(%rip), %rcx # 0x423bb8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps %xmm1, %xmm10, %xmm1
jmp 0x3fb149
vbroadcastss 0x1c31f(%rip), %xmm4 # 0x417058
vminps %xmm4, %xmm1, %xmm7
vbroadcastss 0x1c316(%rip), %xmm8 # 0x41705c
vmaxps %xmm7, %xmm8, %xmm7
vbroadcastss 0x1c30d(%rip), %xmm9 # 0x417060
vmulps %xmm7, %xmm9, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vcvttps2dq %xmm14, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm14, %xmm14
vandps %xmm6, %xmm14, %xmm14
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x1c9e3(%rip), %xmm4 # 0x417760
vmulps %xmm4, %xmm14, %xmm15
vsubps %xmm15, %xmm7, %xmm7
vmulps %xmm7, %xmm7, %xmm15
vbroadcastss 0x1c2d9(%rip), %xmm10 # 0x41706c
vmulps %xmm7, %xmm10, %xmm2
vaddps %xmm2, %xmm11, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1c2c4(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm2, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm6, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vcvttps2dq %xmm14, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vbroadcastss 0x1c291(%rip), %xmm2 # 0x417080
vmaxps %xmm2, %xmm7, %xmm2
vpsrld $0x17, %xmm2, %xmm14
vbroadcastss 0x1c287(%rip), %xmm3 # 0x417088
vpaddd %xmm3, %xmm14, %xmm14
vbroadcastss 0x1c276(%rip), %xmm3 # 0x417084
vandps %xmm3, %xmm2, %xmm2
vorps %xmm5, %xmm2, %xmm2
vcvtdq2ps %xmm14, %xmm14
vbroadcastss 0x1c268(%rip), %xmm3 # 0x41708c
vcmpltps %xmm3, %xmm2, %xmm15
vandps %xmm2, %xmm15, %xmm3
vbroadcastss 0x1c25a(%rip), %xmm0 # 0x417090
vaddps %xmm0, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vandps %xmm6, %xmm15, %xmm3
vsubps %xmm3, %xmm14, %xmm3
vmulps %xmm2, %xmm2, %xmm14
vbroadcastss 0x1c241(%rip), %xmm15 # 0x417094
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c238(%rip), %xmm8 # 0x417098
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c22a(%rip), %xmm8 # 0x41709c
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c21c(%rip), %xmm8 # 0x4170a0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c20e(%rip), %xmm8 # 0x4170a4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c200(%rip), %xmm8 # 0x4170a8
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c1f2(%rip), %xmm8 # 0x4170ac
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c1e4(%rip), %xmm8 # 0x4170b0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c1d6(%rip), %xmm8 # 0x4170b4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c874(%rip), %xmm8 # 0x417764
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm15, %xmm14, %xmm14
vcmpleps 0x1915d(%rip), %xmm7, %xmm7 # 0x414060
vmulps %xmm4, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vbroadcastss 0x1decc(%rip), %xmm3 # 0x418de4
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1c81f(%rip), %xmm3 # 0x417744
vblendvps %xmm7, %xmm3, %xmm2, %xmm2
vbroadcastss 0x1c124(%rip), %xmm3 # 0x417058
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1c11b(%rip), %xmm3 # 0x41705c
vmaxps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm3
vaddps %xmm5, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm4, %xmm3, %xmm7
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm7
vmulps %xmm2, %xmm10, %xmm14
vxorps %xmm10, %xmm10, %xmm10
vaddps %xmm11, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm12, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm13, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vbroadcastss 0x1c0e1(%rip), %xmm4 # 0x41707c
vaddps %xmm4, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vmulps %xmm7, %xmm14, %xmm7
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vaddps %xmm3, %xmm3, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1de0f(%rip), %xmm4 # 0x418dec
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm0, %xmm7, %xmm3
vaddps %xmm2, %xmm3, %xmm2
jmp 0x3fb145
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %xmm7
vbroadcastss 0x4(%rax), %xmm15
vmaxps %xmm7, %xmm1, %xmm1
vminps %xmm1, %xmm15, %xmm1
jmp 0x3fb149
vbroadcastss 0x1c039(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1c030(%rip), %xmm2 # 0x417058
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1c027(%rip), %xmm2 # 0x41705c
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1c01e(%rip), %xmm2 # 0x417060
vmulps %xmm2, %xmm1, %xmm7
vaddps %xmm5, %xmm7, %xmm7
vcvttps2dq %xmm7, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm7, %xmm7
vandps %xmm6, %xmm7, %xmm7
vsubps %xmm7, %xmm15, %xmm7
vbroadcastss 0x1e51a(%rip), %xmm2 # 0x419584
vmulps %xmm2, %xmm7, %xmm15
vaddps %xmm1, %xmm15, %xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x1bfed(%rip), %xmm2 # 0x41706c
vmulps %xmm2, %xmm1, %xmm14
vbroadcastss 0x1bfe4(%rip), %xmm2 # 0x417070
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1bfd7(%rip), %xmm2 # 0x417074
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1bfca(%rip), %xmm2 # 0x417078
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1bfbd(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vaddps %xmm5, %xmm14, %xmm14
vmulps %xmm14, %xmm15, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm1
vcvttps2dq %xmm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vaddps %xmm1, %xmm7, %xmm1
jmp 0x3fb149
movq 0xf8(%rbx,%r14), %rax
vmaxps %xmm1, %xmm10, %xmm7
vminps %xmm1, %xmm10, %xmm1
vbroadcastss (%rax), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm7, %xmm1, %xmm1
jmp 0x3fb149
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %xmm2
vbroadcastss 0x4(%rax), %xmm3
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm10, %xmm2
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vmovups %xmm1, (%r8)
addq $0x10, %r8
leal 0x1(%r12), %eax
addl $0x8, %r11d
cmpl 0x38(%rsp), %r12d
movl %eax, %r12d
jne 0x3fabc3
movq 0x8(%rsp), %rcx
incq %rcx
movq %rcx, 0x8(%rsp)
cmpq 0x40(%rsp), %rcx
jne 0x3fab87
cmpl $0x4, 0x58(%rsp)
setne %al
orb 0x50(%rsp), %al
jne 0x3fb796
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
vbroadcastss 0x194da(%rip), %xmm10 # 0x414690
vbroadcastss 0x19b59(%rip), %xmm6 # 0x414d18
vbroadcastss 0x1be98(%rip), %xmm9 # 0x417060
vbroadcastss 0x1c58f(%rip), %xmm5 # 0x417760
vbroadcastss 0x1be96(%rip), %xmm11 # 0x417070
vbroadcastss 0x1be91(%rip), %xmm12 # 0x417074
vbroadcastss 0x1be8c(%rip), %xmm13 # 0x417078
cmpl $0x0, 0x38(%rsp)
js 0x3fb77e
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %r8
movq 0x8(%rsp), %rcx
imulq %rcx, %r8
imulq 0x10(%rax), %r8
addq (%rax), %r8
movq (%rbx), %rax
movq %rax, 0x18(%rsp)
shlq $0x4, %rcx
movq %rcx, 0x28(%rsp)
xorl %r11d, %r11d
xorl %r12d, %r12d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r14
cmpl $0x0, 0xec(%rbx,%r14)
je 0x3fb250
movq 0x190(%rbx,%r14), %rax
movq 0x28(%rsp), %rcx
vmovups (%rax,%rcx), %xmm1
jmp 0x3fb254
vxorps %xmm1, %xmm1, %xmm1
testl %r13d, %r13d
jle 0x3fb319
movq 0x48(%rbx), %rax
imulq 0x8(%rsp), %rax
imulq 0x18(%rbx), %rax
addq 0x8(%rbx), %rax
movq 0x70(%rsp), %rcx
movslq 0x9c(%rsp), %r15
movq 0x18(%rsp), %rdx
movq -0x18(%rdx), %rbp
movl 0xd4(%rbx,%rbp), %esi
movl 0xdc(%rbx,%rbp), %edx
imull %r11d, %edx
movslq %edx, %rdx
leaq (%rcx,%rdx,4), %rcx
addq $0xc, %rcx
imulq 0x80(%rsp), %r15
xorl %r10d, %r10d
testl %esi, %esi
jle 0x3fb30e
movl 0xd8(%rbx,%rbp), %edx
shll $0x2, %edx
movslq %edx, %rdi
shlq $0x2, %rdi
movq %rcx, %r9
movl %esi, %edx
vbroadcastss -0xc(%r9), %xmm2
vbroadcastss -0x8(%r9), %xmm3
vbroadcastss -0x4(%r9), %xmm7
vbroadcastss (%r9), %xmm14
vmulps (%rax), %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps 0x10(%rax), %xmm3, %xmm2
vmulps 0x20(%rax), %xmm7, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x30(%rax), %xmm14, %xmm2
vaddps %xmm2, %xmm1, %xmm1
addq $0x40, %rax
addq %rdi, %r9
decl %edx
jne 0x3fb2c9
incq %r10
addq %r15, %rcx
cmpq %r13, %r10
jne 0x3fb2af
movl 0xf4(%rbx,%r14), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3fb75e
leaq 0x2889d(%rip), %rcx # 0x423bd0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps 0x18d1c(%rip), %xmm1, %xmm1 # 0x414060
jmp 0x3fb75e
vbroadcastss 0x1bd06(%rip), %xmm4 # 0x417058
vminps %xmm4, %xmm1, %xmm7
vbroadcastss 0x1bcfd(%rip), %xmm8 # 0x41705c
vmaxps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm9, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vcvttps2dq %xmm14, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm14, %xmm14
vandps %xmm6, %xmm14, %xmm14
vsubps %xmm14, %xmm15, %xmm14
vmulps %xmm5, %xmm14, %xmm15
vsubps %xmm15, %xmm7, %xmm7
vmulps %xmm7, %xmm7, %xmm15
vmovaps %xmm10, %xmm4
vbroadcastss 0x1bccd(%rip), %xmm10 # 0x41706c
vmulps %xmm7, %xmm10, %xmm2
vaddps %xmm2, %xmm11, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1bcb8(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm2, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm6, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vcvttps2dq %xmm14, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vbroadcastss 0x1bc85(%rip), %xmm2 # 0x417080
vmaxps %xmm2, %xmm7, %xmm2
vpsrld $0x17, %xmm2, %xmm14
vbroadcastss 0x1bc7b(%rip), %xmm3 # 0x417088
vpaddd %xmm3, %xmm14, %xmm14
vbroadcastss 0x1bc6a(%rip), %xmm3 # 0x417084
vandps %xmm3, %xmm2, %xmm2
vorps %xmm4, %xmm2, %xmm2
vcvtdq2ps %xmm14, %xmm14
vbroadcastss 0x1bc5c(%rip), %xmm3 # 0x41708c
vcmpltps %xmm3, %xmm2, %xmm15
vandps %xmm2, %xmm15, %xmm3
vbroadcastss 0x1bc4e(%rip), %xmm0 # 0x417090
vaddps %xmm0, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vandps %xmm6, %xmm15, %xmm3
vsubps %xmm3, %xmm14, %xmm3
vmulps %xmm2, %xmm2, %xmm14
vbroadcastss 0x1bc35(%rip), %xmm15 # 0x417094
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bc2c(%rip), %xmm8 # 0x417098
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bc1e(%rip), %xmm8 # 0x41709c
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bc10(%rip), %xmm8 # 0x4170a0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bc02(%rip), %xmm8 # 0x4170a4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bbf4(%rip), %xmm8 # 0x4170a8
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bbe6(%rip), %xmm8 # 0x4170ac
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bbd8(%rip), %xmm8 # 0x4170b0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bbca(%rip), %xmm8 # 0x4170b4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1c268(%rip), %xmm8 # 0x417764
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm15, %xmm14, %xmm14
vcmpleps 0x18b51(%rip), %xmm7, %xmm7 # 0x414060
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vbroadcastss 0x1d8c0(%rip), %xmm3 # 0x418de4
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1c213(%rip), %xmm3 # 0x417744
vblendvps %xmm7, %xmm3, %xmm2, %xmm2
vbroadcastss 0x1bb18(%rip), %xmm3 # 0x417058
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1bb0f(%rip), %xmm3 # 0x41705c
vmaxps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm5, %xmm3, %xmm7
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm7
vmulps %xmm2, %xmm10, %xmm14
vmovaps %xmm4, %xmm10
vaddps %xmm11, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm12, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm13, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vbroadcastss 0x1bad6(%rip), %xmm4 # 0x41707c
vaddps %xmm4, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vmulps %xmm7, %xmm14, %xmm7
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vaddps %xmm3, %xmm3, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1d803(%rip), %xmm4 # 0x418dec
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm0, %xmm7, %xmm3
vaddps %xmm2, %xmm3, %xmm2
jmp 0x3fb75a
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %xmm7
vbroadcastss 0x4(%rax), %xmm15
vmaxps %xmm7, %xmm1, %xmm1
vminps %xmm1, %xmm15, %xmm1
jmp 0x3fb75e
vbroadcastss 0x1ba2d(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1ba24(%rip), %xmm2 # 0x417058
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1ba1b(%rip), %xmm2 # 0x41705c
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1ba12(%rip), %xmm2 # 0x417060
vmulps %xmm2, %xmm1, %xmm7
vaddps %xmm7, %xmm10, %xmm7
vcvttps2dq %xmm7, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm7, %xmm7
vandps %xmm6, %xmm7, %xmm7
vsubps %xmm7, %xmm15, %xmm7
vbroadcastss 0x1df0e(%rip), %xmm2 # 0x419584
vmulps %xmm2, %xmm7, %xmm15
vaddps %xmm1, %xmm15, %xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x1b9e1(%rip), %xmm2 # 0x41706c
vmulps %xmm2, %xmm1, %xmm14
vbroadcastss 0x1b9d8(%rip), %xmm2 # 0x417070
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b9cb(%rip), %xmm2 # 0x417074
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b9be(%rip), %xmm2 # 0x417078
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b9b1(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vmulps %xmm14, %xmm15, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm1
vcvttps2dq %xmm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vaddps %xmm1, %xmm7, %xmm1
jmp 0x3fb75e
movq 0xf8(%rbx,%r14), %rax
vxorps %xmm0, %xmm0, %xmm0
vmaxps %xmm0, %xmm1, %xmm7
vminps %xmm0, %xmm1, %xmm1
vbroadcastss (%rax), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm7, %xmm1, %xmm1
jmp 0x3fb75e
movq 0xf8(%rbx,%r14), %rax
vbroadcastss (%rax), %xmm2
vbroadcastss 0x4(%rax), %xmm3
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmaxps 0x1890a(%rip), %xmm2, %xmm2 # 0x414060
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vmovups %xmm1, (%r8)
addq $0x10, %r8
leal 0x1(%r12), %eax
addl $0x4, %r11d
cmpl 0x38(%rsp), %r12d
movl %eax, %r12d
jne 0x3fb228
movq 0x8(%rsp), %rcx
incq %rcx
movq %rcx, 0x8(%rsp)
cmpq 0x40(%rsp), %rcx
jne 0x3fb1ec
cmpl $0x1, 0x58(%rsp)
setne %al
orb %al, 0x50(%rsp)
jne 0x3fbd5b
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0x28(%rsp)
xorl %edx, %edx
vbroadcastss 0x18ecc(%rip), %xmm10 # 0x414690
vbroadcastss 0x1954b(%rip), %xmm6 # 0x414d18
vbroadcastss 0x1b88a(%rip), %xmm9 # 0x417060
vbroadcastss 0x1bf81(%rip), %xmm5 # 0x417760
vbroadcastss 0x1b888(%rip), %xmm11 # 0x417070
vbroadcastss 0x1b883(%rip), %xmm12 # 0x417074
vbroadcastss 0x1b87e(%rip), %xmm13 # 0x417078
cmpl $0x0, 0x38(%rsp)
js 0x3fbd4d
movq 0x48(%rsp), %rax
movslq 0x2c(%rax), %rdi
imulq %rdx, %rdi
imulq 0x10(%rax), %rdi
addq (%rax), %rdi
movq (%rbx), %rax
movq %rax, 0x8(%rsp)
movq %rdx, %rax
shlq $0x4, %rax
movq %rax, 0x18(%rsp)
xorl %r10d, %r10d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %r11
cmpl $0x0, 0xec(%rbx,%r11)
je 0x3fb859
movq 0x190(%rbx,%r11), %rax
movq 0x18(%rsp), %rcx
vmovups (%rax,%rcx), %xmm1
jmp 0x3fb85d
vxorps %xmm1, %xmm1, %xmm1
testl %r13d, %r13d
jle 0x3fb8ee
movq 0x48(%rbx), %r14
imulq %rdx, %r14
imulq 0x18(%rbx), %r14
addq 0x8(%rbx), %r14
movslq 0x9c(%rsp), %r15
movq 0x8(%rsp), %rax
movq -0x18(%rax), %r12
movl 0xdc(%rbx,%r12), %eax
imull %r10d, %eax
movslq %eax, %rsi
shlq $0x2, %rsi
addq 0x70(%rsp), %rsi
movl 0xd4(%rbx,%r12), %eax
imulq 0x80(%rsp), %r15
xorl %r8d, %r8d
testl %eax, %eax
jle 0x3fb8e3
movslq 0xd8(%rbx,%r12), %r9
shlq $0x2, %r9
movq %rsi, %rbp
movl %eax, %ecx
vbroadcastss (%rbp), %xmm2
vmulps (%r14), %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
addq $0x10, %r14
addq %r9, %rbp
decl %ecx
jne 0x3fb8c9
incq %r8
addq %r15, %rsi
cmpq %r13, %r8
jne 0x3fb8b4
movl 0xf4(%rbx,%r11), %eax
decl %eax
cmpl $0x5, %eax
ja 0x3fbd33
leaq 0x282e0(%rip), %rcx # 0x423be8
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
vmaxps 0x18747(%rip), %xmm1, %xmm1 # 0x414060
jmp 0x3fbd33
vbroadcastss 0x1b731(%rip), %xmm4 # 0x417058
vminps %xmm4, %xmm1, %xmm7
vbroadcastss 0x1b728(%rip), %xmm8 # 0x41705c
vmaxps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm9, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vcvttps2dq %xmm14, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm14, %xmm14
vandps %xmm6, %xmm14, %xmm14
vsubps %xmm14, %xmm15, %xmm14
vmulps %xmm5, %xmm14, %xmm15
vsubps %xmm15, %xmm7, %xmm7
vmulps %xmm7, %xmm7, %xmm15
vmovaps %xmm10, %xmm4
vbroadcastss 0x1b6f8(%rip), %xmm10 # 0x41706c
vmulps %xmm7, %xmm10, %xmm2
vaddps %xmm2, %xmm11, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1b6e3(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm2, %xmm2
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm6, %xmm7, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vcvttps2dq %xmm14, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vbroadcastss 0x1b6b0(%rip), %xmm2 # 0x417080
vmaxps %xmm2, %xmm7, %xmm2
vpsrld $0x17, %xmm2, %xmm14
vbroadcastss 0x1b6a6(%rip), %xmm3 # 0x417088
vpaddd %xmm3, %xmm14, %xmm14
vbroadcastss 0x1b695(%rip), %xmm3 # 0x417084
vandps %xmm3, %xmm2, %xmm2
vorps %xmm4, %xmm2, %xmm2
vcvtdq2ps %xmm14, %xmm14
vbroadcastss 0x1b687(%rip), %xmm3 # 0x41708c
vcmpltps %xmm3, %xmm2, %xmm15
vandps %xmm2, %xmm15, %xmm3
vbroadcastss 0x1b679(%rip), %xmm0 # 0x417090
vaddps %xmm0, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vandps %xmm6, %xmm15, %xmm3
vsubps %xmm3, %xmm14, %xmm3
vmulps %xmm2, %xmm2, %xmm14
vbroadcastss 0x1b660(%rip), %xmm15 # 0x417094
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b657(%rip), %xmm8 # 0x417098
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b649(%rip), %xmm8 # 0x41709c
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b63b(%rip), %xmm8 # 0x4170a0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b62d(%rip), %xmm8 # 0x4170a4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b61f(%rip), %xmm8 # 0x4170a8
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b611(%rip), %xmm8 # 0x4170ac
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b603(%rip), %xmm8 # 0x4170b0
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1b5f5(%rip), %xmm8 # 0x4170b4
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm2, %xmm15, %xmm15
vbroadcastss 0x1bc93(%rip), %xmm8 # 0x417764
vaddps %xmm8, %xmm15, %xmm15
vmulps %xmm15, %xmm14, %xmm14
vcmpleps 0x1857c(%rip), %xmm7, %xmm7 # 0x414060
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm14, %xmm2
vbroadcastss 0x1d2eb(%rip), %xmm3 # 0x418de4
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1bc3e(%rip), %xmm3 # 0x417744
vblendvps %xmm7, %xmm3, %xmm2, %xmm2
vbroadcastss 0x1b543(%rip), %xmm3 # 0x417058
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1b53a(%rip), %xmm3 # 0x41705c
vmaxps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vcvttps2dq %xmm3, %xmm7
vcvtdq2ps %xmm7, %xmm7
vcmpltps %xmm7, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm5, %xmm3, %xmm7
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm2, %xmm7
vmulps %xmm2, %xmm10, %xmm14
vmovaps %xmm4, %xmm10
vaddps %xmm11, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm12, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm13, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vbroadcastss 0x1b501(%rip), %xmm4 # 0x41707c
vaddps %xmm4, %xmm14, %xmm14
vmulps %xmm2, %xmm14, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vmulps %xmm7, %xmm14, %xmm7
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vcvttps2dq %xmm3, %xmm3
vpslld $0x17, %xmm3, %xmm3
vpaddd %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vaddps %xmm3, %xmm3, %xmm7
vmulps %xmm7, %xmm2, %xmm2
vbroadcastss 0x1d22e(%rip), %xmm4 # 0x418dec
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm0, %xmm7, %xmm3
vaddps %xmm2, %xmm3, %xmm2
jmp 0x3fbd2f
movq 0xf8(%rbx,%r11), %rax
vbroadcastss (%rax), %xmm7
vbroadcastss 0x4(%rax), %xmm15
vmaxps %xmm7, %xmm1, %xmm1
vminps %xmm1, %xmm15, %xmm1
jmp 0x3fbd33
vbroadcastss 0x1b458(%rip), %xmm2 # 0x417054
vxorps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1b44f(%rip), %xmm2 # 0x417058
vminps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1b446(%rip), %xmm2 # 0x41705c
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1b43d(%rip), %xmm2 # 0x417060
vmulps %xmm2, %xmm1, %xmm7
vaddps %xmm7, %xmm10, %xmm7
vcvttps2dq %xmm7, %xmm15
vcvtdq2ps %xmm15, %xmm15
vcmpltps %xmm15, %xmm7, %xmm7
vandps %xmm6, %xmm7, %xmm7
vsubps %xmm7, %xmm15, %xmm7
vbroadcastss 0x1d939(%rip), %xmm2 # 0x419584
vmulps %xmm2, %xmm7, %xmm15
vaddps %xmm1, %xmm15, %xmm1
vmulps %xmm1, %xmm1, %xmm15
vbroadcastss 0x1b40c(%rip), %xmm2 # 0x41706c
vmulps %xmm2, %xmm1, %xmm14
vbroadcastss 0x1b403(%rip), %xmm2 # 0x417070
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b3f6(%rip), %xmm2 # 0x417074
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b3e9(%rip), %xmm2 # 0x417078
vaddps %xmm2, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vbroadcastss 0x1b3dc(%rip), %xmm0 # 0x41707c
vaddps %xmm0, %xmm14, %xmm14
vmulps %xmm1, %xmm14, %xmm14
vaddps %xmm10, %xmm14, %xmm14
vmulps %xmm14, %xmm15, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm1
vcvttps2dq %xmm7, %xmm7
vpslld $0x17, %xmm7, %xmm7
vpaddd %xmm6, %xmm7, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vaddps %xmm1, %xmm7, %xmm1
jmp 0x3fbd33
movq 0xf8(%rbx,%r11), %rax
vxorps %xmm0, %xmm0, %xmm0
vmaxps %xmm0, %xmm1, %xmm7
vminps %xmm0, %xmm1, %xmm1
vbroadcastss (%rax), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm7, %xmm1, %xmm1
jmp 0x3fbd33
movq 0xf8(%rbx,%r11), %rax
vbroadcastss (%rax), %xmm2
vbroadcastss 0x4(%rax), %xmm3
vmulps %xmm1, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmaxps 0x18335(%rip), %xmm2, %xmm2 # 0x414060
vminps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vmovups %xmm1, (%rdi)
addq $0x10, %rdi
leal 0x1(%r10), %eax
cmpl 0x38(%rsp), %r10d
movl %eax, %r10d
jne 0x3fb831
incq %rdx
cmpq 0x28(%rsp), %rdx
jne 0x3fb7fa
cmpl $0x4, 0x58(%rsp)
setne %al
orb 0x27(%rsp), %al
jne 0x3fc065
movq 0x48(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0xd8(%rsp)
movslq 0x2c(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0xd0(%rsp)
movq (%rbx), %rax
movq %rax, 0x30(%rsp)
movq 0x48(%rbx), %rax
imulq 0x18(%rbx), %rax
movq %rax, 0xc8(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0xc0(%rsp)
movl 0x14(%rsp), %eax
movq %rax, 0xe0(%rsp)
movl 0x60(%rsp), %eax
movq %rax, 0x8(%rsp)
movq $0x0, 0x18(%rsp)
cmpl $0x0, 0x38(%rsp)
js 0x3fc04a
movq 0xd0(%rsp), %rbp
movq 0x18(%rsp), %rax
imulq %rax, %rbp
addq 0xd8(%rsp), %rbp
movq 0xc8(%rsp), %rcx
imulq %rax, %rcx
addq 0xc0(%rsp), %rcx
movq %rcx, 0x28(%rsp)
movslq 0x9c(%rsp), %r12
imulq 0x80(%rsp), %r12
movq 0x70(%rsp), %rax
movq %rax, 0x40(%rsp)
movq (%rbx), %rax
movq %rax, 0x50(%rsp)
xorl %r15d, %r15d
xorl %r14d, %r14d
movq 0x30(%rsp), %rax
movq -0x18(%rax), %rax
cmpl $0x0, 0xec(%rbx,%rax)
je 0x3fbe5e
movq 0x190(%rbx,%rax), %rcx
movq 0x18(%rsp), %rdx
vmovss (%rcx,%rdx,4), %xmm4
jmp 0x3fbe62
vxorps %xmm4, %xmm4, %xmm4
testl %r13d, %r13d
jle 0x3fbeec
movq 0x50(%rsp), %rcx
movq -0x18(%rcx), %rcx
movl 0xd4(%rbx,%rcx), %edx
movl 0xdc(%rbx,%rcx), %esi
imull %r15d, %esi
movslq %esi, %rsi
movq 0x40(%rsp), %rdi
leaq (%rdi,%rsi,4), %rsi
xorl %edi, %edi
movq 0x28(%rsp), %r8
testl %edx, %edx
jle 0x3fbee1
movl 0xd8(%rbx,%rcx), %r9d
shll $0x2, %r9d
movslq %r9d, %r9
shlq $0x2, %r9
movq %rsi, %r10
movl %edx, %r11d
vmovups (%r8), %xmm0
vmulps (%r10), %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0]
vaddps %xmm0, %xmm1, %xmm0
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vaddss %xmm0, %xmm4, %xmm0
vaddss %xmm0, %xmm1, %xmm4
addq $0x10, %r8
addq %r9, %r10
decl %r11d
jne 0x3fbeb6
incq %rdi
addq %r12, %rsi
cmpq %r13, %rdi
jne 0x3fbe99
movl 0xf4(%rbx,%rax), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x3fc021
leaq 0x27cfb(%rip), %rdx # 0x423c00
movslq (%rdx,%rcx,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
vmaxss 0x1af72(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x3fc025
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x68(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x18de3(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
vmulss 0x68(%rsp), %xmm0, %xmm0
jmp 0x3fc025
movq 0xf8(%rbx,%rax), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x3fc025
vmovaps %xmm1, %xmm0
jmp 0x3fc025
vminss 0x1b0e2(%rip), %xmm4, %xmm0 # 0x417058
vbroadcastss 0x1b0d5(%rip), %xmm1 # 0x417054
vxorps %xmm1, %xmm0, %xmm1
vcmpltss 0x1b0d0(%rip), %xmm0, %xmm0 # 0x41705c
vbroadcastss 0x1b0c3(%rip), %xmm2 # 0x417058
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vzeroupper
callq 0x563e0
vmovss 0x18d6d(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x3fc025
movq 0xf8(%rbx,%rax), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm4, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x18d45(%rip), %xmm2 # 0x414d18
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm4, %xmm0, %xmm0
jmp 0x3fc025
movq 0xf8(%rbx,%rax), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vbroadcastss 0x1b05b(%rip), %xmm0 # 0x417054
vxorps %xmm0, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
jb 0x3fc025
vmovss 0x18d05(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x3fc040
vmovaps %xmm4, %xmm0
vmovss %xmm0, (%rbp,%r14,4)
incq %r14
addl $0x4, %r15d
cmpq 0x8(%rsp), %r14
jne 0x3fbe37
jmp 0x3fc04a
vmulss %xmm4, %xmm1, %xmm0
vaddss %xmm2, %xmm0, %xmm0
jmp 0x3fbfd9
movq 0x18(%rsp), %rcx
incq %rcx
movq %rcx, 0x18(%rsp)
cmpq 0xe0(%rsp), %rcx
jne 0x3fbdd1
cmpl $0x1, 0x58(%rsp)
setne %al
orb %al, 0x27(%rsp)
jne 0x3fc2fa
movq 0x48(%rsp), %rax
movq (%rax), %rcx
movq %rcx, 0x68(%rsp)
movslq 0x2c(%rax), %rcx
imulq 0x10(%rax), %rcx
movq %rcx, 0x48(%rsp)
movq (%rbx), %r11
movl 0x14(%rsp), %eax
movq %rax, 0x50(%rsp)
movl 0x60(%rsp), %eax
movq %rax, 0x30(%rsp)
xorl %r15d, %r15d
movq %r11, 0x28(%rsp)
cmpl $0x0, 0x38(%rsp)
js 0x3fc2ec
movq 0x48(%rsp), %rbp
imulq %r15, %rbp
addq 0x68(%rsp), %rbp
movl %r15d, %eax
imull %r13d, %eax
movl %eax, 0x8(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0x18(%rsp)
movslq 0x9c(%rsp), %r12
imulq 0x80(%rsp), %r12
xorl %r14d, %r14d
movq -0x18(%r11), %rax
cmpl $0x0, 0xec(%rbx,%rax)
je 0x3fc10f
movq 0x190(%rbx,%rax), %rcx
vmovss (%rcx,%r15,4), %xmm4
jmp 0x3fc113
vxorps %xmm4, %xmm4, %xmm4
testl %r13d, %r13d
jle 0x3fc188
movl 0xd4(%rbx,%rax), %ecx
movl 0x8(%rsp), %edx
imull %ecx, %edx
movslq %edx, %rdx
shlq $0x2, %rdx
addq 0x148(%rbx,%rax), %rdx
movl 0xdc(%rbx,%rax), %esi
imull %r14d, %esi
movslq %esi, %rsi
movq 0x18(%rsp), %rdi
leaq (%rdi,%rsi,4), %rsi
xorl %edi, %edi
testl %ecx, %ecx
jle 0x3fc17d
movslq 0xd8(%rbx,%rax), %r8
shlq $0x2, %r8
movq %rsi, %r9
movl %ecx, %r10d
vmovss (%rdx), %xmm0
vmulss (%r9), %xmm0, %xmm0
vaddss %xmm4, %xmm0, %xmm4
addq $0x4, %rdx
addq %r8, %r9
decl %r10d
jne 0x3fc164
incq %rdi
addq %r12, %rsi
cmpq %r13, %rdi
jne 0x3fc14e
movl 0xf4(%rbx,%rax), %ecx
decl %ecx
cmpl $0x5, %ecx
ja 0x3fc2c7
leaq 0x27a77(%rip), %rdx # 0x423c18
movslq (%rdx,%rcx,4), %rcx
addq %rdx, %rcx
jmpq *%rcx
vmaxss 0x1acd6(%rip), %xmm4, %xmm0 # 0x416e88
jmp 0x3fc2cb
vmovaps %xmm4, %xmm0
vmovss %xmm4, 0x40(%rsp)
vzeroupper
callq 0x563e0
vaddss 0x18b47(%rip), %xmm0, %xmm0 # 0x414d18
callq 0x56200
callq 0x56160
movq 0x28(%rsp), %r11
vmulss 0x40(%rsp), %xmm0, %xmm0
jmp 0x3fc2cb
movq 0xf8(%rbx,%rax), %rax
vmovss 0x4(%rax), %xmm1
vmaxss (%rax), %xmm4, %xmm0
vucomiss %xmm1, %xmm0
jbe 0x3fc2cb
vmovaps %xmm1, %xmm0
jmp 0x3fc2cb
vminss 0x1ae41(%rip), %xmm4, %xmm0 # 0x417058
vbroadcastss 0x1ae34(%rip), %xmm1 # 0x417054
vxorps %xmm1, %xmm0, %xmm1
vcmpltss 0x1ae2f(%rip), %xmm0, %xmm0 # 0x41705c
vbroadcastss 0x1ae22(%rip), %xmm2 # 0x417058
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vzeroupper
callq 0x563e0
movq 0x28(%rsp), %r11
vmovss 0x18ac7(%rip), %xmm1 # 0x414d18
vaddss %xmm1, %xmm0, %xmm0
vdivss %xmm0, %xmm1, %xmm0
jmp 0x3fc2cb
movq 0xf8(%rbx,%rax), %rax
vxorps %xmm0, %xmm0, %xmm0
vcmpltss %xmm4, %xmm0, %xmm0
vmovss (%rax), %xmm1
vbroadcastss 0x18a9f(%rip), %xmm2 # 0x414d18
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmulss %xmm4, %xmm0, %xmm0
jmp 0x3fc2cb
movq 0xf8(%rbx,%rax), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm2
vbroadcastss 0x1adb5(%rip), %xmm0 # 0x417054
vxorps %xmm0, %xmm2, %xmm0
vdivss %xmm1, %xmm0, %xmm3
vxorps %xmm0, %xmm0, %xmm0
vucomiss %xmm3, %xmm4
jb 0x3fc2cb
vmovss 0x18a5f(%rip), %xmm0 # 0x414d18
vdivss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm3, %xmm0
vucomiss %xmm0, %xmm4
jbe 0x3fc2e2
vmovaps %xmm4, %xmm0
vmovss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq 0x30(%rsp), %r14
jne 0x3fc0f1
jmp 0x3fc2ec
vmulss %xmm4, %xmm1, %xmm0
vaddss %xmm2, %xmm0, %xmm0
jmp 0x3fc27f
incq %r15
cmpq 0x50(%rsp), %r15
jne 0x3fc0af
xorl %ebp, %ebp
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x3fc338
lock
decl (%rax)
jne 0x3fc338
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
je 0x3fc328
movq (%rdi), %rax
vzeroupper
callq *0x18(%rax)
jmp 0x3fc338
testq %rsi, %rsi
je 0x3fc338
movq %rsi, %rdi
vzeroupper
callq 0x563b0
movl %ebp, %eax
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
jmp 0x3fc394
jmp 0x3fc353
movq %rax, %rbx
movq 0x78(%rsp), %rax
testq %rax, %rax
je 0x3fc38c
lock
decl (%rax)
jne 0x3fc38c
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdi
testq %rdi, %rdi
jne 0x3fc386
testq %rsi, %rsi
je 0x3fc38c
movq %rsi, %rdi
callq 0x563b0
jmp 0x3fc38c
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/build_O3/src/layer/x86/convolution1d_x86_avx.cpp
|
ncnn::DeconvolutionDepthWise3D::load_param(ncnn::ParamDict const&)
|
// Read all layer hyper-parameters from the param dictionary.
// Height/depth variants fall back to their width counterpart when absent.
// Always returns 0.
int DeconvolutionDepthWise3D::load_param(const ParamDict& pd)
{
    num_output = pd.get(0, 0);

    // Kernel extent: h/d default to the w value.
    const int kw = pd.get(1, 0);
    kernel_w = kw;
    kernel_h = pd.get(11, kw);
    kernel_d = pd.get(21, kw);

    // Dilation: h/d default to the w value.
    const int dw = pd.get(2, 1);
    dilation_w = dw;
    dilation_h = pd.get(12, dw);
    dilation_d = pd.get(22, dw);

    // Stride: h/d default to the w value.
    const int sw = pd.get(3, 1);
    stride_w = sw;
    stride_h = pd.get(13, sw);
    stride_d = pd.get(23, sw);

    // Padding: right/top/front default to left, bottom to top, behind to front.
    const int pl = pd.get(4, 0);
    pad_left = pl;
    pad_right = pd.get(15, pl);
    const int pt = pd.get(14, pl);
    pad_top = pt;
    pad_bottom = pd.get(16, pt);
    const int pf = pd.get(24, pl);
    pad_front = pf;
    pad_behind = pd.get(17, pf);

    // Extra output padding: bottom/behind default to the right value.
    const int opr = pd.get(18, 0);
    output_pad_right = opr;
    output_pad_bottom = pd.get(19, opr);
    output_pad_behind = pd.get(20, opr);

    // Explicit output size override: h/d default to the w value.
    const int ow = pd.get(25, 0);
    output_w = ow;
    output_h = pd.get(26, ow);
    output_d = pd.get(27, ow);

    bias_term = pd.get(5, 0);
    weight_data_size = pd.get(6, 0);
    group = pd.get(7, 1);
    activation_type = pd.get(9, 0);
    activation_params = pd.get(10, Mat());

    return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xd0(%rbx)
movq %r14, %rdi
movl $0x1, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xd4(%rbx)
movq %r14, %rdi
movl $0xb, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xd8(%rbx)
movl 0xd4(%rbx), %edx
movq %r14, %rdi
movl $0x15, %esi
callq 0x69336
movl %eax, 0xdc(%rbx)
movq %r14, %rdi
movl $0x2, %esi
movl $0x1, %edx
callq 0x69336
movl %eax, 0xe0(%rbx)
movq %r14, %rdi
movl $0xc, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xe4(%rbx)
movl 0xe0(%rbx), %edx
movq %r14, %rdi
movl $0x16, %esi
callq 0x69336
movl %eax, 0xe8(%rbx)
movq %r14, %rdi
movl $0x3, %esi
movl $0x1, %edx
callq 0x69336
movl %eax, 0xec(%rbx)
movq %r14, %rdi
movl $0xd, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xf0(%rbx)
movl 0xec(%rbx), %edx
movq %r14, %rdi
movl $0x17, %esi
callq 0x69336
movl %eax, 0xf4(%rbx)
movq %r14, %rdi
movl $0x4, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xf8(%rbx)
movq %r14, %rdi
movl $0xf, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xfc(%rbx)
movl 0xf8(%rbx), %edx
movq %r14, %rdi
movl $0xe, %esi
callq 0x69336
movl %eax, 0x100(%rbx)
movq %r14, %rdi
movl $0x10, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0x104(%rbx)
movl 0xf8(%rbx), %edx
movq %r14, %rdi
movl $0x18, %esi
callq 0x69336
movl %eax, 0x108(%rbx)
movq %r14, %rdi
movl $0x11, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0x10c(%rbx)
movq %r14, %rdi
movl $0x12, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x110(%rbx)
movq %r14, %rdi
movl $0x13, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0x114(%rbx)
movl 0x110(%rbx), %edx
movq %r14, %rdi
movl $0x14, %esi
callq 0x69336
movl %eax, 0x118(%rbx)
movq %r14, %rdi
movl $0x19, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x11c(%rbx)
movq %r14, %rdi
movl $0x1a, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0x120(%rbx)
movl 0x11c(%rbx), %edx
movq %r14, %rdi
movl $0x1b, %esi
callq 0x69336
movl %eax, 0x124(%rbx)
movq %r14, %rdi
movl $0x5, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x128(%rbx)
movq %r14, %rdi
movl $0x6, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x12c(%rbx)
movq %r14, %rdi
movl $0x7, %esi
movl $0x1, %edx
callq 0x69336
movl %eax, 0x130(%rbx)
movq %r14, %rdi
movl $0x9, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x134(%rbx)
leaq 0x50(%rsp), %rcx
movq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
movq %r15, %rdi
movq %r14, %rsi
movl $0xa, %edx
callq 0x69372
leaq 0x138(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x40a3b1
testq %rax, %rax
je 0x40a31d
lock
incl (%rax)
movq 0x140(%rbx), %rax
testq %rax, %rax
je 0x40a356
lock
decl (%rax)
jne 0x40a356
movq 0x138(%rbx), %rsi
movq 0x158(%rbx), %rdi
testq %rdi, %rdi
je 0x40a349
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40a356
testq %rsi, %rsi
je 0x40a356
movq %rsi, %rdi
callq 0x563b0
movq (%rsp), %rax
movq %rax, 0x138(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x140(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x148(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x150(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x158(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x160(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x170(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x178(%rbx)
testq %rax, %rax
je 0x40a3de
lock
decl (%rax)
jne 0x40a3de
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x40a3d1
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40a3de
testq %rsi, %rsi
je 0x40a3de
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
movl $0x0, 0x38(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x40a433
lock
decl (%rax)
jne 0x40a433
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x40a426
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40a433
testq %rsi, %rsi
je 0x40a433
movq %rsi, %rdi
callq 0x563b0
xorl %eax, %eax
addq $0xa0, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x40a477
lock
decl (%rax)
jne 0x40a477
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x40a471
testq %rsi, %rsi
je 0x40a477
movq %rsi, %rdi
callq 0x563b0
jmp 0x40a477
movq (%rdi), %rax
callq *0x18(%rax)
movq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
movl $0x0, 0x38(%rsp)
jmp 0x40a4a4
jmp 0x40a4df
jmp 0x40a4df
jmp 0x40a4df
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x40a4d7
lock
decl (%rax)
jne 0x40a4d7
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x40a4d1
testq %rsi, %rsi
je 0x40a4d7
movq %rsi, %rdi
callq 0x563b0
jmp 0x40a4d7
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
nop
|
/ysh329[P]ncnn/src/layer/deconvolutiondepthwise3d.cpp
|
ncnn::sum_dim(std::vector<int, std::allocator<int>> const&, int, std::vector<ncnn::Mat, std::allocator<ncnn::Mat>> const&, std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>> const&, std::vector<int, std::allocator<int>>&)
|
// Recursively sum get_indexed_value() over the full index space described by
// dim_sizes, fixing one dimension per recursion level via indexes[d].
// At the deepest level the product of all bottom blobs' indexed values is
// taken; the recursion accumulates those products.
static float sum_dim(const std::vector<int>& dim_sizes, int d, const std::vector<Mat>& bottom_blobs, const std::vector<std::string>& tokens, std::vector<int>& indexes)
{
    // Base case: every dimension is fixed — multiply the indexed value of each blob.
    if (d == (int)dim_sizes.size())
    {
        float product = 1.f;
        for (size_t b = 0; b < bottom_blobs.size(); b++)
        {
            product *= get_indexed_value(bottom_blobs[b], tokens[b], indexes);
        }
        return product;
    }

    // Recursive case: iterate dimension d and accumulate the sub-sums.
    float acc = 0.f;
    const int extent = dim_sizes[d];
    for (int i = 0; i < extent; i++)
    {
        indexes[d] = i;
        acc += sum_dim(dim_sizes, d + 1, bottom_blobs, tokens, indexes);
    }
    return acc;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %r8, %rbx
movq %rcx, %r15
movq %rdx, %rbp
movq %rdi, %r12
movq (%rdi), %rax
movq 0x8(%rdi), %rdi
subq %rax, %rdi
shrq $0x2, %rdi
movl %esi, (%rsp)
cmpl %esi, %edi
jne 0x40ca6c
movq (%rbp), %rax
movq 0x8(%rbp), %rdx
subq %rax, %rdx
je 0x40caba
sarq $0x3, %rdx
movabsq $-0x71c71c71c71c71c7, %r12 # imm = 0x8E38E38E38E38E39
imulq %rdx, %r12
movq (%r15), %rdx
movq (%rbx), %rsi
cmpq $0x1, %r12
adcq $0x0, %r12
movss 0x83ec(%rip), %xmm1 # 0x414d18
leaq 0x1757d(%rip), %rdi # 0x423eb0
movl 0x28(%rax), %r9d
decl %r9d
cmpl $0x3, %r9d
ja 0x40ca67
movq (%rdx), %r8
movslq (%rdi,%r9,4), %r9
addq %rdi, %r9
jmpq *%r9
movsbq (%r8), %r8
movslq -0x1a4(%rsi,%r8,4), %r8
shlq $0x2, %r8
addq (%rax), %r8
jmp 0x40ca4b
movsbq (%r8), %r9
movslq -0x1a4(%rsi,%r9,4), %r9
movsbq 0x1(%r8), %r10
movslq -0x1a4(%rsi,%r10,4), %r10
movsbq 0x2(%r8), %r8
movslq -0x1a4(%rsi,%r8,4), %r8
movslq 0x2c(%rax), %r11
imulq 0x40(%rax), %r9
movq 0x10(%rax), %rbx
imulq %rbx, %r9
addq (%rax), %r9
imulq %rbx, %r11
imulq %r10, %r11
addq %r9, %r11
leaq (%r11,%r8,4), %r8
jmp 0x40ca4b
movsbq (%r8), %r9
movslq -0x1a4(%rsi,%r9,4), %r9
movsbq 0x1(%r8), %r10
movslq -0x1a4(%rsi,%r10,4), %r10
movsbq 0x2(%r8), %r11
movslq -0x1a4(%rsi,%r11,4), %r11
movsbq 0x3(%r8), %r8
movslq -0x1a4(%rsi,%r8,4), %r8
movslq 0x2c(%rax), %rbx
imulq 0x40(%rax), %r9
movq 0x10(%rax), %r14
imulq %r14, %r9
addq (%rax), %r9
movslq 0x30(%rax), %r15
imulq %r14, %rbx
imulq %rbx, %r15
imulq %r10, %r15
imulq %r11, %rbx
addq %r15, %rbx
addq %r9, %rbx
leaq (%rbx,%r8,4), %r8
jmp 0x40ca4b
movsbq (%r8), %r9
movslq -0x1a4(%rsi,%r9,4), %r9
movsbq 0x1(%r8), %r8
movslq 0x2c(%rax), %r10
imulq %r9, %r10
imulq 0x10(%rax), %r10
movslq -0x1a4(%rsi,%r8,4), %r8
addq (%rax), %r10
leaq (%r10,%r8,4), %r8
movss (%r8), %xmm0
mulss %xmm0, %xmm1
addq $0x48, %rax
addq $0x20, %rdx
decq %r12
jne 0x40c933
jmp 0x40cac7
xorps %xmm0, %xmm0
jmp 0x40ca50
movslq (%rsp), %r13
cmpl $0x0, (%rax,%r13,4)
jle 0x40cac4
incl (%rsp)
xorps %xmm1, %xmm1
xorl %r14d, %r14d
movss %xmm1, 0x4(%rsp)
movq (%rbx), %rax
movl %r14d, (%rax,%r13,4)
movq %r12, %rdi
movl (%rsp), %esi
movq %rbp, %rdx
movq %r15, %rcx
movq %rbx, %r8
callq 0x40c8c3
movss 0x4(%rsp), %xmm1
addss %xmm0, %xmm1
incl %r14d
movq (%r12), %rax
cmpl (%rax,%r13,4), %r14d
jl 0x40ca80
jmp 0x40cac7
movss 0x8256(%rip), %xmm1 # 0x414d18
jmp 0x40cac7
xorps %xmm1, %xmm1
movaps %xmm1, %xmm0
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/ysh329[P]ncnn/src/layer/einsum.cpp
|
ncnn::DeformableConv2D::load_param(ncnn::ParamDict const&)
|
// Read all layer hyper-parameters from the param dictionary.
// Height variants fall back to their width counterpart when absent.
// Always returns 0.
int DeformableConv2D::load_param(const ParamDict& pd)
{
    num_output = pd.get(0, 0);

    // Kernel extent: height defaults to the width value.
    const int kw = pd.get(1, 0);
    kernel_w = kw;
    kernel_h = pd.get(11, kw);

    // Dilation: height defaults to the width value.
    const int dw = pd.get(2, 1);
    dilation_w = dw;
    dilation_h = pd.get(12, dw);

    // Stride: height defaults to the width value.
    const int sw = pd.get(3, 1);
    stride_w = sw;
    stride_h = pd.get(13, sw);

    // Padding: right/top default to left, bottom defaults to top.
    const int pl = pd.get(4, 0);
    pad_left = pl;
    pad_right = pd.get(15, pl);
    const int pt = pd.get(14, pl);
    pad_top = pt;
    pad_bottom = pd.get(16, pt);

    bias_term = pd.get(5, 0);
    weight_data_size = pd.get(6, 0);
    activation_type = pd.get(9, 0);
    activation_params = pd.get(10, Mat());

    return 0;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0xa0, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq %rsi, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xd0(%rbx)
movq %r14, %rdi
movl $0x1, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xd4(%rbx)
movq %r14, %rdi
movl $0xb, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xd8(%rbx)
movq %r14, %rdi
movl $0x2, %esi
movl $0x1, %edx
callq 0x69336
movl %eax, 0xdc(%rbx)
movq %r14, %rdi
movl $0xc, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xe0(%rbx)
movq %r14, %rdi
movl $0x3, %esi
movl $0x1, %edx
callq 0x69336
movl %eax, 0xe4(%rbx)
movq %r14, %rdi
movl $0xd, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xe8(%rbx)
movq %r14, %rdi
movl $0x4, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xec(%rbx)
movq %r14, %rdi
movl $0xf, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xf0(%rbx)
movl 0xec(%rbx), %edx
movq %r14, %rdi
movl $0xe, %esi
callq 0x69336
movl %eax, 0xf4(%rbx)
movq %r14, %rdi
movl $0x10, %esi
movl %eax, %edx
callq 0x69336
movl %eax, 0xf8(%rbx)
movq %r14, %rdi
movl $0x5, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0xfc(%rbx)
movq %r14, %rdi
movl $0x6, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x100(%rbx)
movq %r14, %rdi
movl $0x9, %esi
xorl %edx, %edx
callq 0x69336
movl %eax, 0x104(%rbx)
leaq 0x50(%rsp), %rcx
movq $0x0, 0x40(%rcx)
xorps %xmm0, %xmm0
movaps %xmm0, (%rcx)
movups %xmm0, 0xc(%rcx)
movaps %xmm0, 0x20(%rcx)
movups %xmm0, 0x2c(%rcx)
movq %rsp, %r15
movq %r15, %rdi
movq %r14, %rsi
movl $0xa, %edx
callq 0x69372
leaq 0x108(%rbx), %rcx
movq 0x8(%rsp), %rax
cmpq %r15, %rcx
je 0x40d08a
testq %rax, %rax
je 0x40cff6
lock
incl (%rax)
movq 0x110(%rbx), %rax
testq %rax, %rax
je 0x40d02f
lock
decl (%rax)
jne 0x40d02f
movq 0x108(%rbx), %rsi
movq 0x128(%rbx), %rdi
testq %rdi, %rdi
je 0x40d022
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40d02f
testq %rsi, %rsi
je 0x40d02f
movq %rsi, %rdi
callq 0x563b0
movq (%rsp), %rax
movq %rax, 0x108(%rbx)
movq 0x8(%rsp), %rax
movq %rax, 0x110(%rbx)
movq 0x10(%rsp), %rcx
movq %rcx, 0x118(%rbx)
movl 0x18(%rsp), %ecx
movl %ecx, 0x120(%rbx)
movq 0x20(%rsp), %rcx
movq %rcx, 0x128(%rbx)
movups 0x28(%rsp), %xmm0
movups %xmm0, 0x130(%rbx)
movl 0x38(%rsp), %ecx
movl %ecx, 0x140(%rbx)
movq 0x40(%rsp), %rcx
movq %rcx, 0x148(%rbx)
testq %rax, %rax
je 0x40d0b7
lock
decl (%rax)
jne 0x40d0b7
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
je 0x40d0aa
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40d0b7
testq %rsi, %rsi
je 0x40d0b7
movq %rsi, %rdi
callq 0x563b0
movq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
movl $0x0, 0x38(%rsp)
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x40d10c
lock
decl (%rax)
jne 0x40d10c
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x40d0ff
movq (%rdi), %rax
callq *0x18(%rax)
jmp 0x40d10c
testq %rsi, %rsi
je 0x40d10c
movq %rsi, %rdi
callq 0x563b0
xorl %eax, %eax
addq $0xa0, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0x8(%rsp), %rax
testq %rax, %rax
je 0x40d150
lock
decl (%rax)
jne 0x40d150
movq (%rsp), %rsi
movq 0x20(%rsp), %rdi
testq %rdi, %rdi
jne 0x40d14a
testq %rsi, %rsi
je 0x40d150
movq %rsi, %rdi
callq 0x563b0
jmp 0x40d150
movq (%rdi), %rax
callq *0x18(%rax)
movq $0x0, 0x40(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movups %xmm0, 0xc(%rsp)
movups %xmm0, 0x28(%rsp)
movl $0x0, 0x38(%rsp)
jmp 0x40d17d
jmp 0x40d1b8
jmp 0x40d1b8
jmp 0x40d1b8
movq %rax, %rbx
movq 0x58(%rsp), %rax
testq %rax, %rax
je 0x40d1b0
lock
decl (%rax)
jne 0x40d1b0
movq 0x50(%rsp), %rsi
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
jne 0x40d1aa
testq %rsi, %rsi
je 0x40d1b0
movq %rsi, %rdi
callq 0x563b0
jmp 0x40d1b0
movq (%rdi), %rax
callq *0x18(%rax)
movq %rbx, %rdi
callq 0x56310
movq %rax, %rdi
callq 0x598e3
|
/ysh329[P]ncnn/src/layer/deformableconv2d.cpp
|
cJSON_strdup
|
/* Duplicate a NUL-terminated byte string using the caller-supplied allocator.
 * Returns NULL when the input is NULL or the allocation fails; the copy
 * includes the terminating NUL. */
static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks)
{
    size_t length;
    unsigned char *copy;

    if (string == NULL)
    {
        return NULL;
    }

    /* sizeof("") accounts for the terminating NUL byte */
    length = strlen((const char*)string) + sizeof("");
    copy = (unsigned char*)hooks->allocate(length);
    if (copy == NULL)
    {
        return NULL;
    }

    return (unsigned char*)memcpy(copy, string, length);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq $0x0, -0x20(%rbp)
movq $0x0, -0x28(%rbp)
cmpq $0x0, -0x10(%rbp)
jne 0x3721
movq $0x0, -0x8(%rbp)
jmp 0x376d
movq -0x10(%rbp), %rdi
callq 0x30a0
addq $0x1, %rax
movq %rax, -0x20(%rbp)
movq -0x18(%rbp), %rax
movq (%rax), %rax
movq -0x20(%rbp), %rdi
callq *%rax
movq %rax, -0x28(%rbp)
cmpq $0x0, -0x28(%rbp)
jne 0x3754
movq $0x0, -0x8(%rbp)
jmp 0x376d
movq -0x28(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x20(%rbp), %rdx
callq 0x3100
movq -0x28(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/../cJSON.c
|
get_object_item
|
/* Look up a child of `object` whose key equals `name`, either with exact
 * (case_sensitive) or case-folded comparison. Returns NULL when object or
 * name is NULL, or when no matching keyed child exists. */
static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
{
    cJSON *element;

    if ((object == NULL) || (name == NULL))
    {
        return NULL;
    }

    element = object->child;
    if (case_sensitive)
    {
        /* exact byte-wise key match; skip children without a key */
        while ((element != NULL) && (element->string != NULL) && (strcmp(name, element->string) != 0))
        {
            element = element->next;
        }
    }
    else
    {
        /* case-insensitive key match; the helper is passed the key as-is,
         * including a possible NULL */
        while ((element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(element->string)) != 0))
        {
            element = element->next;
        }
    }

    if ((element == NULL) || (element->string == NULL))
    {
        return NULL;
    }
    return element;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movl %edx, -0x1c(%rbp)
movq $0x0, -0x28(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0x46c9
cmpq $0x0, -0x18(%rbp)
jne 0x46d6
movq $0x0, -0x8(%rbp)
jmp 0x479c
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq %rax, -0x28(%rbp)
cmpl $0x0, -0x1c(%rbp)
je 0x4738
jmp 0x46ea
xorl %eax, %eax
cmpq $0x0, -0x28(%rbp)
movb %al, -0x29(%rbp)
je 0x4720
movq -0x28(%rbp), %rcx
xorl %eax, %eax
cmpq $0x0, 0x38(%rcx)
movb %al, -0x29(%rbp)
je 0x4720
movq -0x18(%rbp), %rdi
movq -0x28(%rbp), %rax
movq 0x38(%rax), %rsi
callq 0x30e0
cmpl $0x0, %eax
setne %al
movb %al, -0x29(%rbp)
movb -0x29(%rbp), %al
testb $0x1, %al
jne 0x4729
jmp 0x4736
movq -0x28(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x28(%rbp)
jmp 0x46ea
jmp 0x4778
jmp 0x473a
xorl %eax, %eax
cmpq $0x0, -0x28(%rbp)
movb %al, -0x2a(%rbp)
je 0x4760
movq -0x18(%rbp), %rdi
movq -0x28(%rbp), %rax
movq 0x38(%rax), %rsi
callq 0x8bf0
cmpl $0x0, %eax
setne %al
movb %al, -0x2a(%rbp)
movb -0x2a(%rbp), %al
testb $0x1, %al
jne 0x4769
jmp 0x4776
movq -0x28(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x28(%rbp)
jmp 0x473a
jmp 0x4778
cmpq $0x0, -0x28(%rbp)
je 0x478a
movq -0x28(%rbp), %rax
cmpq $0x0, 0x38(%rax)
jne 0x4794
movq $0x0, -0x8(%rbp)
jmp 0x479c
movq -0x28(%rbp), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/../cJSON.c
|
minify_string
|
/* Copy a quoted JSON string verbatim from *input to *output, advancing both
 * pointers past the closing quote. Escaped quotes (backslash-quote) are
 * copied as a pair so they do not terminate the copy early. If the input
 * ends before a closing quote, both pointers stop at the NUL. */
static void minify_string(char **input, char **output) {
    char *in = *input;
    char *out = *output;

    /* copy the opening quote */
    *out = *in;
    ++in;
    ++out;

    while (*in != '\0') {
        *out = *in;
        if (*in == '\"') {
            /* closing quote: include it, then stop */
            ++in;
            ++out;
            break;
        }
        if ((in[0] == '\\') && (in[1] == '\"')) {
            /* escaped quote: copy the quote character as well and
             * advance one extra position (loop tail advances the other) */
            out[1] = in[1];
            ++in;
            ++out;
        }
        ++in;
        ++out;
    }

    *input = in;
    *output = out;
}
|
pushq %rbp
movq %rsp, %rbp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq -0x8(%rbp), %rax
movq (%rax), %rax
movb (%rax), %cl
movq -0x10(%rbp), %rax
movq (%rax), %rax
movb %cl, (%rax)
movq -0x8(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movq -0x8(%rbp), %rax
movq (%rax), %rax
movsbl (%rax), %eax
cmpl $0x0, %eax
je 0x633a
movq -0x8(%rbp), %rax
movq (%rax), %rax
movb (%rax), %cl
movq -0x10(%rbp), %rax
movq (%rax), %rax
movb %cl, (%rax)
movq -0x8(%rbp), %rax
movq (%rax), %rax
movsbl (%rax), %eax
cmpl $0x22, %eax
jne 0x62c6
movq -0x10(%rbp), %rax
movq (%rax), %rax
movb $0x22, (%rax)
movq -0x8(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
jmp 0x633a
movq -0x8(%rbp), %rax
movq (%rax), %rax
movsbl (%rax), %eax
cmpl $0x5c, %eax
jne 0x6315
movq -0x8(%rbp), %rax
movq (%rax), %rax
movsbl 0x1(%rax), %eax
cmpl $0x22, %eax
jne 0x6315
movq -0x8(%rbp), %rax
movq (%rax), %rax
movb 0x1(%rax), %cl
movq -0x10(%rbp), %rax
movq (%rax), %rax
movb %cl, 0x1(%rax)
movq -0x8(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
jmp 0x6317
jmp 0x6319
movq -0x8(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
movq -0x10(%rbp), %rax
movq (%rax), %rcx
addq $0x1, %rcx
movq %rcx, (%rax)
jmp 0x626a
popq %rbp
retq
nopl (%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/../cJSON.c
|
ensure
|
/* Make sure the printbuffer has room for `needed` more bytes past the current
 * offset, growing the buffer if necessary. Returns a pointer to the write
 * position (p->buffer + p->offset), or NULL on failure: invalid buffer state,
 * request larger than INT_MAX, a fixed (noalloc) buffer that is too small, or
 * allocation failure. On allocation failure the old buffer is freed and
 * p->buffer/p->length are reset so the printbuffer is left in a safe state. */
static unsigned char* ensure(printbuffer * const p, size_t needed)
{
    unsigned char *newbuffer = NULL;
    size_t newsize = 0;
    if ((p == NULL) || (p->buffer == NULL))
    {
        return NULL;
    }
    if ((p->length > 0) && (p->offset >= p->length))
    {
        /* make sure that offset is valid */
        return NULL;
    }
    if (needed > INT_MAX)
    {
        /* sizes bigger than INT_MAX are currently not supported */
        return NULL;
    }
    /* total bytes required: current offset + request + 1 for the NUL terminator.
       Cannot overflow: needed <= INT_MAX was checked above and offset < length. */
    needed += p->offset + 1;
    if (needed <= p->length)
    {
        /* existing buffer is already large enough */
        return p->buffer + p->offset;
    }
    if (p->noalloc) {
        /* caller supplied a fixed buffer; growing is not allowed */
        return NULL;
    }
    /* calculate new buffer size */
    if (needed > (INT_MAX / 2))
    {
        /* overflow of int, use INT_MAX if possible */
        if (needed <= INT_MAX)
        {
            newsize = INT_MAX;
        }
        else
        {
            return NULL;
        }
    }
    else
    {
        /* normal growth policy: double the required size */
        newsize = needed * 2;
    }
    if (p->hooks.reallocate != NULL)
    {
        /* reallocate with realloc if available */
        newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize);
        if (newbuffer == NULL)
        {
            /* realloc hook failed: release the old buffer and invalidate p */
            p->hooks.deallocate(p->buffer);
            p->length = 0;
            p->buffer = NULL;
            return NULL;
        }
    }
    else
    {
        /* otherwise reallocate manually */
        newbuffer = (unsigned char*)p->hooks.allocate(newsize);
        if (!newbuffer)
        {
            /* allocation failed: release the old buffer and invalidate p */
            p->hooks.deallocate(p->buffer);
            p->length = 0;
            p->buffer = NULL;
            return NULL;
        }
        /* copy the written content (offset bytes plus the NUL) then free the old buffer */
        memcpy(newbuffer, p->buffer, p->offset + 1);
        p->hooks.deallocate(p->buffer);
    }
    p->length = newsize;
    p->buffer = newbuffer;
    return newbuffer + p->offset;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq $0x0, -0x20(%rbp)
movq $0x0, -0x28(%rbp)
cmpq $0x0, -0x10(%rbp)
je 0x7f11
movq -0x10(%rbp), %rax
cmpq $0x0, (%rax)
jne 0x7f1e
movq $0x0, -0x8(%rbp)
jmp 0x80e9
movq -0x10(%rbp), %rax
cmpq $0x0, 0x8(%rax)
jbe 0x7f48
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq -0x10(%rbp), %rcx
cmpq 0x8(%rcx), %rax
jb 0x7f48
movq $0x0, -0x8(%rbp)
jmp 0x80e9
cmpq $0x7fffffff, -0x18(%rbp) # imm = 0x7FFFFFFF
jbe 0x7f5f
movq $0x0, -0x8(%rbp)
jmp 0x80e9
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
addq $0x1, %rax
addq -0x18(%rbp), %rax
movq %rax, -0x18(%rbp)
movq -0x18(%rbp), %rax
movq -0x10(%rbp), %rcx
cmpq 0x8(%rcx), %rax
ja 0x7f99
movq -0x10(%rbp), %rax
movq (%rax), %rax
movq -0x10(%rbp), %rcx
addq 0x10(%rcx), %rax
movq %rax, -0x8(%rbp)
jmp 0x80e9
movq -0x10(%rbp), %rax
cmpl $0x0, 0x20(%rax)
je 0x7fb0
movq $0x0, -0x8(%rbp)
jmp 0x80e9
cmpq $0x3fffffff, -0x18(%rbp) # imm = 0x3FFFFFFF
jbe 0x7fdd
cmpq $0x7fffffff, -0x18(%rbp) # imm = 0x7FFFFFFF
ja 0x7fce
movq $0x7fffffff, -0x28(%rbp) # imm = 0x7FFFFFFF
jmp 0x7fdb
movq $0x0, -0x8(%rbp)
jmp 0x80e9
jmp 0x7fe8
movq -0x18(%rbp), %rax
shlq %rax
movq %rax, -0x28(%rbp)
movq -0x10(%rbp), %rax
cmpq $0x0, 0x38(%rax)
je 0x804a
movq -0x10(%rbp), %rax
movq 0x38(%rax), %rax
movq -0x10(%rbp), %rcx
movq (%rcx), %rdi
movq -0x28(%rbp), %rsi
callq *%rax
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x8048
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rax
movq -0x10(%rbp), %rcx
movq (%rcx), %rdi
callq *%rax
movq -0x10(%rbp), %rax
movq $0x0, 0x8(%rax)
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0x80e9
jmp 0x80c2
movq -0x10(%rbp), %rax
movq 0x28(%rax), %rax
movq -0x28(%rbp), %rdi
callq *%rax
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x8095
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rax
movq -0x10(%rbp), %rcx
movq (%rcx), %rdi
callq *%rax
movq -0x10(%rbp), %rax
movq $0x0, 0x8(%rax)
movq -0x10(%rbp), %rax
movq $0x0, (%rax)
movq $0x0, -0x8(%rbp)
jmp 0x80e9
movq -0x20(%rbp), %rdi
movq -0x10(%rbp), %rax
movq (%rax), %rsi
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rdx
addq $0x1, %rdx
callq 0x3100
movq -0x10(%rbp), %rax
movq 0x30(%rax), %rax
movq -0x10(%rbp), %rcx
movq (%rcx), %rdi
callq *%rax
movq -0x28(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, 0x8(%rax)
movq -0x20(%rbp), %rcx
movq -0x10(%rbp), %rax
movq %rcx, (%rax)
movq -0x20(%rbp), %rax
movq -0x10(%rbp), %rcx
addq 0x10(%rcx), %rax
movq %rax, -0x8(%rbp)
movq -0x8(%rbp), %rax
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/../cJSON.c
|
print_array
|
/* Render a cJSON array item into output_buffer as "[elem,elem,...]"
 * (or "[elem, elem, ...]" when output_buffer->format is set).
 * Returns false when output_buffer is NULL or any ensure()/print_value()
 * step fails; on success the closing ']' plus a NUL are written and
 * output_buffer->depth is restored. */
static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer)
{
    unsigned char *output_pointer = NULL;
    size_t length = 0;
    cJSON *current_element = item->child;
    if (output_buffer == NULL)
    {
        return false;
    }
    /* Compose the output array. */
    /* opening square bracket */
    output_pointer = ensure(output_buffer, 1);
    if (output_pointer == NULL)
    {
        return false;
    }
    *output_pointer = '[';
    output_buffer->offset++;
    output_buffer->depth++;
    while (current_element != NULL)
    {
        if (!print_value(current_element, output_buffer))
        {
            return false;
        }
        /* resync offset with what print_value wrote */
        update_offset(output_buffer);
        if (current_element->next)
        {
            /* separator is "," or ", " depending on pretty-print mode */
            length = (size_t) (output_buffer->format ? 2 : 1);
            /* +1 reserves room for the NUL terminator */
            output_pointer = ensure(output_buffer, length + 1);
            if (output_pointer == NULL)
            {
                return false;
            }
            *output_pointer++ = ',';
            if(output_buffer->format)
            {
                *output_pointer++ = ' ';
            }
            *output_pointer = '\0';
            output_buffer->offset += length;
        }
        current_element = current_element->next;
    }
    /* closing square bracket plus NUL terminator */
    output_pointer = ensure(output_buffer, 2);
    if (output_pointer == NULL)
    {
        return false;
    }
    *output_pointer++ = ']';
    *output_pointer = '\0';
    output_buffer->depth--;
    return true;
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq $0x0, -0x20(%rbp)
movq $0x0, -0x28(%rbp)
movq -0x10(%rbp), %rax
movq 0x10(%rax), %rax
movq %rax, -0x30(%rbp)
cmpq $0x0, -0x18(%rbp)
jne 0x839f
movl $0x0, -0x4(%rbp)
jmp 0x8517
movq -0x18(%rbp), %rdi
movl $0x1, %esi
callq 0x7ee0
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x83c4
movl $0x0, -0x4(%rbp)
jmp 0x8517
movq -0x20(%rbp), %rax
movb $0x5b, (%rax)
movq -0x18(%rbp), %rax
movq 0x10(%rax), %rcx
addq $0x1, %rcx
movq %rcx, 0x10(%rax)
movq -0x18(%rbp), %rax
movq 0x18(%rax), %rcx
addq $0x1, %rcx
movq %rcx, 0x18(%rax)
cmpq $0x0, -0x30(%rbp)
je 0x84c5
movq -0x30(%rbp), %rdi
movq -0x18(%rbp), %rsi
callq 0x4240
cmpl $0x0, %eax
jne 0x8414
movl $0x0, -0x4(%rbp)
jmp 0x8517
movq -0x18(%rbp), %rdi
callq 0x6f60
movq -0x30(%rbp), %rax
cmpq $0x0, (%rax)
je 0x84b5
movq -0x18(%rbp), %rax
movl 0x24(%rax), %edx
movl $0x1, %eax
movl $0x2, %ecx
cmpl $0x0, %edx
cmovnel %ecx, %eax
cltq
movq %rax, -0x28(%rbp)
movq -0x18(%rbp), %rdi
movq -0x28(%rbp), %rsi
addq $0x1, %rsi
callq 0x7ee0
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x8470
movl $0x0, -0x4(%rbp)
jmp 0x8517
movq -0x20(%rbp), %rax
movq %rax, %rcx
addq $0x1, %rcx
movq %rcx, -0x20(%rbp)
movb $0x2c, (%rax)
movq -0x18(%rbp), %rax
cmpl $0x0, 0x24(%rax)
je 0x849e
movq -0x20(%rbp), %rax
movq %rax, %rcx
addq $0x1, %rcx
movq %rcx, -0x20(%rbp)
movb $0x20, (%rax)
movq -0x20(%rbp), %rax
movb $0x0, (%rax)
movq -0x28(%rbp), %rcx
movq -0x18(%rbp), %rax
addq 0x10(%rax), %rcx
movq %rcx, 0x10(%rax)
movq -0x30(%rbp), %rax
movq (%rax), %rax
movq %rax, -0x30(%rbp)
jmp 0x83eb
movq -0x18(%rbp), %rdi
movl $0x2, %esi
callq 0x7ee0
movq %rax, -0x20(%rbp)
cmpq $0x0, -0x20(%rbp)
jne 0x84e7
movl $0x0, -0x4(%rbp)
jmp 0x8517
movq -0x20(%rbp), %rax
movq %rax, %rcx
addq $0x1, %rcx
movq %rcx, -0x20(%rbp)
movb $0x5d, (%rax)
movq -0x20(%rbp), %rax
movb $0x0, (%rax)
movq -0x18(%rbp), %rax
movq 0x18(%rax), %rcx
addq $-0x1, %rcx
movq %rcx, 0x18(%rax)
movl $0x1, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
|
/dparo[P]master-thesis/deps/cJSON/tests/../cJSON.c
|
UnityAssertEqualNumber
|
void UnityAssertEqualNumber(const UNITY_INT expected,
const UNITY_INT actual,
const char* msg,
const UNITY_LINE_TYPE lineNumber,
const UNITY_DISPLAY_STYLE_T style)
{
RETURN_IF_FAIL_OR_IGNORE;
if (expected != actual)
{
UnityTestResultsFailBegin(lineNumber);
UnityPrint(UnityStrExpected);
UnityPrintNumberByStyle(expected, style);
UnityPrint(UnityStrWas);
UnityPrintNumberByStyle(actual, style);
UnityAddMsgIfSpecified(msg);
UNITY_FAIL_AND_BAIL;
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movq %rdx, -0x18(%rbp)
movq %rcx, -0x20(%rbp)
movl %r8d, -0x24(%rbp)
cmpq $0x0, 0x69a4(%rip) # 0x10148
jne 0x97b0
cmpq $0x0, 0x69a2(%rip) # 0x10150
je 0x97b2
jmp 0x981e
movq -0x8(%rbp), %rax
cmpq -0x10(%rbp), %rax
je 0x981e
movq -0x20(%rbp), %rdi
callq 0x96c0
leaq 0x2c77(%rip), %rdi # 0xc443
callq 0x8d00
movq -0x8(%rbp), %rdi
movl -0x24(%rbp), %esi
callq 0x8f70
leaq 0x2c6a(%rip), %rdi # 0xc44e
callq 0x8d00
movq -0x10(%rbp), %rdi
movl -0x24(%rbp), %esi
callq 0x8f70
movq -0x18(%rbp), %rdi
callq 0x9700
movq $0x1, 0x693f(%rip) # 0x10148
leaq 0x68f8(%rip), %rdi # 0x10108
addq $0x50, %rdi
movl $0x1, %esi
callq 0x3140
addq $0x30, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/unity/src/unity.c
|
UnityIsOneArrayNull
|
static int UnityIsOneArrayNull(UNITY_INTERNAL_PTR expected,
UNITY_INTERNAL_PTR actual,
const UNITY_LINE_TYPE lineNumber,
const char* msg)
{
if (expected == actual) return 0; /* Both are NULL or same pointer */
/* print and return true if just expected is NULL */
if (expected == NULL)
{
UnityTestResultsFailBegin(lineNumber);
UnityPrint(UnityStrNullPointerForExpected);
UnityAddMsgIfSpecified(msg);
return 1;
}
/* print and return true if just actual is NULL */
if (actual == NULL)
{
UnityTestResultsFailBegin(lineNumber);
UnityPrint(UnityStrNullPointerForActual);
UnityAddMsgIfSpecified(msg);
return 1;
}
return 0; /* return false if neither is NULL */
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x30, %rsp
movq %rdi, -0x10(%rbp)
movq %rsi, -0x18(%rbp)
movq %rdx, -0x20(%rbp)
movq %rcx, -0x28(%rbp)
movq -0x10(%rbp), %rax
cmpq -0x18(%rbp), %rax
jne 0x9c8b
movl $0x0, -0x4(%rbp)
jmp 0x9cee
cmpq $0x0, -0x10(%rbp)
jne 0x9cb9
movq -0x20(%rbp), %rdi
callq 0x96c0
leaq 0x293e(%rip), %rdi # 0xc5e0
callq 0x8d00
movq -0x28(%rbp), %rdi
callq 0x9700
movl $0x1, -0x4(%rbp)
jmp 0x9cee
cmpq $0x0, -0x18(%rbp)
jne 0x9ce7
movq -0x20(%rbp), %rdi
callq 0x96c0
leaq 0x2930(%rip), %rdi # 0xc600
callq 0x8d00
movq -0x28(%rbp), %rdi
callq 0x9700
movl $0x1, -0x4(%rbp)
jmp 0x9cee
movl $0x0, -0x4(%rbp)
movl -0x4(%rbp), %eax
addq $0x30, %rsp
popq %rbp
retq
nopw (%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/unity/src/unity.c
|
UnityAssertEqualFloatArray
|
void UnityAssertEqualFloatArray(UNITY_PTR_ATTRIBUTE const UNITY_FLOAT* expected,
UNITY_PTR_ATTRIBUTE const UNITY_FLOAT* actual,
const UNITY_UINT32 num_elements,
const char* msg,
const UNITY_LINE_TYPE lineNumber,
const UNITY_FLAGS_T flags)
{
UNITY_UINT32 elements = num_elements;
UNITY_PTR_ATTRIBUTE const UNITY_FLOAT* ptr_expected = expected;
UNITY_PTR_ATTRIBUTE const UNITY_FLOAT* ptr_actual = actual;
RETURN_IF_FAIL_OR_IGNORE;
if (elements == 0)
{
UnityPrintPointlessAndBail();
}
if (expected == actual) return; /* Both are NULL or same pointer */
if (UnityIsOneArrayNull((UNITY_INTERNAL_PTR)expected, (UNITY_INTERNAL_PTR)actual, lineNumber, msg))
UNITY_FAIL_AND_BAIL;
while (elements--)
{
if (!UnityFloatsWithin(*ptr_expected * UNITY_FLOAT_PRECISION, *ptr_expected, *ptr_actual))
{
UnityTestResultsFailBegin(lineNumber);
UnityPrint(UnityStrElement);
UnityPrintNumberUnsigned(num_elements - elements - 1);
UNITY_PRINT_EXPECTED_AND_ACTUAL_FLOAT((UNITY_DOUBLE)*ptr_expected, (UNITY_DOUBLE)*ptr_actual);
UnityAddMsgIfSpecified(msg);
UNITY_FAIL_AND_BAIL;
}
if (flags == UNITY_ARRAY_TO_ARRAY)
{
ptr_expected++;
}
ptr_actual++;
}
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x40, %rsp
movq %rdi, -0x8(%rbp)
movq %rsi, -0x10(%rbp)
movl %edx, -0x14(%rbp)
movq %rcx, -0x20(%rbp)
movq %r8, -0x28(%rbp)
movl %r9d, -0x2c(%rbp)
movl -0x14(%rbp), %eax
movl %eax, -0x30(%rbp)
movq -0x8(%rbp), %rax
movq %rax, -0x38(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x40(%rbp)
cmpq $0x0, 0x640b(%rip) # 0x10148
jne 0x9d49
cmpq $0x0, 0x6409(%rip) # 0x10150
je 0x9d4e
jmp 0x9ecc
cmpl $0x0, -0x30(%rbp)
jne 0x9d92
movq -0x28(%rbp), %rdi
callq 0x96c0
leaq 0x273c(%rip), %rdi # 0xc4a0
callq 0x8d00
movq -0x20(%rbp), %rdi
callq 0x9700
movq $0x1, 0x63cb(%rip) # 0x10148
leaq 0x6384(%rip), %rdi # 0x10108
addq $0x50, %rdi
movl $0x1, %esi
callq 0x3140
movq -0x8(%rbp), %rax
cmpq -0x10(%rbp), %rax
jne 0x9da1
jmp 0x9ecc
movq -0x8(%rbp), %rdi
movq -0x10(%rbp), %rsi
movq -0x28(%rbp), %rdx
movq -0x20(%rbp), %rcx
callq 0x9c60
cmpl $0x0, %eax
je 0x9ddb
movq $0x1, 0x6382(%rip) # 0x10148
leaq 0x633b(%rip), %rdi # 0x10108
addq $0x50, %rdi
movl $0x1, %esi
callq 0x3140
jmp 0x9ddd
movl -0x30(%rbp), %eax
movl %eax, %ecx
addl $-0x1, %ecx
movl %ecx, -0x30(%rbp)
cmpl $0x0, %eax
je 0x9ecc
movq -0x38(%rbp), %rax
movss 0x2593(%rip), %xmm0 # 0xc390
mulss (%rax), %xmm0
movq -0x38(%rbp), %rax
movss (%rax), %xmm1
movq -0x40(%rbp), %rax
movss (%rax), %xmm2
callq 0x9ee0
cmpl $0x0, %eax
jne 0x9ea9
movq -0x28(%rbp), %rdi
callq 0x96c0
leaq 0x26a8(%rip), %rdi # 0xc4d7
callq 0x8d00
movl -0x14(%rbp), %eax
subl -0x30(%rbp), %eax
subl $0x1, %eax
movl %eax, %eax
movl %eax, %edi
callq 0x9020
leaq 0x25f6(%rip), %rdi # 0xc443
callq 0x8d00
movq -0x38(%rbp), %rax
movss (%rax), %xmm0
cvtss2sd %xmm0, %xmm0
callq 0x9120
leaq 0x25e4(%rip), %rdi # 0xc44e
callq 0x8d00
movq -0x40(%rbp), %rax
movss (%rax), %xmm0
cvtss2sd %xmm0, %xmm0
callq 0x9120
movq -0x20(%rbp), %rdi
callq 0x9700
movq $0x1, 0x62b4(%rip) # 0x10148
leaq 0x626d(%rip), %rdi # 0x10108
addq $0x50, %rdi
movl $0x1, %esi
callq 0x3140
cmpl $0x1, -0x2c(%rbp)
jne 0x9ebb
movq -0x38(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x38(%rbp)
movq -0x40(%rbp), %rax
addq $0x4, %rax
movq %rax, -0x40(%rbp)
jmp 0x9ddd
addq $0x40, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
|
/dparo[P]master-thesis/deps/cJSON/tests/unity/src/unity.c
|
Catch::makeCommandLineParser(Catch::ConfigData&)
|
Clara::Parser makeCommandLineParser( ConfigData& config ) {
using namespace Clara;
auto const setWarning = [&]( std::string const& warning ) {
if ( warning == "NoAssertions" ) {
config.warnings = static_cast<WarnAbout::What>(config.warnings | WarnAbout::NoAssertions);
return ParserResult::ok( ParseResultType::Matched );
} else if ( warning == "UnmatchedTestSpec" ) {
config.warnings = static_cast<WarnAbout::What>(config.warnings | WarnAbout::UnmatchedTestSpec);
return ParserResult::ok( ParseResultType::Matched );
}
return ParserResult ::runtimeError(
"Unrecognised warning option: '" + warning + '\'' );
};
auto const loadTestNamesFromFile = [&]( std::string const& filename ) {
std::ifstream f( filename.c_str() );
if( !f.is_open() )
return ParserResult::runtimeError( "Unable to load input file: '" + filename + '\'' );
std::string line;
while( std::getline( f, line ) ) {
line = trim(line);
if( !line.empty() && !startsWith( line, '#' ) ) {
if( !startsWith( line, '"' ) )
line = '"' + line + '"';
config.testsOrTags.push_back( line );
config.testsOrTags.emplace_back( "," );
}
}
//Remove comma in the end
if(!config.testsOrTags.empty())
config.testsOrTags.erase( config.testsOrTags.end()-1 );
return ParserResult::ok( ParseResultType::Matched );
};
auto const setTestOrder = [&]( std::string const& order ) {
if( startsWith( "declared", order ) )
config.runOrder = TestRunOrder::Declared;
else if( startsWith( "lexical", order ) )
config.runOrder = TestRunOrder::LexicographicallySorted;
else if( startsWith( "random", order ) )
config.runOrder = TestRunOrder::Randomized;
else
return ParserResult::runtimeError( "Unrecognised ordering: '" + order + '\'' );
return ParserResult::ok( ParseResultType::Matched );
};
auto const setRngSeed = [&]( std::string const& seed ) {
if( seed == "time" ) {
config.rngSeed = generateRandomSeed(GenerateFrom::Time);
return ParserResult::ok(ParseResultType::Matched);
} else if (seed == "random-device") {
config.rngSeed = generateRandomSeed(GenerateFrom::RandomDevice);
return ParserResult::ok(ParseResultType::Matched);
}
// TODO: ideally we should be parsing uint32_t directly
// fix this later when we add new parse overload
auto parsedSeed = parseUInt( seed, 0 );
if ( !parsedSeed ) {
return ParserResult::runtimeError( "Could not parse '" + seed + "' as seed" );
}
config.rngSeed = *parsedSeed;
return ParserResult::ok( ParseResultType::Matched );
};
auto const setDefaultColourMode = [&]( std::string const& colourMode ) {
Optional<ColourMode> maybeMode = Catch::Detail::stringToColourMode(toLower( colourMode ));
if ( !maybeMode ) {
return ParserResult::runtimeError(
"colour mode must be one of: default, ansi, win32, "
"or none. '" +
colourMode + "' is not recognised" );
}
auto mode = *maybeMode;
if ( !isColourImplAvailable( mode ) ) {
return ParserResult::runtimeError(
"colour mode '" + colourMode +
"' is not supported in this binary" );
}
config.defaultColourMode = mode;
return ParserResult::ok( ParseResultType::Matched );
};
auto const setWaitForKeypress = [&]( std::string const& keypress ) {
auto keypressLc = toLower( keypress );
if (keypressLc == "never")
config.waitForKeypress = WaitForKeypress::Never;
else if( keypressLc == "start" )
config.waitForKeypress = WaitForKeypress::BeforeStart;
else if( keypressLc == "exit" )
config.waitForKeypress = WaitForKeypress::BeforeExit;
else if( keypressLc == "both" )
config.waitForKeypress = WaitForKeypress::BeforeStartAndExit;
else
return ParserResult::runtimeError( "keypress argument must be one of: never, start, exit or both. '" + keypress + "' not recognised" );
return ParserResult::ok( ParseResultType::Matched );
};
auto const setVerbosity = [&]( std::string const& verbosity ) {
auto lcVerbosity = toLower( verbosity );
if( lcVerbosity == "quiet" )
config.verbosity = Verbosity::Quiet;
else if( lcVerbosity == "normal" )
config.verbosity = Verbosity::Normal;
else if( lcVerbosity == "high" )
config.verbosity = Verbosity::High;
else
return ParserResult::runtimeError( "Unrecognised verbosity, '" + verbosity + '\'' );
return ParserResult::ok( ParseResultType::Matched );
};
auto const setReporter = [&]( std::string const& userReporterSpec ) {
if ( userReporterSpec.empty() ) {
return ParserResult::runtimeError( "Received empty reporter spec." );
}
Optional<ReporterSpec> parsed =
parseReporterSpec( userReporterSpec );
if ( !parsed ) {
return ParserResult::runtimeError(
"Could not parse reporter spec '" + userReporterSpec +
"'" );
}
auto const& reporterSpec = *parsed;
auto const& factories =
getRegistryHub().getReporterRegistry().getFactories();
auto result = factories.find( reporterSpec.name() );
if ( result == factories.end() ) {
return ParserResult::runtimeError(
"Unrecognized reporter, '" + reporterSpec.name() +
"'. Check available with --list-reporters" );
}
const bool hadOutputFile = reporterSpec.outputFile().some();
config.reporterSpecifications.push_back( CATCH_MOVE( *parsed ) );
// It would be enough to check this only once at the very end, but
// there is not a place where we could call this check, so do it
// every time it could fail. For valid inputs, this is still called
// at most once.
if (!hadOutputFile) {
int n_reporters_without_file = 0;
for (auto const& spec : config.reporterSpecifications) {
if (spec.outputFile().none()) {
n_reporters_without_file++;
}
}
if (n_reporters_without_file > 1) {
return ParserResult::runtimeError( "Only one reporter may have unspecified output file." );
}
}
return ParserResult::ok( ParseResultType::Matched );
};
auto const setShardCount = [&]( std::string const& shardCount ) {
auto parsedCount = parseUInt( shardCount );
if ( !parsedCount ) {
return ParserResult::runtimeError(
"Could not parse '" + shardCount + "' as shard count" );
}
if ( *parsedCount == 0 ) {
return ParserResult::runtimeError(
"Shard count must be positive" );
}
config.shardCount = *parsedCount;
return ParserResult::ok( ParseResultType::Matched );
};
auto const setShardIndex = [&](std::string const& shardIndex) {
auto parsedIndex = parseUInt( shardIndex );
if ( !parsedIndex ) {
return ParserResult::runtimeError(
"Could not parse '" + shardIndex + "' as shard index" );
}
config.shardIndex = *parsedIndex;
return ParserResult::ok( ParseResultType::Matched );
};
auto cli
= ExeName( config.processName )
| Help( config.showHelp )
| Opt( config.showSuccessfulTests )
["-s"]["--success"]
( "include successful tests in output" )
| Opt( config.shouldDebugBreak )
["-b"]["--break"]
( "break into debugger on failure" )
| Opt( config.noThrow )
["-e"]["--nothrow"]
( "skip exception tests" )
| Opt( config.showInvisibles )
["-i"]["--invisibles"]
( "show invisibles (tabs, newlines)" )
| Opt( config.defaultOutputFilename, "filename" )
["-o"]["--out"]
( "default output filename" )
| Opt( accept_many, setReporter, "name[::key=value]*" )
["-r"]["--reporter"]
( "reporter to use (defaults to console)" )
| Opt( config.name, "name" )
["-n"]["--name"]
( "suite name" )
| Opt( [&]( bool ){ config.abortAfter = 1; } )
["-a"]["--abort"]
( "abort at first failure" )
| Opt( [&]( int x ){ config.abortAfter = x; }, "no. failures" )
["-x"]["--abortx"]
( "abort after x failures" )
| Opt( accept_many, setWarning, "warning name" )
["-w"]["--warn"]
( "enable warnings" )
| Opt( [&]( bool flag ) { config.showDurations = flag ? ShowDurations::Always : ShowDurations::Never; }, "yes|no" )
["-d"]["--durations"]
( "show test durations" )
| Opt( config.minDuration, "seconds" )
["-D"]["--min-duration"]
( "show test durations for tests taking at least the given number of seconds" )
| Opt( loadTestNamesFromFile, "filename" )
["-f"]["--input-file"]
( "load test names to run from a file" )
| Opt( config.filenamesAsTags )
["-#"]["--filenames-as-tags"]
( "adds a tag for the filename" )
| Opt( config.sectionsToRun, "section name" )
["-c"]["--section"]
( "specify section to run" )
| Opt( setVerbosity, "quiet|normal|high" )
["-v"]["--verbosity"]
( "set output verbosity" )
| Opt( config.listTests )
["--list-tests"]
( "list all/matching test cases" )
| Opt( config.listTags )
["--list-tags"]
( "list all/matching tags" )
| Opt( config.listReporters )
["--list-reporters"]
( "list all available reporters" )
| Opt( config.listListeners )
["--list-listeners"]
( "list all listeners" )
| Opt( setTestOrder, "decl|lex|rand" )
["--order"]
( "test case order (defaults to decl)" )
| Opt( setRngSeed, "'time'|'random-device'|number" )
["--rng-seed"]
( "set a specific seed for random numbers" )
| Opt( setDefaultColourMode, "ansi|win32|none|default" )
["--colour-mode"]
( "what color mode should be used as default" )
| Opt( config.libIdentify )
["--libidentify"]
( "report name and version according to libidentify standard" )
| Opt( setWaitForKeypress, "never|start|exit|both" )
["--wait-for-keypress"]
( "waits for a keypress before exiting" )
| Opt( config.skipBenchmarks)
["--skip-benchmarks"]
( "disable running benchmarks")
| Opt( config.benchmarkSamples, "samples" )
["--benchmark-samples"]
( "number of samples to collect (default: 100)" )
| Opt( config.benchmarkResamples, "resamples" )
["--benchmark-resamples"]
( "number of resamples for the bootstrap (default: 100000)" )
| Opt( config.benchmarkConfidenceInterval, "confidence interval" )
["--benchmark-confidence-interval"]
( "confidence interval for the bootstrap (between 0 and 1, default: 0.95)" )
| Opt( config.benchmarkNoAnalysis )
["--benchmark-no-analysis"]
( "perform only measurements; do not perform any analysis" )
| Opt( config.benchmarkWarmupTime, "benchmarkWarmupTime" )
["--benchmark-warmup-time"]
( "amount of time in milliseconds spent on warming up each test (default: 100)" )
| Opt( setShardCount, "shard count" )
["--shard-count"]
( "split the tests to execute into this many groups" )
| Opt( setShardIndex, "shard index" )
["--shard-index"]
( "index of the group of tests to execute (see --shard-count)" ) |
Opt( config.allowZeroTests )
["--allow-running-no-tests"]
( "Treat 'No tests run' as a success" )
| Arg( config.testsOrTags, "test name|pattern|tags" )
( "which test or tests to use" );
return cli;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2b58, %rsp # imm = 0x2B58
movq %rsi, %r14
movq %rdi, 0x60(%rsp)
addq $0xa8, %rsi
leaq 0x1360(%rsp), %rdi
callq 0x2bc36
leaq 0x7(%r14), %rsi
leaq 0x1dc0(%rsp), %rdi
callq 0x2d04a
leaq 0x1e38(%rsp), %rdi
leaq 0x1360(%rsp), %rsi
leaq 0x1dc0(%rsp), %rdx
callq 0x4b752
leaq 0x4(%r14), %rsi
leaq 0x1cd0(%rsp), %rdi
callq 0x2b110
leaq 0x50(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xd9f3c(%rip), %rsi # 0xf8c19
leaq 0xd9f37(%rip), %rdx # 0xf8c1b
leaq 0x40(%rsp), %rdi
callq 0x53aee
leaq 0x1d30(%rsp), %rbx
leaq 0x40(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x3b8(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0xd9f06(%rip), %rsi # 0xf8c1c
leaq 0xd9f08(%rip), %rdx # 0xf8c25
leaq 0x3a8(%rsp), %rdi
callq 0x53aee
leaq 0x3a8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x398(%rsp), %rbp
movq %rbp, -0x10(%rbp)
leaq 0xd9ed9(%rip), %rsi # 0xf8c26
leaq 0xd9ef4(%rip), %rdx # 0xf8c48
leaq 0x388(%rsp), %rdi
callq 0x53aee
leaq 0x1d10(%rsp), %rdi
leaq 0x388(%rsp), %rsi
callq 0x12290
leaq 0x1e98(%rsp), %rdi
leaq 0x1e38(%rsp), %rsi
leaq 0x1cd0(%rsp), %rdx
callq 0x4b8b8
leaq 0x5(%r14), %rsi
leaq 0x1c58(%rsp), %rdi
callq 0x2b110
leaq 0x30(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0xd9e94(%rip), %rsi # 0xf8c49
leaq 0xd9e8f(%rip), %rdx # 0xf8c4b
leaq 0x20(%rsp), %rdi
callq 0x53aee
leaq 0x1cb8(%rsp), %rbx
leaq 0x20(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x10(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9e61(%rip), %rsi # 0xf8c4c
leaq 0xd9e61(%rip), %rdx # 0xf8c53
movq %rsp, %rdi
callq 0x53aee
movq %rsp, %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xcd8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9e3c(%rip), %rsi # 0xf8c54
leaq 0xd9e53(%rip), %rdx # 0xf8c72
leaq 0xcc8(%rsp), %rdi
callq 0x53aee
leaq 0x1c98(%rsp), %rdi
leaq 0xcc8(%rsp), %rsi
callq 0x12290
leaq 0x1ef8(%rsp), %rdi
leaq 0x1e98(%rsp), %rsi
leaq 0x1c58(%rsp), %rdx
callq 0x4b8b8
leaq 0x6(%r14), %rsi
leaq 0x1be0(%rsp), %rdi
callq 0x2b110
leaq 0x378(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9df1(%rip), %rsi # 0xf8c73
leaq 0xd9dec(%rip), %rdx # 0xf8c75
leaq 0x368(%rsp), %rdi
callq 0x53aee
leaq 0x1c40(%rsp), %rbx
leaq 0x368(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x358(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9db5(%rip), %rsi # 0xf8c76
leaq 0xd9db7(%rip), %rdx # 0xf8c7f
leaq 0x348(%rsp), %rdi
callq 0x53aee
leaq 0x348(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xcb8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9d88(%rip), %rsi # 0xf8c80
leaq 0xd9d95(%rip), %rdx # 0xf8c94
leaq 0xca8(%rsp), %rdi
callq 0x53aee
leaq 0x1c20(%rsp), %rdi
leaq 0xca8(%rsp), %rsi
callq 0x12290
leaq 0x1f58(%rsp), %rdi
leaq 0x1ef8(%rsp), %rsi
leaq 0x1be0(%rsp), %rdx
callq 0x4b8b8
leaq 0x8(%r14), %rsi
leaq 0x1b68(%rsp), %rdi
callq 0x2b110
leaq 0x338(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9d33(%rip), %rsi # 0xf8c95
leaq 0xd9d2e(%rip), %rdx # 0xf8c97
leaq 0x328(%rsp), %rdi
callq 0x53aee
leaq 0x1bc8(%rsp), %rbx
leaq 0x328(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x318(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9cf7(%rip), %rsi # 0xf8c98
leaq 0xd9cfc(%rip), %rdx # 0xf8ca4
leaq 0x308(%rsp), %rdi
callq 0x53aee
leaq 0x308(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xc98(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9ccd(%rip), %rsi # 0xf8ca5
leaq 0xd9ce6(%rip), %rdx # 0xf8cc5
leaq 0xc88(%rsp), %rdi
callq 0x53aee
leaq 0x1ba8(%rsp), %rdi
leaq 0xc88(%rsp), %rsi
callq 0x12290
leaq 0x1fb8(%rsp), %rdi
leaq 0x1f58(%rsp), %rsi
leaq 0x1b68(%rsp), %rdx
callq 0x4b8b8
leaq 0xc78(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9e6e(%rip), %rsi # 0xf8e9f
leaq 0xd9e6f(%rip), %rdx # 0xf8ea7
leaq 0xc68(%rsp), %rdi
callq 0x53aee
leaq 0x68(%r14), %rsi
leaq 0x16d0(%rsp), %rdi
leaq 0xc68(%rsp), %rdx
callq 0x581d6
leaq 0x124ef3(%rip), %rax # 0x143f58
movq %rax, 0x16d0(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x1730(%rsp)
movq $0x0, 0x1740(%rsp)
leaq 0x2f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9c2f(%rip), %rsi # 0xf8cc6
leaq 0xd9c2a(%rip), %rdx # 0xf8cc8
leaq 0x2e8(%rsp), %rdi
callq 0x53aee
leaq 0x1730(%rsp), %rbx
leaq 0x2e8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xc58(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9bf3(%rip), %rsi # 0xf8cc9
leaq 0xd9bf1(%rip), %rdx # 0xf8cce
leaq 0xc48(%rsp), %rdi
callq 0x53aee
leaq 0xc48(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xc38(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9bc2(%rip), %rsi # 0xf8ccf
leaq 0xd9bd2(%rip), %rdx # 0xf8ce6
leaq 0xc28(%rsp), %rdi
callq 0x53aee
leaq 0x1710(%rsp), %rdi
leaq 0xc28(%rsp), %rsi
callq 0x12290
leaq 0x2018(%rsp), %rdi
leaq 0x1fb8(%rsp), %rsi
leaq 0x16d0(%rsp), %rdx
callq 0x4b8b8
leaq 0x2d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9b81(%rip), %rsi # 0xf8ce7
leaq 0xd9b8c(%rip), %rdx # 0xf8cf9
leaq 0x2c8(%rsp), %rdi
callq 0x53aee
leaq 0x127dc7(%rip), %rax # 0x146f48
movq %rax, 0x11f8(%rsp)
movl $0x0, 0x1200(%rsp)
movl $0x20, %edi
callq 0x12540
movabsq $0x100000001, %r12 # imm = 0x100000001
movq %r12, 0x8(%rax)
leaq 0x126fbd(%rip), %rcx # 0x146170
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
movq %r14, 0x18(%rax)
leaq 0x126ff8(%rip), %rdx # 0x1461c0
movq %rdx, 0x10(%rax)
leaq 0x1218(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x1228(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x2c8(%rsp), %rsi
movq 0x2d0(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x1248(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x124d3c(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0x2b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9abc(%rip), %rsi # 0xf8cfa
leaq 0xd9ab7(%rip), %rdx # 0xf8cfc
leaq 0x2a8(%rsp), %rdi
callq 0x53aee
leaq 0x1258(%rsp), %rbx
leaq 0x2a8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xc18(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9a80(%rip), %rsi # 0xf8cfd
leaq 0xd9a83(%rip), %rdx # 0xf8d07
leaq 0xc08(%rsp), %rdi
callq 0x53aee
leaq 0xc08(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xbf8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9a54(%rip), %rsi # 0xf8d08
leaq 0xd9a72(%rip), %rdx # 0xf8d2d
leaq 0xbe8(%rsp), %rdi
callq 0x53aee
leaq 0x1238(%rsp), %rdi
leaq 0xbe8(%rsp), %rsi
callq 0x12290
leaq 0x2078(%rsp), %rdi
leaq 0x2018(%rsp), %rsi
leaq 0x11f8(%rsp), %rdx
callq 0x4b8b8
leaq 0xbd8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9a31(%rip), %rsi # 0xf8d3e
leaq 0xd9a2e(%rip), %rdx # 0xf8d42
leaq 0xbc8(%rsp), %rdi
callq 0x53aee
leaq 0x88(%r14), %rsi
leaq 0x1658(%rsp), %rdi
leaq 0xbc8(%rsp), %rdx
callq 0x581d6
leaq 0x124c14(%rip), %rax # 0x143f58
movq %rax, 0x1658(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x16b8(%rsp)
movq $0x0, 0x16c8(%rsp)
leaq 0xbb8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd99b8(%rip), %rsi # 0xf8d2e
leaq 0xd99b3(%rip), %rdx # 0xf8d30
leaq 0xba8(%rsp), %rdi
callq 0x53aee
leaq 0x16b8(%rsp), %rbx
leaq 0xba8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x298(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd997c(%rip), %rsi # 0xf8d31
leaq 0xd997b(%rip), %rdx # 0xf8d37
leaq 0x288(%rsp), %rdi
callq 0x53aee
leaq 0x288(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xb98(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd994c(%rip), %rsi # 0xf8d38
leaq 0xd994f(%rip), %rdx # 0xf8d42
leaq 0xb88(%rsp), %rdi
callq 0x53aee
leaq 0x1698(%rsp), %rdi
leaq 0xb88(%rsp), %rsi
callq 0x12290
leaq 0x20d8(%rsp), %rdi
leaq 0x2078(%rsp), %rsi
leaq 0x1658(%rsp), %rdx
callq 0x4b8b8
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126e19(%rip), %rcx # 0x146260
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x126e58(%rip), %rdx # 0x1462b0
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x127ae1(%rip), %rdx # 0x146f48
movq %rdx, 0x1180(%rsp)
movl $0x0, 0x1188(%rsp)
movq %rcx, 0x1190(%rsp)
movq %rax, 0x1198(%rsp)
movq 0x128aff(%rip), %rcx # 0x147f90
cmpb $0x0, (%rcx)
je 0x1f49f
movl $0x2, 0x8(%rax)
jmp 0x1f4a3
lock
incl 0x8(%rax)
leaq 0x11b0(%rsp), %rcx
movq %rcx, -0x10(%rcx)
xorl %ebx, %ebx
movq %rbx, -0x8(%rcx)
movb %bl, (%rcx)
leaq 0x11d0(%rsp), %r15
movq %r15, -0x10(%r15)
movq %rbx, -0x8(%r15)
movb %bl, (%r15)
movq %rax, %rdi
callq 0x4fb08
leaq 0x124a7f(%rip), %rax # 0x143f58
movq %rax, -0x50(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r15)
movq %rbx, 0x20(%r15)
leaq 0xb78(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9847(%rip), %rsi # 0xf8d43
leaq 0xd9842(%rip), %rdx # 0xf8d45
leaq 0xb68(%rsp), %rdi
callq 0x53aee
leaq 0x11e0(%rsp), %rbx
leaq 0xb68(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xb58(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd980b(%rip), %rsi # 0xf8d46
leaq 0xd980b(%rip), %rdx # 0xf8d4d
leaq 0xb48(%rsp), %rdi
callq 0x53aee
leaq 0xb48(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xb38(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd97dc(%rip), %rsi # 0xf8d4e
leaq 0xd97eb(%rip), %rdx # 0xf8d64
leaq 0xb28(%rsp), %rdi
callq 0x53aee
leaq 0x11c0(%rsp), %rdi
leaq 0xb28(%rsp), %rsi
callq 0x12290
leaq 0x2138(%rsp), %rdi
leaq 0x20d8(%rsp), %rsi
leaq 0x1180(%rsp), %rdx
callq 0x4b8b8
leaq 0x278(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd979a(%rip), %rsi # 0xf8d65
leaq 0xd979f(%rip), %rdx # 0xf8d71
leaq 0x268(%rsp), %rdi
callq 0x53aee
leaq 0x127962(%rip), %rax # 0x146f48
movq %rax, 0x1108(%rsp)
movl $0x0, 0x1110(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126cf2(%rip), %rcx # 0x146300
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x126d31(%rip), %rdx # 0x146350
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x1128(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x1138(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x268(%rsp), %rsi
movq 0x270(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x1158(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x1248e1(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0xb18(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd96d9(%rip), %rsi # 0xf8d72
leaq 0xd96d4(%rip), %rdx # 0xf8d74
leaq 0xb08(%rsp), %rdi
callq 0x53aee
leaq 0x1168(%rsp), %rbx
leaq 0xb08(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x258(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd969d(%rip), %rsi # 0xf8d75
leaq 0xd969e(%rip), %rdx # 0xf8d7d
leaq 0x248(%rsp), %rdi
callq 0x53aee
leaq 0x248(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xaf8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd966f(%rip), %rsi # 0xf8d7e
leaq 0xd967e(%rip), %rdx # 0xf8d94
leaq 0xae8(%rsp), %rdi
callq 0x53aee
leaq 0x1148(%rsp), %rdi
leaq 0xae8(%rsp), %rsi
callq 0x12290
leaq 0x2198(%rsp), %rdi
leaq 0x2138(%rsp), %rsi
leaq 0x1108(%rsp), %rdx
callq 0x4b8b8
leaq 0x238(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd962d(%rip), %rsi # 0xf8d95
leaq 0xd9632(%rip), %rdx # 0xf8da1
leaq 0x228(%rsp), %rdi
callq 0x53aee
leaq 0x1277c5(%rip), %rax # 0x146f48
movq %rax, 0x1090(%rsp)
movl $0x0, 0x1098(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126bf5(%rip), %rcx # 0x1463a0
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
movq %r14, 0x18(%rax)
leaq 0x126c30(%rip), %rdx # 0x1463f0
movq %rdx, 0x10(%rax)
leaq 0x10b0(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x10c0(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x228(%rsp), %rsi
movq 0x230(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x10e0(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x124744(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0xad8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd956c(%rip), %rsi # 0xf8da2
leaq 0xd9567(%rip), %rdx # 0xf8da4
leaq 0xac8(%rsp), %rdi
callq 0x53aee
leaq 0x10f0(%rsp), %rbx
leaq 0xac8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xab8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9530(%rip), %rsi # 0xf8da5
leaq 0xd952f(%rip), %rdx # 0xf8dab
leaq 0xaa8(%rsp), %rdi
callq 0x53aee
leaq 0xaa8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xa98(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9500(%rip), %rsi # 0xf8dac
leaq 0xd9508(%rip), %rdx # 0xf8dbb
leaq 0xa88(%rsp), %rdi
callq 0x53aee
leaq 0x10d0(%rsp), %rdi
leaq 0xa88(%rsp), %rsi
callq 0x12290
leaq 0x21f8(%rsp), %rdi
leaq 0x2198(%rsp), %rsi
leaq 0x1090(%rsp), %rdx
callq 0x4b8b8
leaq 0x218(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd94b7(%rip), %rsi # 0xf8dbc
leaq 0xd94b6(%rip), %rdx # 0xf8dc2
leaq 0x208(%rsp), %rdi
callq 0x53aee
leaq 0x127628(%rip), %rax # 0x146f48
movq %rax, 0x1018(%rsp)
movl $0x0, 0x1020(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126b48(%rip), %rcx # 0x146490
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x126b87(%rip), %rdx # 0x1464e0
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x1038(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x1048(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x208(%rsp), %rsi
movq 0x210(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x1068(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x1245a7(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0x1f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd93f0(%rip), %rsi # 0xf8dc3
leaq 0xd93eb(%rip), %rdx # 0xf8dc5
leaq 0x1e8(%rsp), %rdi
callq 0x53aee
leaq 0x1078(%rsp), %rbx
leaq 0x1e8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x1d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd93b4(%rip), %rsi # 0xf8dc6
leaq 0xd93b8(%rip), %rdx # 0xf8dd1
leaq 0x1c8(%rsp), %rdi
callq 0x53aee
leaq 0x1c8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xa78(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9389(%rip), %rsi # 0xf8dd2
leaq 0xd9395(%rip), %rdx # 0xf8de5
leaq 0xa68(%rsp), %rdi
callq 0x53aee
leaq 0x1058(%rsp), %rdi
leaq 0xa68(%rsp), %rsi
callq 0x12290
leaq 0x2258(%rsp), %rdi
leaq 0x21f8(%rsp), %rsi
leaq 0x1018(%rsp), %rdx
callq 0x4b8b8
leaq 0xa58(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9398(%rip), %rsi # 0xf8e3a
leaq 0xd9398(%rip), %rdx # 0xf8e41
leaq 0xa48(%rsp), %rdi
callq 0x53aee
leaq 0x50(%r14), %rsi
leaq 0x15e0(%rsp), %rdi
leaq 0xa48(%rsp), %rdx
callq 0x58476
leaq 0x124482(%rip), %rax # 0x143f58
movq %rax, 0x15e0(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x1640(%rsp)
movq $0x0, 0x1650(%rsp)
leaq 0xa38(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd92de(%rip), %rsi # 0xf8de6
leaq 0xd92d9(%rip), %rdx # 0xf8de8
leaq 0xa28(%rsp), %rdi
callq 0x53aee
leaq 0x1640(%rsp), %rbx
leaq 0xa28(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0xa18(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd92a2(%rip), %rsi # 0xf8de9
leaq 0xd92a9(%rip), %rdx # 0xf8df7
leaq 0xa08(%rsp), %rdi
callq 0x53aee
leaq 0xa08(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x9f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd927a(%rip), %rsi # 0xf8df8
leaq 0xd92bc(%rip), %rdx # 0xf8e41
leaq 0x9e8(%rsp), %rdi
callq 0x53aee
leaq 0x1620(%rsp), %rdi
leaq 0x9e8(%rsp), %rsi
callq 0x12290
leaq 0x22b8(%rsp), %rdi
leaq 0x2258(%rsp), %rsi
leaq 0x15e0(%rsp), %rdx
callq 0x4b8b8
leaq 0x1b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd92c8(%rip), %rsi # 0xf8e9f
leaq 0xd92c9(%rip), %rdx # 0xf8ea7
leaq 0x1a8(%rsp), %rdi
callq 0x53aee
leaq 0x127356(%rip), %rax # 0x146f48
movq %rax, 0xfa0(%rsp)
movl $0x0, 0xfa8(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126916(%rip), %rcx # 0x146530
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x126955(%rip), %rdx # 0x146580
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xfc0(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xfd0(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x1a8(%rsp), %rsi
movq 0x1b0(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xff0(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x1242d5(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0x198(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd919d(%rip), %rsi # 0xf8e42
leaq 0xd9198(%rip), %rdx # 0xf8e44
leaq 0x188(%rsp), %rdi
callq 0x53aee
leaq 0x1000(%rsp), %rbx
leaq 0x188(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x9d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9161(%rip), %rsi # 0xf8e45
leaq 0xd9166(%rip), %rdx # 0xf8e51
leaq 0x9c8(%rsp), %rdi
callq 0x53aee
leaq 0x9c8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x9b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9137(%rip), %rsi # 0xf8e52
leaq 0xd9152(%rip), %rdx # 0xf8e74
leaq 0x9a8(%rsp), %rdi
callq 0x53aee
leaq 0xfe0(%rsp), %rdi
leaq 0x9a8(%rsp), %rsi
callq 0x12290
leaq 0x2318(%rsp), %rdi
leaq 0x22b8(%rsp), %rsi
leaq 0xfa0(%rsp), %rdx
callq 0x4b8b8
leaq 0x9(%r14), %rsi
leaq 0x1af0(%rsp), %rdi
callq 0x2b110
leaq 0x178(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd90f0(%rip), %rsi # 0xf8e75
leaq 0xd90eb(%rip), %rdx # 0xf8e77
leaq 0x168(%rsp), %rdi
callq 0x53aee
leaq 0x1b50(%rsp), %rbx
leaq 0x168(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x998(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd90b4(%rip), %rsi # 0xf8e78
leaq 0xd90c0(%rip), %rdx # 0xf8e8b
leaq 0x988(%rsp), %rdi
callq 0x53aee
leaq 0x988(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x978(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9091(%rip), %rsi # 0xf8e8c
leaq 0xd90a5(%rip), %rdx # 0xf8ea7
leaq 0x968(%rsp), %rdi
callq 0x53aee
leaq 0x1b30(%rsp), %rdi
leaq 0x968(%rsp), %rsi
callq 0x12290
leaq 0x2378(%rsp), %rdi
leaq 0x2318(%rsp), %rsi
leaq 0x1af0(%rsp), %rdx
callq 0x4b8b8
leaq 0x958(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd9054(%rip), %rsi # 0xf8ea8
leaq 0xd9059(%rip), %rdx # 0xf8eb4
leaq 0x948(%rsp), %rdi
callq 0x53aee
leaq 0xf8(%r14), %rsi
leaq 0x1568(%rsp), %rdi
leaq 0x948(%rsp), %rdx
callq 0x58a8c
leaq 0x1240cd(%rip), %rax # 0x143f58
movq %rax, 0x1568(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x15c8(%rsp)
movq $0x0, 0x15d8(%rsp)
leaq 0x938(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8ff8(%rip), %rsi # 0xf8eb5
leaq 0xd8ff3(%rip), %rdx # 0xf8eb7
leaq 0x928(%rsp), %rdi
callq 0x53aee
leaq 0x15c8(%rsp), %rbx
leaq 0x928(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x918(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8fbc(%rip), %rsi # 0xf8eb8
leaq 0xd8fbe(%rip), %rdx # 0xf8ec1
leaq 0x908(%rsp), %rdi
callq 0x53aee
leaq 0x908(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x8f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8f8f(%rip), %rsi # 0xf8ec2
leaq 0xd8f9e(%rip), %rdx # 0xf8ed8
leaq 0x8e8(%rsp), %rdi
callq 0x53aee
leaq 0x15a8(%rsp), %rdi
leaq 0x8e8(%rsp), %rsi
callq 0x12290
leaq 0x23d8(%rsp), %rdi
leaq 0x2378(%rsp), %rsi
leaq 0x1568(%rsp), %rdx
callq 0x4b8b8
leaq 0x158(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8f4d(%rip), %rsi # 0xf8ed9
leaq 0xd8f57(%rip), %rdx # 0xf8eea
leaq 0x148(%rsp), %rdi
callq 0x53aee
leaq 0x126fa1(%rip), %rax # 0x146f48
movq %rax, 0xf28(%rsp)
movl $0x0, 0xf30(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126601(%rip), %rcx # 0x1465d0
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x126640(%rip), %rdx # 0x146620
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xf48(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xf58(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x148(%rsp), %rsi
movq 0x150(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xf78(%rsp), %rdx
movq %rdx, -0x10(%rdx)
xorl %eax, %eax
movq %rax, -0x8(%rdx)
movb $0x0, (%rdx)
leaq 0x123f20(%rip), %rcx # 0x143f58
movq %rcx, -0x50(%rdx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rdx)
movq %rax, 0x20(%rdx)
leaq 0x8d8(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xd8e91(%rip), %rsi # 0xf8eeb
leaq 0xd8e8c(%rip), %rdx # 0xf8eed
leaq 0x8c8(%rsp), %rdi
callq 0x53aee
leaq 0xf88(%rsp), %rbx
leaq 0x8c8(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x138(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8e55(%rip), %rsi # 0xf8eee
leaq 0xd8e59(%rip), %rdx # 0xf8ef9
leaq 0x128(%rsp), %rdi
callq 0x53aee
leaq 0x128(%rsp), %rsi
movq %rbx, %rdi
callq 0x4cae4
leaq 0x8b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8e2a(%rip), %rsi # 0xf8efa
leaq 0xd8e37(%rip), %rdx # 0xf8f0e
leaq 0x8a8(%rsp), %rdi
callq 0x53aee
leaq 0xf68(%rsp), %rdi
leaq 0x8a8(%rsp), %rsi
callq 0x12290
leaq 0x2438(%rsp), %rdi
leaq 0x23d8(%rsp), %rsi
leaq 0xf28(%rsp), %rdx
callq 0x4b8b8
leaq 0x1a78(%rsp), %rdi
movq %r14, %rsi
callq 0x2b110
leaq 0x898(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8dd6(%rip), %rsi # 0xf8f0f
leaq 0xd8ddb(%rip), %rdx # 0xf8f1b
leaq 0x888(%rsp), %rdi
callq 0x53aee
leaq 0x1ad8(%rsp), %rdi
leaq 0x888(%rsp), %rsi
callq 0x4cae4
leaq 0x878(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8da7(%rip), %rsi # 0xf8f1c
leaq 0xd8dbc(%rip), %rdx # 0xf8f38
leaq 0x868(%rsp), %rdi
callq 0x53aee
leaq 0x123dc8(%rip), %r13 # 0x143f58
leaq 0x126db1(%rip), %r15 # 0x146f48
leaq 0x1ab8(%rsp), %rdi
leaq 0x868(%rsp), %rsi
callq 0x12290
leaq 0x2498(%rsp), %rdi
leaq 0x2438(%rsp), %rsi
leaq 0x1a78(%rsp), %rdx
callq 0x4b8b8
leaq 0x1(%r14), %rsi
leaq 0x1a00(%rsp), %rdi
callq 0x2b110
leaq 0x858(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8d4c(%rip), %rsi # 0xf8f39
leaq 0xd8d50(%rip), %rdx # 0xf8f44
leaq 0x848(%rsp), %rdi
callq 0x53aee
leaq 0x1a60(%rsp), %rdi
leaq 0x848(%rsp), %rsi
callq 0x4cae4
leaq 0x838(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8d1c(%rip), %rsi # 0xf8f45
leaq 0xd8d2b(%rip), %rdx # 0xf8f5b
leaq 0x828(%rsp), %rdi
callq 0x53aee
leaq 0x1a40(%rsp), %rdi
leaq 0x828(%rsp), %rsi
callq 0x12290
leaq 0x24f8(%rsp), %rdi
leaq 0x2498(%rsp), %rsi
leaq 0x1a00(%rsp), %rdx
callq 0x4b8b8
leaq 0x2(%r14), %rsi
leaq 0x1988(%rsp), %rdi
callq 0x2b110
leaq 0x818(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xdc1e2(%rip), %rsi # 0xfc475
leaq 0xdc1eb(%rip), %rdx # 0xfc485
leaq 0x808(%rsp), %rdi
callq 0x53aee
leaq 0x19e8(%rsp), %rdi
leaq 0x808(%rsp), %rsi
callq 0x4cae4
leaq 0x7f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8c8d(%rip), %rsi # 0xf8f5c
leaq 0xd8ca2(%rip), %rdx # 0xf8f78
leaq 0x7e8(%rsp), %rdi
callq 0x53aee
leaq 0x19c8(%rsp), %rdi
leaq 0x7e8(%rsp), %rsi
callq 0x12290
leaq 0x2558(%rsp), %rdi
leaq 0x24f8(%rsp), %rsi
leaq 0x1988(%rsp), %rdx
callq 0x4b8b8
leaq 0x3(%r14), %rsi
leaq 0x1910(%rsp), %rdi
callq 0x2b110
leaq 0x7d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8c40(%rip), %rsi # 0xf8f79
leaq 0xd8c49(%rip), %rdx # 0xf8f89
leaq 0x7c8(%rsp), %rdi
callq 0x53aee
leaq 0x1970(%rsp), %rdi
leaq 0x7c8(%rsp), %rsi
callq 0x4cae4
leaq 0x7b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8c15(%rip), %rsi # 0xf8f8a
leaq 0xd8c20(%rip), %rdx # 0xf8f9c
leaq 0x7a8(%rsp), %rdi
callq 0x53aee
leaq 0x1950(%rsp), %rdi
leaq 0x7a8(%rsp), %rsi
callq 0x12290
leaq 0x25b8(%rsp), %rdi
leaq 0x2558(%rsp), %rsi
leaq 0x1910(%rsp), %rdx
callq 0x4b8b8
leaq 0x118(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8bcf(%rip), %rsi # 0xf8f9d
leaq 0xd8bd5(%rip), %rdx # 0xf8faa
leaq 0x108(%rsp), %rdi
callq 0x53aee
movq %r15, 0xeb0(%rsp)
movl $0x0, 0xeb8(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x126266(%rip), %rcx # 0x146670
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x1262a5(%rip), %rdx # 0x1466c0
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xed0(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xee0(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x108(%rsp), %rsi
movq 0x110(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xf00(%rsp), %rcx
movq %rcx, -0x10(%rcx)
xorl %eax, %eax
movq %rax, -0x8(%rcx)
movb $0x0, (%rcx)
movq %r13, -0x50(%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rcx)
movq %rax, 0x20(%rcx)
leaq 0x798(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8b1d(%rip), %rsi # 0xf8fab
leaq 0xd8b1d(%rip), %rdx # 0xf8fb2
leaq 0x788(%rsp), %rdi
callq 0x53aee
leaq 0xf10(%rsp), %rdi
leaq 0x788(%rsp), %rsi
callq 0x4cae4
leaq 0x778(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8ae9(%rip), %rsi # 0xf8fb3
leaq 0xd8b04(%rip), %rdx # 0xf8fd5
leaq 0x768(%rsp), %rdi
callq 0x53aee
leaq 0xef0(%rsp), %rdi
leaq 0x768(%rsp), %rsi
callq 0x12290
leaq 0x2618(%rsp), %rdi
leaq 0x25b8(%rsp), %rsi
leaq 0xeb0(%rsp), %rdx
callq 0x4b8b8
leaq 0xf8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8ab3(%rip), %rsi # 0xf8fd6
leaq 0xd8ac9(%rip), %rdx # 0xf8ff3
leaq 0xe8(%rsp), %rdi
callq 0x53aee
movq %r15, 0xe38(%rsp)
movl $0x0, 0xe40(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x1261b1(%rip), %rcx # 0x146710
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x1261f0(%rip), %rdx # 0x146760
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xe58(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xe68(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0xe8(%rsp), %rsi
movq 0xf0(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xe88(%rsp), %rcx
movq %rcx, -0x10(%rcx)
xorl %eax, %eax
movq %rax, -0x8(%rcx)
movb $0x0, (%rcx)
movq %r13, -0x50(%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rcx)
movq %rax, 0x20(%rcx)
leaq 0x758(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8a11(%rip), %rsi # 0xf8ff4
leaq 0xd8a14(%rip), %rdx # 0xf8ffe
leaq 0x748(%rsp), %rdi
callq 0x53aee
leaq 0xe98(%rsp), %rdi
leaq 0x748(%rsp), %rsi
callq 0x4cae4
leaq 0x738(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd89e0(%rip), %rsi # 0xf8fff
leaq 0xd89ff(%rip), %rdx # 0xf9025
leaq 0x728(%rsp), %rdi
callq 0x53aee
leaq 0xe78(%rsp), %rdi
leaq 0x728(%rsp), %rsi
callq 0x12290
leaq 0x2678(%rsp), %rdi
leaq 0x2618(%rsp), %rsi
leaq 0xe38(%rsp), %rdx
callq 0x4b8b8
leaq 0xd8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd89ae(%rip), %rsi # 0xf9026
leaq 0xd89be(%rip), %rdx # 0xf903d
leaq 0xc8(%rsp), %rdi
callq 0x53aee
movq %r15, 0xdc0(%rsp)
movl $0x0, 0xdc8(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x1260fc(%rip), %rcx # 0x1467b0
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x12613b(%rip), %rdx # 0x146800
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xde0(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xdf0(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0xc8(%rsp), %rsi
movq 0xd0(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xe10(%rsp), %rcx
movq %rcx, -0x10(%rcx)
xorl %eax, %eax
movq %rax, -0x8(%rcx)
movb $0x0, (%rcx)
movq %r13, -0x50(%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rcx)
movq %rax, 0x20(%rcx)
leaq 0x718(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8906(%rip), %rsi # 0xf903e
leaq 0xd890c(%rip), %rdx # 0xf904b
leaq 0x708(%rsp), %rdi
callq 0x53aee
leaq 0xe20(%rsp), %rdi
leaq 0x708(%rsp), %rsi
callq 0x4cae4
leaq 0x6f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd88d8(%rip), %rsi # 0xf904c
leaq 0xd88fa(%rip), %rdx # 0xf9075
leaq 0x6e8(%rsp), %rdi
callq 0x53aee
leaq 0xe00(%rsp), %rdi
leaq 0x6e8(%rsp), %rsi
callq 0x12290
leaq 0x26d8(%rsp), %rdi
leaq 0x2678(%rsp), %rsi
leaq 0xdc0(%rsp), %rdx
callq 0x4b8b8
leaq 0xa(%r14), %rsi
leaq 0x1898(%rsp), %rdi
callq 0x2b110
leaq 0x6d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8898(%rip), %rsi # 0xf9076
leaq 0xd889e(%rip), %rdx # 0xf9083
leaq 0x6c8(%rsp), %rdi
callq 0x53aee
leaq 0x18f8(%rsp), %rdi
leaq 0x6c8(%rsp), %rsi
callq 0x4cae4
leaq 0x6b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd886a(%rip), %rsi # 0xf9084
leaq 0xd889c(%rip), %rdx # 0xf90bd
leaq 0x6a8(%rsp), %rdi
callq 0x53aee
leaq 0x18d8(%rsp), %rdi
leaq 0x6a8(%rsp), %rsi
callq 0x12290
leaq 0x2738(%rsp), %rdi
leaq 0x26d8(%rsp), %rsi
leaq 0x1898(%rsp), %rdx
callq 0x4b8b8
leaq 0xb8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd884b(%rip), %rsi # 0xf90be
leaq 0xd8859(%rip), %rdx # 0xf90d3
leaq 0xa8(%rsp), %rdi
callq 0x53aee
movq %r15, 0xd48(%rsp)
movl $0x0, 0xd50(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x125fa1(%rip), %rcx # 0x146850
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x125fe0(%rip), %rdx # 0x1468a0
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0xd68(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0xd78(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0xa8(%rsp), %rsi
movq 0xb0(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0xd98(%rsp), %rcx
movq %rcx, -0x10(%rcx)
xorl %eax, %eax
movq %rax, -0x8(%rcx)
movb $0x0, (%rcx)
movq %r13, -0x50(%rcx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rcx)
movq %rax, 0x20(%rcx)
leaq 0x698(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd87a1(%rip), %rsi # 0xf90d4
leaq 0xd87ad(%rip), %rdx # 0xf90e7
leaq 0x688(%rsp), %rdi
callq 0x53aee
leaq 0xda8(%rsp), %rdi
leaq 0x688(%rsp), %rsi
callq 0x4cae4
leaq 0x678(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8779(%rip), %rsi # 0xf90e8
leaq 0xd8795(%rip), %rdx # 0xf910b
leaq 0x668(%rsp), %rdi
callq 0x53aee
leaq 0xd88(%rsp), %rdi
leaq 0x668(%rsp), %rsi
callq 0x12290
leaq 0x2798(%rsp), %rdi
leaq 0x2738(%rsp), %rsi
leaq 0xd48(%rsp), %rdx
callq 0x4b8b8
leaq 0x1c(%r14), %rsi
leaq 0x1820(%rsp), %rdi
callq 0x2b110
leaq 0x658(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8733(%rip), %rsi # 0xf910c
leaq 0xd873d(%rip), %rdx # 0xf911d
leaq 0x648(%rsp), %rdi
callq 0x53aee
leaq 0x1880(%rsp), %rdi
leaq 0x648(%rsp), %rsi
callq 0x4cae4
leaq 0x638(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8709(%rip), %rsi # 0xf911e
leaq 0xd871c(%rip), %rdx # 0xf9138
leaq 0x628(%rsp), %rdi
callq 0x53aee
leaq 0x1860(%rsp), %rdi
leaq 0x628(%rsp), %rsi
callq 0x12290
leaq 0x27f8(%rsp), %rdi
leaq 0x2798(%rsp), %rsi
leaq 0x1820(%rsp), %rdx
callq 0x4b8b8
leaq 0x618(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xdaed6(%rip), %rsi # 0xfb944
leaq 0xdaed6(%rip), %rdx # 0xfb94b
leaq 0x608(%rsp), %rdi
callq 0x53aee
leaq 0x20(%r14), %rsi
leaq 0x14f0(%rsp), %rdi
leaq 0x608(%rsp), %rdx
callq 0x58b36
movq %r13, 0x14f0(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x1550(%rsp)
movq $0x0, 0x1560(%rsp)
leaq 0x5f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd866c(%rip), %rsi # 0xf9139
leaq 0xd8678(%rip), %rdx # 0xf914c
leaq 0x5e8(%rsp), %rdi
callq 0x53aee
leaq 0x1550(%rsp), %rdi
leaq 0x5e8(%rsp), %rsi
callq 0x4cae4
leaq 0x5d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8644(%rip), %rsi # 0xf914d
leaq 0xd8668(%rip), %rdx # 0xf9178
leaq 0x5c8(%rsp), %rdi
callq 0x53aee
leaq 0x1530(%rsp), %rdi
leaq 0x5c8(%rsp), %rsi
callq 0x12290
leaq 0x2858(%rsp), %rdi
leaq 0x27f8(%rsp), %rsi
leaq 0x14f0(%rsp), %rdx
callq 0x4b8b8
leaq 0x5b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8623(%rip), %rsi # 0xf9185
leaq 0xd8625(%rip), %rdx # 0xf918e
leaq 0x5a8(%rsp), %rdi
callq 0x53aee
leaq 0x30(%r14), %rsi
leaq 0x1478(%rsp), %rdi
leaq 0x5a8(%rsp), %rdx
callq 0x58b36
movq %r13, 0x1478(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x14d8(%rsp)
movq $0x0, 0x14e8(%rsp)
leaq 0x598(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd85b8(%rip), %rsi # 0xf9179
leaq 0xd85c6(%rip), %rdx # 0xf918e
leaq 0x588(%rsp), %rdi
callq 0x53aee
leaq 0x14d8(%rsp), %rdi
leaq 0x588(%rsp), %rsi
callq 0x4cae4
leaq 0x578(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8592(%rip), %rsi # 0xf918f
leaq 0xd85c2(%rip), %rdx # 0xf91c6
leaq 0x568(%rsp), %rdi
callq 0x53aee
leaq 0x14b8(%rsp), %rdi
leaq 0x568(%rsp), %rsi
callq 0x12290
leaq 0x28b8(%rsp), %rdi
leaq 0x2858(%rsp), %rsi
leaq 0x1478(%rsp), %rdx
callq 0x4b8b8
leaq 0x558(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8571(%rip), %rsi # 0xf91c7
leaq 0xd857d(%rip), %rdx # 0xf91da
leaq 0x548(%rsp), %rdi
callq 0x53aee
leaq 0x28(%r14), %rsi
leaq 0x1400(%rsp), %rdi
leaq 0x548(%rsp), %rdx
callq 0x58476
movq %r13, 0x1400(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x1460(%rsp)
movq $0x0, 0x1470(%rsp)
leaq 0x538(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8526(%rip), %rsi # 0xf91db
leaq 0xd853e(%rip), %rdx # 0xf91fa
leaq 0x528(%rsp), %rdi
callq 0x53aee
leaq 0x1460(%rsp), %rdi
leaq 0x528(%rsp), %rsi
callq 0x4cae4
leaq 0x518(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd850a(%rip), %rsi # 0xf91fb
leaq 0xd8549(%rip), %rdx # 0xf9241
leaq 0x508(%rsp), %rdi
callq 0x53aee
leaq 0x1440(%rsp), %rdi
leaq 0x508(%rsp), %rsi
callq 0x12290
leaq 0x2918(%rsp), %rdi
leaq 0x28b8(%rsp), %rsi
leaq 0x1400(%rsp), %rdx
callq 0x4b8b8
leaq 0x1d(%r14), %rsi
leaq 0x17a8(%rsp), %rdi
callq 0x2b110
leaq 0x4f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd84e7(%rip), %rsi # 0xf9242
leaq 0xd84f7(%rip), %rdx # 0xf9259
leaq 0x4e8(%rsp), %rdi
callq 0x53aee
leaq 0x1808(%rsp), %rdi
leaq 0x4e8(%rsp), %rsi
callq 0x4cae4
leaq 0x4d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd84c3(%rip), %rsi # 0xf925a
leaq 0xd84f2(%rip), %rdx # 0xf9290
leaq 0x4c8(%rsp), %rdi
callq 0x53aee
leaq 0x17e8(%rsp), %rdi
leaq 0x4c8(%rsp), %rsi
callq 0x12290
leaq 0x2978(%rsp), %rdi
leaq 0x2918(%rsp), %rsi
leaq 0x17a8(%rsp), %rdx
callq 0x4b8b8
leaq 0x4b8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd84a1(%rip), %rsi # 0xf9291
leaq 0xd84ad(%rip), %rdx # 0xf92a4
leaq 0x4a8(%rsp), %rdi
callq 0x53aee
leaq 0x38(%r14), %rsi
leaq 0x1388(%rsp), %rdi
leaq 0x4a8(%rsp), %rdx
callq 0x58e0e
movq %r13, 0x1388(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x13e8(%rsp)
movq $0x0, 0x13f8(%rsp)
leaq 0x498(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8456(%rip), %rsi # 0xf92a5
leaq 0xd8466(%rip), %rdx # 0xf92bc
leaq 0x488(%rsp), %rdi
callq 0x53aee
leaq 0x13e8(%rsp), %rdi
leaq 0x488(%rsp), %rsi
callq 0x4cae4
leaq 0x478(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8432(%rip), %rsi # 0xf92bd
leaq 0xd8476(%rip), %rdx # 0xf9308
leaq 0x468(%rsp), %rdi
callq 0x53aee
leaq 0x13c8(%rsp), %rdi
leaq 0x468(%rsp), %rsi
callq 0x12290
leaq 0x29d8(%rsp), %rdi
leaq 0x2978(%rsp), %rsi
leaq 0x1388(%rsp), %rdx
callq 0x4b8b8
leaq 0x98(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xdb80d(%rip), %rsi # 0xfc6f1
leaq 0xdb811(%rip), %rdx # 0xfc6fc
leaq 0x88(%rsp), %rdi
callq 0x53aee
movq %r15, 0x12e8(%rsp)
movl $0x0, 0x12f0(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x1259d0(%rip), %rcx # 0x1468f0
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x125a0f(%rip), %rdx # 0x146940
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x1308(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x1318(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x88(%rsp), %rsi
movq 0x90(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x1338(%rsp), %rbx
movq %rbx, -0x10(%rbx)
xorl %eax, %eax
movq %rax, -0x8(%rbx)
movb $0x0, (%rbx)
movq %r13, -0x50(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%rbx)
movq %rax, 0x20(%rbx)
leaq 0x458(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8365(%rip), %rsi # 0xf9309
leaq 0xd836b(%rip), %rdx # 0xf9316
leaq 0x448(%rsp), %rdi
callq 0x53aee
leaq 0x1348(%rsp), %rdi
leaq 0x448(%rsp), %rsi
callq 0x4cae4
leaq 0x438(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8337(%rip), %rsi # 0xf9317
leaq 0xd8360(%rip), %rdx # 0xf9347
leaq 0x428(%rsp), %rdi
callq 0x53aee
leaq 0x1328(%rsp), %rdi
leaq 0x428(%rsp), %rsi
callq 0x12290
leaq 0x2a38(%rsp), %rdi
leaq 0x29d8(%rsp), %rsi
leaq 0x12e8(%rsp), %rdx
callq 0x4b8b8
leaq 0x78(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xdb6e9(%rip), %rsi # 0xfc71f
leaq 0xdb6ed(%rip), %rdx # 0xfc72a
leaq 0x68(%rsp), %rdi
callq 0x53aee
movq %r15, 0x1270(%rsp)
movl $0x0, 0x1278(%rsp)
movl $0x20, %edi
callq 0x12540
movq %r12, 0x8(%rax)
leaq 0x125921(%rip), %rcx # 0x146990
movq %rcx, (%rax)
movq %rax, %rcx
addq $0x10, %rcx
leaq 0x125960(%rip), %rdx # 0x1469e0
movq %rdx, 0x10(%rax)
movq %r14, 0x18(%rax)
leaq 0x1290(%rsp), %rdi
movq %rcx, -0x10(%rdi)
movq %rax, -0x8(%rdi)
leaq 0x12a0(%rsp), %rax
movq %rax, -0x10(%rax)
movq 0x68(%rsp), %rsi
movq 0x70(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x12c0(%rsp), %r15
movq %r15, -0x10(%r15)
xorl %eax, %eax
movq %rax, -0x8(%r15)
movb $0x0, (%r15)
movq %r13, -0x50(%r15)
xorps %xmm0, %xmm0
movups %xmm0, 0x10(%r15)
movq %rax, 0x20(%r15)
leaq 0x418(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd8259(%rip), %rsi # 0xf9348
leaq 0xd825f(%rip), %rdx # 0xf9355
leaq 0x408(%rsp), %rdi
callq 0x53aee
leaq 0x12d0(%rsp), %rdi
leaq 0x408(%rsp), %rsi
callq 0x4cae4
leaq 0x3f8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd822b(%rip), %rsi # 0xf9356
leaq 0xd825e(%rip), %rdx # 0xf9390
leaq 0x3e8(%rsp), %rdi
callq 0x53aee
leaq 0x12b0(%rsp), %rdi
leaq 0x3e8(%rsp), %rsi
callq 0x12290
leaq 0x2a98(%rsp), %rdi
leaq 0x2a38(%rsp), %rsi
leaq 0x1270(%rsp), %rdx
callq 0x4b8b8
leaq 0xb(%r14), %rsi
leaq 0x1d48(%rsp), %rdi
callq 0x2b110
leaq 0xd38(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0xd81fb(%rip), %rsi # 0xf9391
leaq 0xd820c(%rip), %rdx # 0xf93a9
leaq 0xd28(%rsp), %rdi
callq 0x53aee
leaq 0x1da8(%rsp), %rbp
leaq 0xd28(%rsp), %rsi
movq %rbp, %rdi
callq 0x4cae4
leaq 0x3d8(%rsp), %rax
movq %rax, -0x10(%rax)
leaq 0xd81d5(%rip), %rsi # 0xf93aa
leaq 0xd81ef(%rip), %rdx # 0xf93cb
leaq 0x3c8(%rsp), %rdi
callq 0x53aee
leaq 0x1d88(%rsp), %rdi
leaq 0x3c8(%rsp), %rsi
callq 0x12290
leaq 0x2af8(%rsp), %rdi
leaq 0x2a98(%rsp), %rsi
leaq 0x1d48(%rsp), %rdx
callq 0x4b8b8
leaq 0xd18(%rsp), %r13
movq %r13, -0x10(%r13)
leaq 0xd819e(%rip), %rsi # 0xf93cc
leaq 0xd81ad(%rip), %rdx # 0xf93e2
leaq 0xd08(%rsp), %rdi
callq 0x53aee
addq $0xe0, %r14
leaq 0x1748(%rsp), %rdi
leaq 0xd08(%rsp), %rdx
movq %r14, %rsi
callq 0x51c4a
leaq 0x124498(%rip), %rax # 0x145700
movq %rax, 0x1748(%rsp)
leaq 0xcf8(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0xd8160(%rip), %rsi # 0xf93e3
leaq 0xd8173(%rip), %rdx # 0xf93fd
leaq 0xce8(%rsp), %rdi
callq 0x53aee
leaq 0x1788(%rsp), %rdi
leaq 0xce8(%rsp), %rsi
callq 0x12290
leaq 0x2af8(%rsp), %rsi
leaq 0x1748(%rsp), %rdx
movq 0x60(%rsp), %rdi
callq 0x4ba8a
movq 0xce8(%rsp), %rdi
cmpq %r14, %rdi
je 0x212e3
movq 0xcf8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x125c96(%rip), %rax # 0x146f80
leaq 0x1798(%rsp), %rcx
movq %rax, -0x50(%rcx)
movq -0x10(%rcx), %rdi
cmpq %rcx, %rdi
je 0x2130f
movq 0x1798(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1778(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x21330
movq 0x1778(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1760(%rsp), %rdi
testq %rdi, %rdi
je 0x21342
callq 0x4fb08
movq 0xd08(%rsp), %rdi
cmpq %r13, %rdi
je 0x2135f
movq 0xd18(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x123f1a(%rip), %rax # 0x145280
leaq 0x2b40(%rsp), %rdi
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2b28(%rsp), %r14
movq %r14, %rdi
callq 0x4fb78
leaq 0x122c02(%rip), %r13 # 0x143f90
movq %r13, -0x28(%r14)
movq -0x8(%r14), %rdi
testq %rdi, %rdi
je 0x213a0
callq 0x4fb08
movq 0x2b10(%rsp), %rdi
testq %rdi, %rdi
je 0x213b2
callq 0x4fb08
movq 0x3c8(%rsp), %rdi
leaq 0x3d8(%rsp), %rax
cmpq %rax, %rdi
je 0x213d7
movq 0x3d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xd28(%rsp), %rdi
cmpq %r12, %rdi
je 0x213f4
movq 0xd38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d98(%rsp), %r14
leaq 0x122b55(%rip), %rax # 0x143f58
movq %rax, -0x50(%r14)
movq %rbp, %rdi
callq 0x4c0f8
leaq 0x125b32(%rip), %rax # 0x146f48
movq %rax, -0x50(%r14)
movq -0x10(%r14), %rdi
cmpq %r14, %rdi
je 0x21433
movq 0x1d98(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d78(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
leaq 0x398(%rsp), %r12
je 0x2145c
movq 0x1d78(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1d60(%rsp), %rdi
testq %rdi, %rdi
leaq 0x122aea(%rip), %rbp # 0x143f58
je 0x21475
callq 0x4fb08
leaq 0x2ae0(%rsp), %rdi
leaq 0x123dfc(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2ac8(%rsp), %r14
movq %r14, %rdi
callq 0x4fb78
movq %r13, -0x28(%r14)
movq -0x8(%r14), %rdi
testq %rdi, %rdi
je 0x214af
callq 0x4fb08
movq 0x2ab0(%rsp), %rdi
testq %rdi, %rdi
je 0x214c1
callq 0x4fb08
movq 0x3e8(%rsp), %rdi
leaq 0x3f8(%rsp), %rax
cmpq %rax, %rdi
je 0x214e6
movq 0x3f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x408(%rsp), %rdi
leaq 0x418(%rsp), %rax
cmpq %rax, %rdi
je 0x2150b
movq 0x418(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x1270(%rsp)
leaq 0x12d0(%rsp), %rdi
callq 0x4c0f8
leaq 0x125a21(%rip), %rax # 0x146f48
movq %rax, 0x1270(%rsp)
movq 0x12b0(%rsp), %rdi
cmpq %r15, %rdi
je 0x2154c
movq 0x12c0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1290(%rsp), %rdi
leaq 0x12a0(%rsp), %rax
cmpq %rax, %rdi
je 0x21571
movq 0x12a0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1288(%rsp), %rdi
testq %rdi, %rdi
leaq 0x1259c5(%rip), %r15 # 0x146f48
je 0x2158a
callq 0x4fb08
movq 0x68(%rsp), %rdi
leaq 0x78(%rsp), %rax
cmpq %rax, %rdi
je 0x215a6
movq 0x78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2a80(%rsp), %rdi
leaq 0x123ccb(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2a68(%rsp), %r14
movq %r14, %rdi
callq 0x4fb78
movq %r13, -0x28(%r14)
movq -0x8(%r14), %rdi
testq %rdi, %rdi
je 0x215e0
callq 0x4fb08
movq 0x2a50(%rsp), %rdi
testq %rdi, %rdi
je 0x215f2
callq 0x4fb08
movq 0x428(%rsp), %rdi
leaq 0x438(%rsp), %rax
cmpq %rax, %rdi
je 0x21617
movq 0x438(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x448(%rsp), %rdi
leaq 0x458(%rsp), %rax
cmpq %rax, %rdi
je 0x2163c
movq 0x458(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x12e8(%rsp)
leaq 0x1348(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0x12e8(%rsp)
movq 0x1328(%rsp), %rdi
cmpq %rbx, %rdi
je 0x21676
movq 0x1338(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1308(%rsp), %rdi
leaq 0x1318(%rsp), %rax
cmpq %rax, %rdi
je 0x2169b
movq 0x1318(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1300(%rsp), %rdi
testq %rdi, %rdi
je 0x216ad
callq 0x4fb08
movq 0x88(%rsp), %rdi
leaq 0x98(%rsp), %rax
cmpq %rax, %rdi
je 0x216d2
movq 0x98(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2a20(%rsp), %rdi
leaq 0x123b9f(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2a08(%rsp), %r14
movq %r14, %rdi
callq 0x4fb78
movq %r13, -0x28(%r14)
movq -0x8(%r14), %rdi
testq %rdi, %rdi
je 0x2170c
callq 0x4fb08
movq 0x29f0(%rsp), %rdi
testq %rdi, %rdi
leaq 0x10(%rsp), %r14
je 0x21723
callq 0x4fb08
movq 0x468(%rsp), %rdi
leaq 0x478(%rsp), %rax
cmpq %rax, %rdi
je 0x21748
movq 0x478(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x488(%rsp), %rdi
leaq 0x498(%rsp), %rax
cmpq %rax, %rdi
je 0x2176d
movq 0x498(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x13d8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x13e8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x217a3
movq 0x13d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x13b8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x217c4
movq 0x13b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x13a0(%rsp), %rdi
testq %rdi, %rdi
je 0x217d6
callq 0x4fb08
movq 0x4a8(%rsp), %rdi
leaq 0x4b8(%rsp), %rax
cmpq %rax, %rdi
je 0x217fb
movq 0x4b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x29c0(%rsp), %rdi
leaq 0x123a76(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x29a8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21835
callq 0x4fb08
movq 0x2990(%rsp), %rdi
testq %rdi, %rdi
je 0x21847
callq 0x4fb08
movq 0x4c8(%rsp), %rdi
leaq 0x4d8(%rsp), %rax
cmpq %rax, %rdi
je 0x2186c
movq 0x4d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x4e8(%rsp), %rdi
leaq 0x4f8(%rsp), %rax
cmpq %rax, %rdi
je 0x21891
movq 0x4f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x17f8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1808(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x218c7
movq 0x17f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x17d8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x218e8
movq 0x17d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x17c0(%rsp), %rdi
testq %rdi, %rdi
je 0x218fa
callq 0x4fb08
leaq 0x2960(%rsp), %rdi
leaq 0x123977(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2948(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21934
callq 0x4fb08
movq 0x2930(%rsp), %rdi
testq %rdi, %rdi
je 0x21946
callq 0x4fb08
movq 0x508(%rsp), %rdi
leaq 0x518(%rsp), %rax
cmpq %rax, %rdi
je 0x2196b
movq 0x518(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x528(%rsp), %rdi
leaq 0x538(%rsp), %rax
cmpq %rax, %rdi
je 0x21990
movq 0x538(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1450(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1460(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x219c6
movq 0x1450(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1430(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x219e7
movq 0x1430(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1418(%rsp), %rdi
testq %rdi, %rdi
je 0x219f9
callq 0x4fb08
movq 0x548(%rsp), %rdi
leaq 0x558(%rsp), %rax
cmpq %rax, %rdi
je 0x21a1e
movq 0x558(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2900(%rsp), %rdi
leaq 0x123853(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x28e8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21a58
callq 0x4fb08
movq 0x28d0(%rsp), %rdi
testq %rdi, %rdi
je 0x21a6a
callq 0x4fb08
movq 0x568(%rsp), %rdi
leaq 0x578(%rsp), %rax
cmpq %rax, %rdi
je 0x21a8f
movq 0x578(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x588(%rsp), %rdi
leaq 0x598(%rsp), %rax
cmpq %rax, %rdi
je 0x21ab4
movq 0x598(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x14c8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x14d8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x21aea
movq 0x14c8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x14a8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x21b0b
movq 0x14a8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1490(%rsp), %rdi
testq %rdi, %rdi
je 0x21b1d
callq 0x4fb08
movq 0x5a8(%rsp), %rdi
leaq 0x5b8(%rsp), %rax
cmpq %rax, %rdi
je 0x21b42
movq 0x5b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x28a0(%rsp), %rdi
leaq 0x12372f(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2888(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21b7c
callq 0x4fb08
movq 0x2870(%rsp), %rdi
testq %rdi, %rdi
je 0x21b8e
callq 0x4fb08
movq 0x5c8(%rsp), %rdi
leaq 0x5d8(%rsp), %rax
cmpq %rax, %rdi
je 0x21bb3
movq 0x5d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x5e8(%rsp), %rdi
leaq 0x5f8(%rsp), %rax
cmpq %rax, %rdi
je 0x21bd8
movq 0x5f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1540(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1550(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x21c0e
movq 0x1540(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1520(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x21c2f
movq 0x1520(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1508(%rsp), %rdi
testq %rdi, %rdi
je 0x21c41
callq 0x4fb08
movq 0x608(%rsp), %rdi
leaq 0x618(%rsp), %rax
cmpq %rax, %rdi
je 0x21c66
movq 0x618(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2840(%rsp), %rdi
leaq 0x12360b(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2828(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21ca0
callq 0x4fb08
movq 0x2810(%rsp), %rdi
testq %rdi, %rdi
je 0x21cb2
callq 0x4fb08
movq 0x628(%rsp), %rdi
leaq 0x638(%rsp), %rax
cmpq %rax, %rdi
je 0x21cd7
movq 0x638(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x648(%rsp), %rdi
leaq 0x658(%rsp), %rax
cmpq %rax, %rdi
je 0x21cfc
movq 0x658(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1870(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1880(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x21d32
movq 0x1870(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1850(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x21d53
movq 0x1850(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1838(%rsp), %rdi
testq %rdi, %rdi
je 0x21d65
callq 0x4fb08
leaq 0x27e0(%rsp), %rdi
leaq 0x12350c(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x27c8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21d9f
callq 0x4fb08
movq 0x27b0(%rsp), %rdi
testq %rdi, %rdi
je 0x21db1
callq 0x4fb08
movq 0x668(%rsp), %rdi
leaq 0x678(%rsp), %rax
cmpq %rax, %rdi
je 0x21dd6
movq 0x678(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x688(%rsp), %rdi
leaq 0x698(%rsp), %rax
cmpq %rax, %rdi
je 0x21dfb
movq 0x698(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xd48(%rsp)
leaq 0xda8(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xd48(%rsp)
movq 0xd88(%rsp), %rdi
leaq 0xd98(%rsp), %rax
cmpq %rax, %rdi
je 0x21e3d
movq 0xd98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xd68(%rsp), %rdi
leaq 0xd78(%rsp), %rax
cmpq %rax, %rdi
je 0x21e62
movq 0xd78(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xd60(%rsp), %rdi
testq %rdi, %rdi
je 0x21e74
callq 0x4fb08
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x21e99
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2780(%rsp), %rdi
leaq 0x1233d8(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2768(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21ed3
callq 0x4fb08
movq 0x2750(%rsp), %rdi
testq %rdi, %rdi
je 0x21ee5
callq 0x4fb08
movq 0x6a8(%rsp), %rdi
leaq 0x6b8(%rsp), %rax
cmpq %rax, %rdi
je 0x21f0a
movq 0x6b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x6c8(%rsp), %rdi
leaq 0x6d8(%rsp), %rax
cmpq %rax, %rdi
je 0x21f2f
movq 0x6d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x18e8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x18f8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x21f65
movq 0x18e8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x18c8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x21f86
movq 0x18c8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x18b0(%rsp), %rdi
testq %rdi, %rdi
je 0x21f98
callq 0x4fb08
leaq 0x2720(%rsp), %rdi
leaq 0x1232d9(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2708(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x21fd2
callq 0x4fb08
movq 0x26f0(%rsp), %rdi
testq %rdi, %rdi
je 0x21fe4
callq 0x4fb08
movq 0x6e8(%rsp), %rdi
leaq 0x6f8(%rsp), %rax
cmpq %rax, %rdi
je 0x22009
movq 0x6f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x708(%rsp), %rdi
leaq 0x718(%rsp), %rax
cmpq %rax, %rdi
je 0x2202e
movq 0x718(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xdc0(%rsp)
leaq 0xe20(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xdc0(%rsp)
movq 0xe00(%rsp), %rdi
leaq 0xe10(%rsp), %rax
cmpq %rax, %rdi
je 0x22070
movq 0xe10(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xde0(%rsp), %rdi
leaq 0xdf0(%rsp), %rax
cmpq %rax, %rdi
je 0x22095
movq 0xdf0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xdd8(%rsp), %rdi
testq %rdi, %rdi
je 0x220a7
callq 0x4fb08
movq 0xc8(%rsp), %rdi
leaq 0xd8(%rsp), %rax
cmpq %rax, %rdi
je 0x220cc
movq 0xd8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x26c0(%rsp), %rdi
leaq 0x1231a5(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x26a8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22106
callq 0x4fb08
movq 0x2690(%rsp), %rdi
testq %rdi, %rdi
je 0x22118
callq 0x4fb08
movq 0x728(%rsp), %rdi
leaq 0x738(%rsp), %rax
cmpq %rax, %rdi
je 0x2213d
movq 0x738(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x748(%rsp), %rdi
leaq 0x758(%rsp), %rax
cmpq %rax, %rdi
je 0x22162
movq 0x758(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xe38(%rsp)
leaq 0xe98(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xe38(%rsp)
movq 0xe78(%rsp), %rdi
leaq 0xe88(%rsp), %rax
cmpq %rax, %rdi
je 0x221a4
movq 0xe88(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xe58(%rsp), %rdi
leaq 0xe68(%rsp), %rax
cmpq %rax, %rdi
je 0x221c9
movq 0xe68(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xe50(%rsp), %rdi
testq %rdi, %rdi
je 0x221db
callq 0x4fb08
movq 0xe8(%rsp), %rdi
leaq 0xf8(%rsp), %rax
cmpq %rax, %rdi
je 0x22200
movq 0xf8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2660(%rsp), %rdi
leaq 0x123071(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2648(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2223a
callq 0x4fb08
movq 0x2630(%rsp), %rdi
testq %rdi, %rdi
je 0x2224c
callq 0x4fb08
movq 0x768(%rsp), %rdi
leaq 0x778(%rsp), %rax
cmpq %rax, %rdi
je 0x22271
movq 0x778(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x788(%rsp), %rdi
leaq 0x798(%rsp), %rax
cmpq %rax, %rdi
je 0x22296
movq 0x798(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xeb0(%rsp)
leaq 0xf10(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xeb0(%rsp)
movq 0xef0(%rsp), %rdi
leaq 0xf00(%rsp), %rax
cmpq %rax, %rdi
je 0x222d8
movq 0xf00(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xed0(%rsp), %rdi
leaq 0xee0(%rsp), %rax
cmpq %rax, %rdi
je 0x222fd
movq 0xee0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xec8(%rsp), %rdi
testq %rdi, %rdi
je 0x2230f
callq 0x4fb08
movq 0x108(%rsp), %rdi
leaq 0x118(%rsp), %rax
cmpq %rax, %rdi
je 0x22334
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2600(%rsp), %rdi
leaq 0x122f3d(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x25e8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2236e
callq 0x4fb08
movq 0x25d0(%rsp), %rdi
testq %rdi, %rdi
je 0x22380
callq 0x4fb08
movq 0x7a8(%rsp), %rdi
leaq 0x7b8(%rsp), %rax
cmpq %rax, %rdi
je 0x223a5
movq 0x7b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x7c8(%rsp), %rdi
leaq 0x7d8(%rsp), %rax
cmpq %rax, %rdi
je 0x223ca
movq 0x7d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1960(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1970(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x22400
movq 0x1960(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1940(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x22421
movq 0x1940(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1928(%rsp), %rdi
testq %rdi, %rdi
je 0x22433
callq 0x4fb08
leaq 0x25a0(%rsp), %rdi
leaq 0x122e3e(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2588(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2246d
callq 0x4fb08
movq 0x2570(%rsp), %rdi
testq %rdi, %rdi
je 0x2247f
callq 0x4fb08
movq 0x7e8(%rsp), %rdi
leaq 0x7f8(%rsp), %rax
cmpq %rax, %rdi
je 0x224a4
movq 0x7f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x808(%rsp), %rdi
leaq 0x818(%rsp), %rax
cmpq %rax, %rdi
je 0x224c9
movq 0x818(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x19d8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x19e8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x224ff
movq 0x19d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x19b8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x22520
movq 0x19b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x19a0(%rsp), %rdi
testq %rdi, %rdi
je 0x22532
callq 0x4fb08
leaq 0x2540(%rsp), %rdi
leaq 0x122d3f(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2528(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2256c
callq 0x4fb08
movq 0x2510(%rsp), %rdi
testq %rdi, %rdi
je 0x2257e
callq 0x4fb08
movq 0x828(%rsp), %rdi
leaq 0x838(%rsp), %rax
cmpq %rax, %rdi
je 0x225a3
movq 0x838(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x848(%rsp), %rdi
leaq 0x858(%rsp), %rax
cmpq %rax, %rdi
je 0x225c8
movq 0x858(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1a50(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1a60(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x225fe
movq 0x1a50(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1a30(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x2261f
movq 0x1a30(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1a18(%rsp), %rdi
testq %rdi, %rdi
je 0x22631
callq 0x4fb08
leaq 0x24e0(%rsp), %rdi
leaq 0x122c40(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x24c8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2266b
callq 0x4fb08
movq 0x24b0(%rsp), %rdi
testq %rdi, %rdi
je 0x2267d
callq 0x4fb08
movq 0x868(%rsp), %rdi
leaq 0x878(%rsp), %rax
cmpq %rax, %rdi
je 0x226a2
movq 0x878(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x888(%rsp), %rdi
leaq 0x898(%rsp), %rax
cmpq %rax, %rdi
je 0x226c7
movq 0x898(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1ac8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1ad8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x226fd
movq 0x1ac8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1aa8(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x2271e
movq 0x1aa8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1a90(%rsp), %rdi
testq %rdi, %rdi
je 0x22730
callq 0x4fb08
leaq 0x2480(%rsp), %rdi
leaq 0x122b41(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2468(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2276a
callq 0x4fb08
movq 0x2450(%rsp), %rdi
testq %rdi, %rdi
leaq 0xf58(%rsp), %rbx
je 0x22784
callq 0x4fb08
movq 0x8a8(%rsp), %rdi
leaq 0x8b8(%rsp), %rax
cmpq %rax, %rdi
je 0x227a9
movq 0x8b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x128(%rsp), %rdi
leaq 0x138(%rsp), %rax
cmpq %rax, %rdi
je 0x227ce
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x8c8(%rsp), %rdi
leaq 0x8d8(%rsp), %rax
cmpq %rax, %rdi
je 0x227f3
movq 0x8d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xf28(%rsp)
leaq 0xf88(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xf28(%rsp)
movq 0xf68(%rsp), %rdi
leaq 0xf78(%rsp), %rax
cmpq %rax, %rdi
je 0x22835
movq 0xf78(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xf48(%rsp), %rdi
cmpq %rbx, %rdi
je 0x22852
movq 0xf58(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xf40(%rsp), %rdi
testq %rdi, %rdi
je 0x22864
callq 0x4fb08
movq 0x148(%rsp), %rdi
leaq 0x158(%rsp), %rax
cmpq %rax, %rdi
je 0x22889
movq 0x158(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2420(%rsp), %rdi
leaq 0x1229e8(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2408(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x228c3
callq 0x4fb08
movq 0x23f0(%rsp), %rdi
testq %rdi, %rdi
je 0x228d5
callq 0x4fb08
movq 0x8e8(%rsp), %rdi
leaq 0x8f8(%rsp), %rax
cmpq %rax, %rdi
je 0x228fa
movq 0x8f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x908(%rsp), %rdi
leaq 0x918(%rsp), %rax
cmpq %rax, %rdi
je 0x2291f
movq 0x918(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x928(%rsp), %rdi
leaq 0x938(%rsp), %rax
cmpq %rax, %rdi
je 0x22944
movq 0x938(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x15b8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x15c8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x2297a
movq 0x15b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1598(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x2299b
movq 0x1598(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1580(%rsp), %rdi
testq %rdi, %rdi
je 0x229ad
callq 0x4fb08
movq 0x948(%rsp), %rdi
leaq 0x958(%rsp), %rax
cmpq %rax, %rdi
je 0x229d2
movq 0x958(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x23c0(%rsp), %rdi
leaq 0x12289f(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x23a8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22a0c
callq 0x4fb08
movq 0x2390(%rsp), %rdi
testq %rdi, %rdi
je 0x22a1e
callq 0x4fb08
movq 0x968(%rsp), %rdi
leaq 0x978(%rsp), %rax
cmpq %rax, %rdi
je 0x22a43
movq 0x978(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x988(%rsp), %rdi
leaq 0x998(%rsp), %rax
cmpq %rax, %rdi
je 0x22a68
movq 0x998(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x168(%rsp), %rdi
leaq 0x178(%rsp), %rax
cmpq %rax, %rdi
je 0x22a8d
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1b40(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1b50(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x22ac3
movq 0x1b40(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1b20(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x22ae4
movq 0x1b20(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1b08(%rsp), %rdi
testq %rdi, %rdi
je 0x22af6
callq 0x4fb08
leaq 0x2360(%rsp), %rdi
leaq 0x12277b(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2348(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22b30
callq 0x4fb08
movq 0x2330(%rsp), %rdi
testq %rdi, %rdi
leaq 0xfd0(%rsp), %rbx
je 0x22b4a
callq 0x4fb08
movq 0x9a8(%rsp), %rdi
leaq 0x9b8(%rsp), %rax
cmpq %rax, %rdi
je 0x22b6f
movq 0x9b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x9c8(%rsp), %rdi
leaq 0x9d8(%rsp), %rax
cmpq %rax, %rdi
je 0x22b94
movq 0x9d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x188(%rsp), %rdi
leaq 0x198(%rsp), %rax
cmpq %rax, %rdi
je 0x22bb9
movq 0x198(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0xfa0(%rsp)
leaq 0x1000(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0xfa0(%rsp)
movq 0xfe0(%rsp), %rdi
leaq 0xff0(%rsp), %rax
cmpq %rax, %rdi
je 0x22bfb
movq 0xff0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xfc0(%rsp), %rdi
cmpq %rbx, %rdi
je 0x22c18
movq 0xfd0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xfb8(%rsp), %rdi
testq %rdi, %rdi
je 0x22c2a
callq 0x4fb08
movq 0x1a8(%rsp), %rdi
leaq 0x1b8(%rsp), %rax
cmpq %rax, %rdi
je 0x22c4f
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2300(%rsp), %rdi
leaq 0x122622(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x22e8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22c89
callq 0x4fb08
movq 0x22d0(%rsp), %rdi
testq %rdi, %rdi
je 0x22c9b
callq 0x4fb08
movq 0x9e8(%rsp), %rdi
leaq 0x9f8(%rsp), %rax
cmpq %rax, %rdi
je 0x22cc0
movq 0x9f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xa08(%rsp), %rdi
leaq 0xa18(%rsp), %rax
cmpq %rax, %rdi
je 0x22ce5
movq 0xa18(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xa28(%rsp), %rdi
leaq 0xa38(%rsp), %rax
cmpq %rax, %rdi
je 0x22d0a
movq 0xa38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1630(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1640(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x22d40
movq 0x1630(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1610(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x22d61
movq 0x1610(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x15f8(%rsp), %rdi
testq %rdi, %rdi
je 0x22d73
callq 0x4fb08
movq 0xa48(%rsp), %rdi
leaq 0xa58(%rsp), %rax
cmpq %rax, %rdi
je 0x22d98
movq 0xa58(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x22a0(%rsp), %rdi
leaq 0x1224d9(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2288(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22dd2
callq 0x4fb08
movq 0x2270(%rsp), %rdi
testq %rdi, %rdi
leaq 0x1048(%rsp), %rbx
je 0x22dec
callq 0x4fb08
movq 0xa68(%rsp), %rdi
leaq 0xa78(%rsp), %rax
cmpq %rax, %rdi
je 0x22e11
movq 0xa78(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1c8(%rsp), %rdi
leaq 0x1d8(%rsp), %rax
cmpq %rax, %rdi
je 0x22e36
movq 0x1d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1e8(%rsp), %rdi
leaq 0x1f8(%rsp), %rax
cmpq %rax, %rdi
je 0x22e5b
movq 0x1f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x1018(%rsp)
leaq 0x1078(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0x1018(%rsp)
movq 0x1058(%rsp), %rdi
leaq 0x1068(%rsp), %rax
cmpq %rax, %rdi
je 0x22e9d
movq 0x1068(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1038(%rsp), %rdi
cmpq %rbx, %rdi
je 0x22eba
movq 0x1048(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1030(%rsp), %rdi
testq %rdi, %rdi
je 0x22ecc
callq 0x4fb08
movq 0x208(%rsp), %rdi
leaq 0x218(%rsp), %rax
cmpq %rax, %rdi
je 0x22ef1
movq 0x218(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2240(%rsp), %rdi
leaq 0x122380(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2228(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x22f2b
callq 0x4fb08
movq 0x2210(%rsp), %rdi
testq %rdi, %rdi
leaq 0x10c0(%rsp), %rbx
je 0x22f45
callq 0x4fb08
movq 0xa88(%rsp), %rdi
leaq 0xa98(%rsp), %rax
cmpq %rax, %rdi
je 0x22f6a
movq 0xa98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xaa8(%rsp), %rdi
leaq 0xab8(%rsp), %rax
cmpq %rax, %rdi
je 0x22f8f
movq 0xab8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xac8(%rsp), %rdi
leaq 0xad8(%rsp), %rax
cmpq %rax, %rdi
je 0x22fb4
movq 0xad8(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x1090(%rsp)
leaq 0x10f0(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0x1090(%rsp)
movq 0x10d0(%rsp), %rdi
leaq 0x10e0(%rsp), %rax
cmpq %rax, %rdi
je 0x22ff6
movq 0x10e0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10b0(%rsp), %rdi
cmpq %rbx, %rdi
je 0x23013
movq 0x10c0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10a8(%rsp), %rdi
testq %rdi, %rdi
je 0x23025
callq 0x4fb08
movq 0x228(%rsp), %rdi
leaq 0x238(%rsp), %rax
cmpq %rax, %rdi
je 0x2304a
movq 0x238(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x21e0(%rsp), %rdi
leaq 0x122227(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x21c8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23084
callq 0x4fb08
movq 0x21b0(%rsp), %rdi
testq %rdi, %rdi
leaq 0x1138(%rsp), %rbx
je 0x2309e
callq 0x4fb08
movq 0xae8(%rsp), %rdi
leaq 0xaf8(%rsp), %rax
cmpq %rax, %rdi
je 0x230c3
movq 0xaf8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x248(%rsp), %rdi
leaq 0x258(%rsp), %rax
cmpq %rax, %rdi
je 0x230e8
movq 0x258(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb08(%rsp), %rdi
leaq 0xb18(%rsp), %rax
cmpq %rax, %rdi
je 0x2310d
movq 0xb18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x1108(%rsp)
leaq 0x1168(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0x1108(%rsp)
movq 0x1148(%rsp), %rdi
leaq 0x1158(%rsp), %rax
cmpq %rax, %rdi
je 0x2314f
movq 0x1158(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1128(%rsp), %rdi
cmpq %rbx, %rdi
je 0x2316c
movq 0x1138(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1120(%rsp), %rdi
testq %rdi, %rdi
je 0x2317e
callq 0x4fb08
movq 0x268(%rsp), %rdi
leaq 0x278(%rsp), %rax
cmpq %rax, %rdi
je 0x231a3
movq 0x278(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2180(%rsp), %rdi
leaq 0x1220ce(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2168(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x231dd
callq 0x4fb08
movq 0x2150(%rsp), %rdi
testq %rdi, %rdi
leaq 0x11e0(%rsp), %rbx
je 0x231f7
callq 0x4fb08
movq 0xb28(%rsp), %rdi
leaq 0xb38(%rsp), %rax
cmpq %rax, %rdi
je 0x2321c
movq 0xb38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb48(%rsp), %rdi
leaq 0xb58(%rsp), %rax
cmpq %rax, %rdi
je 0x23241
movq 0xb58(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb68(%rsp), %rdi
leaq 0xb78(%rsp), %rax
cmpq %rax, %rdi
je 0x23266
movq 0xb78(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x1180(%rsp)
movq %rbx, %rdi
callq 0x4c0f8
movq %r15, 0x1180(%rsp)
movq 0x11c0(%rsp), %rdi
leaq 0x11d0(%rsp), %rax
cmpq %rax, %rdi
je 0x232a3
movq 0x11d0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x11a0(%rsp), %rdi
leaq 0x11b0(%rsp), %rax
cmpq %rax, %rdi
je 0x232c8
movq 0x11b0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1198(%rsp), %rdi
testq %rdi, %rdi
je 0x232da
callq 0x4fb08
leaq 0x2120(%rsp), %rdi
leaq 0x121f97(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2108(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23314
callq 0x4fb08
movq 0x20f0(%rsp), %rdi
testq %rdi, %rdi
je 0x23326
callq 0x4fb08
movq 0xb88(%rsp), %rdi
leaq 0xb98(%rsp), %rax
cmpq %rax, %rdi
je 0x2334b
movq 0xb98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x288(%rsp), %rdi
leaq 0x298(%rsp), %rax
cmpq %rax, %rdi
je 0x23370
movq 0x298(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xba8(%rsp), %rdi
leaq 0xbb8(%rsp), %rax
cmpq %rax, %rdi
je 0x23395
movq 0xbb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x16a8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x16b8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x233cb
movq 0x16a8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1688(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x233ec
movq 0x1688(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1670(%rsp), %rdi
testq %rdi, %rdi
je 0x233fe
callq 0x4fb08
movq 0xbc8(%rsp), %rdi
leaq 0xbd8(%rsp), %rax
cmpq %rax, %rdi
je 0x23423
movq 0xbd8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x20c0(%rsp), %rdi
leaq 0x121e4e(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x20a8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x2345d
callq 0x4fb08
movq 0x2090(%rsp), %rdi
testq %rdi, %rdi
leaq 0x1228(%rsp), %rbx
je 0x23477
callq 0x4fb08
movq 0xbe8(%rsp), %rdi
leaq 0xbf8(%rsp), %rax
cmpq %rax, %rdi
je 0x2349c
movq 0xbf8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xc08(%rsp), %rdi
leaq 0xc18(%rsp), %rax
cmpq %rax, %rdi
je 0x234c1
movq 0xc18(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x2a8(%rsp), %rdi
leaq 0x2b8(%rsp), %rax
cmpq %rax, %rdi
je 0x234e6
movq 0x2b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbp, 0x11f8(%rsp)
leaq 0x1258(%rsp), %rdi
callq 0x4c0f8
movq %r15, 0x11f8(%rsp)
movq 0x1238(%rsp), %rdi
leaq 0x1248(%rsp), %rax
cmpq %rax, %rdi
je 0x23528
movq 0x1248(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1218(%rsp), %rdi
cmpq %rbx, %rdi
je 0x23545
movq 0x1228(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1210(%rsp), %rdi
testq %rdi, %rdi
je 0x23557
callq 0x4fb08
movq 0x2c8(%rsp), %rdi
leaq 0x2d8(%rsp), %rax
cmpq %rax, %rdi
je 0x2357c
movq 0x2d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2060(%rsp), %rdi
leaq 0x121cf5(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x2048(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x235b6
callq 0x4fb08
movq 0x2030(%rsp), %rdi
testq %rdi, %rdi
je 0x235c8
callq 0x4fb08
movq 0xc28(%rsp), %rdi
leaq 0xc38(%rsp), %rax
cmpq %rax, %rdi
je 0x235ed
movq 0xc38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xc48(%rsp), %rdi
leaq 0xc58(%rsp), %rax
cmpq %rax, %rdi
je 0x23612
movq 0xc58(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x2e8(%rsp), %rdi
leaq 0x2f8(%rsp), %rax
cmpq %rax, %rdi
je 0x23637
movq 0x2f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1720(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1730(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x2366d
movq 0x1720(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1700(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x2368e
movq 0x1700(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x16e8(%rsp), %rdi
testq %rdi, %rdi
je 0x236a0
callq 0x4fb08
movq 0xc68(%rsp), %rdi
leaq 0xc78(%rsp), %rax
cmpq %rax, %rdi
je 0x236c5
movq 0xc78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2000(%rsp), %rdi
leaq 0x121bac(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x1fe8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x236ff
callq 0x4fb08
movq 0x1fd0(%rsp), %rdi
testq %rdi, %rdi
je 0x23711
callq 0x4fb08
movq 0xc88(%rsp), %rdi
leaq 0xc98(%rsp), %rax
cmpq %rax, %rdi
je 0x23736
movq 0xc98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x308(%rsp), %rdi
leaq 0x318(%rsp), %rax
cmpq %rax, %rdi
je 0x2375b
movq 0x318(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x328(%rsp), %rdi
leaq 0x338(%rsp), %rax
cmpq %rax, %rdi
je 0x23780
movq 0x338(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1bb8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1bc8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x237b6
movq 0x1bb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1b98(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x237d7
movq 0x1b98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1b80(%rsp), %rdi
testq %rdi, %rdi
je 0x237e9
callq 0x4fb08
leaq 0x1fa0(%rsp), %rdi
leaq 0x121a88(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x1f88(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23823
callq 0x4fb08
movq 0x1f70(%rsp), %rdi
testq %rdi, %rdi
je 0x23835
callq 0x4fb08
movq 0xca8(%rsp), %rdi
leaq 0xcb8(%rsp), %rax
cmpq %rax, %rdi
je 0x2385a
movq 0xcb8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x348(%rsp), %rdi
leaq 0x358(%rsp), %rax
cmpq %rax, %rdi
je 0x2387f
movq 0x358(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x368(%rsp), %rdi
leaq 0x378(%rsp), %rax
cmpq %rax, %rdi
je 0x238a4
movq 0x378(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1c30(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1c40(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x238da
movq 0x1c30(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1c10(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x238fb
movq 0x1c10(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1bf8(%rsp), %rdi
testq %rdi, %rdi
je 0x2390d
callq 0x4fb08
leaq 0x1f40(%rsp), %rdi
leaq 0x121964(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x1f28(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23947
callq 0x4fb08
movq 0x1f10(%rsp), %rdi
testq %rdi, %rdi
je 0x23959
callq 0x4fb08
movq 0xcc8(%rsp), %rdi
leaq 0xcd8(%rsp), %rax
cmpq %rax, %rdi
je 0x2397e
movq 0xcd8(%rsp), %rsi
incq %rsi
callq 0x12570
movq (%rsp), %rdi
cmpq %r14, %rdi
je 0x23994
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x20(%rsp), %rdi
leaq 0x30(%rsp), %rax
cmpq %rax, %rdi
je 0x239b0
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1ca8(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1cb8(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x239e6
movq 0x1ca8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1c88(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x23a07
movq 0x1c88(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1c70(%rsp), %rdi
testq %rdi, %rdi
je 0x23a19
callq 0x4fb08
leaq 0x1ee0(%rsp), %rdi
leaq 0x121858(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x1ec8(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23a53
callq 0x4fb08
movq 0x1eb0(%rsp), %rdi
testq %rdi, %rdi
je 0x23a65
callq 0x4fb08
movq 0x388(%rsp), %rdi
cmpq %r12, %rdi
je 0x23a82
movq 0x398(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x3a8(%rsp), %rdi
leaq 0x3b8(%rsp), %rax
cmpq %rax, %rdi
je 0x23aa7
movq 0x3b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x40(%rsp), %rdi
leaq 0x50(%rsp), %rax
cmpq %rax, %rdi
je 0x23ac3
movq 0x50(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d20(%rsp), %rbx
movq %rbp, -0x50(%rbx)
leaq 0x1d30(%rsp), %rdi
callq 0x4c0f8
movq %r15, -0x50(%rbx)
movq -0x10(%rbx), %rdi
cmpq %rbx, %rdi
je 0x23af9
movq 0x1d20(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d00(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x23b1a
movq 0x1d00(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1ce8(%rsp), %rdi
testq %rdi, %rdi
je 0x23b2c
callq 0x4fb08
leaq 0x1e80(%rsp), %rdi
leaq 0x121745(%rip), %rax # 0x145280
movq %rax, -0x48(%rdi)
callq 0x4fbba
leaq 0x1e68(%rsp), %rbx
movq %rbx, %rdi
callq 0x4fb78
movq %r13, -0x28(%rbx)
movq -0x8(%rbx), %rdi
testq %rdi, %rdi
je 0x23b66
callq 0x4fb08
movq 0x1e50(%rsp), %rdi
testq %rdi, %rdi
je 0x23b78
callq 0x4fb08
leaq 0x1e20(%rsp), %rbx
movq %rbp, -0x60(%rbx)
movq %rbx, %rdi
callq 0x4c0f8
movq %r15, -0x60(%rbx)
movq -0x20(%rbx), %rdi
leaq 0x1e10(%rsp), %rax
cmpq %rax, %rdi
je 0x23bb1
movq 0x1e10(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1df0(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x23bd2
movq 0x1df0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x1dd8(%rsp), %rdi
testq %rdi, %rdi
je 0x23be4
callq 0x4fb08
movq %r13, 0x1360(%rsp)
movq 0x1380(%rsp), %rdi
testq %rdi, %rdi
je 0x23bfe
callq 0x4fb08
movq 0x1370(%rsp), %rdi
testq %rdi, %rdi
je 0x23c10
callq 0x4fb08
movq 0x60(%rsp), %rax
addq $0x2b58, %rsp # imm = 0x2B58
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
jmp 0x2466c
movq %rax, %rbx
jmp 0x24679
movq %rax, %rbx
jmp 0x24696
jmp 0x23c41
movq %rax, %rbx
jmp 0x246cd
movq %rax, %rbx
jmp 0x246ea
movq %rax, %rbx
jmp 0x246f7
jmp 0x23c5b
movq %rax, %rbx
jmp 0x24729
movq %rax, %rbx
jmp 0x2474e
movq %rax, %rbx
movq 0x1288(%rsp), %rdi
testq %rdi, %rdi
je 0x2475b
callq 0x4fb08
jmp 0x2475b
movq %rax, %rbx
jmp 0x2475b
movq %rax, %rbx
jmp 0x24777
jmp 0x23c9b
movq %rax, %rbx
jmp 0x247a9
movq %rax, %rbx
jmp 0x247ce
movq %rax, %rbx
movq 0x1300(%rsp), %rdi
testq %rdi, %rdi
je 0x247db
callq 0x4fb08
jmp 0x247db
movq %rax, %rbx
jmp 0x247db
movq %rax, %rbx
jmp 0x24800
jmp 0x23cdb
movq %rax, %rbx
jmp 0x24832
movq %rax, %rbx
jmp 0x24857
movq %rax, %rbx
jmp 0x24864
movq %rax, %rbx
jmp 0x24889
jmp 0x23cfd
movq %rax, %rbx
jmp 0x248bb
movq %rax, %rbx
jmp 0x248e0
movq %rax, %rbx
jmp 0x248ed
jmp 0x23d17
movq %rax, %rbx
jmp 0x2491f
movq %rax, %rbx
jmp 0x24944
movq %rax, %rbx
jmp 0x24951
movq %rax, %rbx
jmp 0x24976
jmp 0x23d39
movq %rax, %rbx
jmp 0x249a8
movq %rax, %rbx
jmp 0x249cd
movq %rax, %rbx
jmp 0x249da
movq %rax, %rbx
jmp 0x249ff
jmp 0x23d5b
movq %rax, %rbx
jmp 0x24a31
movq %rax, %rbx
jmp 0x24a56
movq %rax, %rbx
jmp 0x24a63
movq %rax, %rbx
jmp 0x24a88
jmp 0x23d7d
movq %rax, %rbx
jmp 0x24aba
movq %rax, %rbx
jmp 0x24adf
movq %rax, %rbx
jmp 0x24aec
jmp 0x23d97
movq %rax, %rbx
jmp 0x24b1e
movq %rax, %rbx
jmp 0x24b43
movq %rax, %rbx
movq 0xd60(%rsp), %rdi
testq %rdi, %rdi
je 0x24b50
callq 0x4fb08
jmp 0x24b50
movq %rax, %rbx
jmp 0x24b50
movq %rax, %rbx
jmp 0x24b75
jmp 0x23dd7
movq %rax, %rbx
jmp 0x24ba7
movq %rax, %rbx
jmp 0x24bcc
movq %rax, %rbx
jmp 0x24bd9
jmp 0x23df1
movq %rax, %rbx
jmp 0x24c0b
movq %rax, %rbx
jmp 0x24c30
movq %rax, %rbx
movq 0xdd8(%rsp), %rdi
testq %rdi, %rdi
je 0x24c3d
callq 0x4fb08
jmp 0x24c3d
movq %rax, %rbx
jmp 0x24c3d
movq %rax, %rbx
jmp 0x24c62
jmp 0x23e31
movq %rax, %rbx
jmp 0x24c94
movq %rax, %rbx
jmp 0x24cb9
movq %rax, %rbx
movq 0xe50(%rsp), %rdi
testq %rdi, %rdi
je 0x24cc6
callq 0x4fb08
jmp 0x24cc6
movq %rax, %rbx
jmp 0x24cc6
movq %rax, %rbx
jmp 0x24ceb
jmp 0x23e71
movq %rax, %rbx
jmp 0x24d1d
movq %rax, %rbx
jmp 0x24d42
movq %rax, %rbx
movq 0xec8(%rsp), %rdi
testq %rdi, %rdi
je 0x24d4f
callq 0x4fb08
jmp 0x24d4f
movq %rax, %rbx
jmp 0x24d4f
movq %rax, %rbx
jmp 0x24d74
jmp 0x23eb1
movq %rax, %rbx
jmp 0x24da6
movq %rax, %rbx
jmp 0x24dcb
movq %rax, %rbx
jmp 0x24dd8
jmp 0x23ecb
movq %rax, %rbx
jmp 0x24e0a
movq %rax, %rbx
jmp 0x24e2f
movq %rax, %rbx
jmp 0x24e3c
jmp 0x23ee5
movq %rax, %rbx
jmp 0x24e6e
movq %rax, %rbx
jmp 0x24e93
movq %rax, %rbx
jmp 0x24ea0
jmp 0x23eff
movq %rax, %rbx
jmp 0x24ed2
movq %rax, %rbx
jmp 0x24f07
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24f26
jmp 0x23f23
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24f58
jmp 0x23f37
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24f7d
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24f9a
leaq 0x50(%rsp), %r15
movq %rax, %rbx
movq 0xf40(%rsp), %rdi
testq %rdi, %rdi
je 0x23f75
callq 0x4fb08
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24fac
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x24fac
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x24fd1
jmp 0x23fb4
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25003
jmp 0x23fcd
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25028
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2504d
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2505a
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2507f
jmp 0x2402b
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x250b1
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x250d6
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x178(%rsp), %rax
leaq 0x50(%rsp), %r15
jmp 0x250de
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x250fb
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25108
jmp 0x240a8
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2513a
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2515f
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x198(%rsp), %rax
leaq 0x50(%rsp), %r15
jmp 0x25167
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25184
leaq 0x50(%rsp), %r15
movq %rax, %rbx
movq 0xfb8(%rsp), %rdi
testq %rdi, %rdi
je 0x24126
callq 0x4fb08
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25191
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25191
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x251b6
jmp 0x24165
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x251e8
jmp 0x2417e
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2520d
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25232
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2523f
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25264
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25296
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x1d8(%rsp), %rax
leaq 0x50(%rsp), %r15
jmp 0x2529e
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x252bb
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x1f8(%rsp), %rax
leaq 0x50(%rsp), %r15
jmp 0x252c3
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x252e0
leaq 0x50(%rsp), %r15
movq %rax, %rbx
movq 0x1030(%rsp), %rdi
testq %rdi, %rdi
je 0x24277
callq 0x4fb08
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x252ed
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x252ed
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25312
jmp 0x242b6
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25344
jmp 0x242cf
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25369
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2538e
leaq 0x50(%rsp), %r15
movq %rax, %rbx
movq 0x10a8(%rsp), %rdi
testq %rdi, %rdi
je 0x24317
callq 0x4fb08
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x2539b
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2539b
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x253c0
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x253f2
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x258(%rsp), %rax
leaq 0x50(%rsp), %r15
jmp 0x253fa
jmp 0x2438c
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25417
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2543c
leaq 0x50(%rsp), %r15
movq %rax, %rbx
movq 0x1120(%rsp), %rdi
testq %rdi, %rdi
je 0x243d4
callq 0x4fb08
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25449
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25449
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2546e
jmp 0x24413
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x254a0
jmp 0x2442c
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x254c5
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x254ea
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x254f7
jmp 0x2446e
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25529
jmp 0x24482
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x2554e
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25573
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25580
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x255a5
jmp 0x244cc
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x255d7
jmp 0x244e0
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x255fc
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25621
movq %rax, %rbx
movq 0x1210(%rsp), %rdi
testq %rdi, %rdi
je 0x2451e
callq 0x4fb08
jmp 0x2451e
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x2562e
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25653
jmp 0x2453c
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25685
jmp 0x2454b
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x256aa
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x256cf
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x256dc
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25701
jmp 0x24581
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25733
jmp 0x24590
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25758
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x2577d
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x2578a
jmp 0x245b9
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x257bc
jmp 0x245c8
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x257e1
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25806
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25813
jmp 0x245f1
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25845
jmp 0x24600
movq %rax, %rbx
jmp 0x2585b
movq %rax, %rbx
jmp 0x25872
movq %rax, %rbx
jmp 0x2587f
jmp 0x2461a
movq %rax, %rbx
jmp 0x258a9
jmp 0x24624
movq %rax, %rbx
jmp 0x258c6
movq %rax, %rbx
jmp 0x258dd
movq %rax, %rbx
jmp 0x258ea
movq %rax, %rbx
jmp 0x258f7
movq %rax, %rbx
jmp 0x25904
movq %rax, %rbx
movq 0xce8(%rsp), %rdi
cmpq %r14, %rdi
je 0x2466c
movq 0xcf8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1748(%rsp), %rdi
callq 0x4bc5c
movq 0xd08(%rsp), %rdi
cmpq %r13, %rdi
je 0x24696
movq 0xd18(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2af8(%rsp), %rdi
callq 0x4adca
jmp 0x246a8
movq %rax, %rbx
movq 0x3c8(%rsp), %rdi
leaq 0x3d8(%rsp), %rax
cmpq %rax, %rdi
je 0x246cd
movq 0x3d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xd28(%rsp), %rdi
cmpq %r12, %rdi
je 0x246ea
movq 0xd38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d48(%rsp), %rdi
callq 0x4bcac
leaq 0x2a98(%rsp), %rdi
callq 0x4adca
movq 0x3e8(%rsp), %rdi
leaq 0x3f8(%rsp), %rax
cmpq %rax, %rdi
je 0x24729
movq 0x3f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x408(%rsp), %rdi
leaq 0x418(%rsp), %rax
cmpq %rax, %rdi
je 0x2474e
movq 0x418(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1270(%rsp), %rdi
callq 0x4bcac
movq 0x68(%rsp), %rdi
leaq 0x78(%rsp), %rax
cmpq %rax, %rdi
je 0x24777
movq 0x78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2a38(%rsp), %rdi
callq 0x4adca
movq 0x428(%rsp), %rdi
leaq 0x438(%rsp), %rax
cmpq %rax, %rdi
je 0x247a9
movq 0x438(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x448(%rsp), %rdi
leaq 0x458(%rsp), %rax
cmpq %rax, %rdi
je 0x247ce
movq 0x458(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x12e8(%rsp), %rdi
callq 0x4bcac
movq 0x88(%rsp), %rdi
leaq 0x98(%rsp), %rax
cmpq %rax, %rdi
je 0x24800
movq 0x98(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x29d8(%rsp), %rdi
callq 0x4adca
movq 0x468(%rsp), %rdi
leaq 0x478(%rsp), %rax
cmpq %rax, %rdi
je 0x24832
movq 0x478(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x488(%rsp), %rdi
leaq 0x498(%rsp), %rax
cmpq %rax, %rdi
je 0x24857
movq 0x498(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1388(%rsp), %rdi
callq 0x4bcac
movq 0x4a8(%rsp), %rdi
leaq 0x4b8(%rsp), %rax
cmpq %rax, %rdi
je 0x24889
movq 0x4b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2978(%rsp), %rdi
callq 0x4adca
movq 0x4c8(%rsp), %rdi
leaq 0x4d8(%rsp), %rax
cmpq %rax, %rdi
je 0x248bb
movq 0x4d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x4e8(%rsp), %rdi
leaq 0x4f8(%rsp), %rax
cmpq %rax, %rdi
je 0x248e0
movq 0x4f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x17a8(%rsp), %rdi
callq 0x4bcac
leaq 0x2918(%rsp), %rdi
callq 0x4adca
movq 0x508(%rsp), %rdi
leaq 0x518(%rsp), %rax
cmpq %rax, %rdi
je 0x2491f
movq 0x518(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x528(%rsp), %rdi
leaq 0x538(%rsp), %rax
cmpq %rax, %rdi
je 0x24944
movq 0x538(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1400(%rsp), %rdi
callq 0x4bcac
movq 0x548(%rsp), %rdi
leaq 0x558(%rsp), %rax
cmpq %rax, %rdi
je 0x24976
movq 0x558(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x28b8(%rsp), %rdi
callq 0x4adca
movq 0x568(%rsp), %rdi
leaq 0x578(%rsp), %rax
cmpq %rax, %rdi
je 0x249a8
movq 0x578(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x588(%rsp), %rdi
leaq 0x598(%rsp), %rax
cmpq %rax, %rdi
je 0x249cd
movq 0x598(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1478(%rsp), %rdi
callq 0x4bcac
movq 0x5a8(%rsp), %rdi
leaq 0x5b8(%rsp), %rax
cmpq %rax, %rdi
je 0x249ff
movq 0x5b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2858(%rsp), %rdi
callq 0x4adca
movq 0x5c8(%rsp), %rdi
leaq 0x5d8(%rsp), %rax
cmpq %rax, %rdi
je 0x24a31
movq 0x5d8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x5e8(%rsp), %rdi
leaq 0x5f8(%rsp), %rax
cmpq %rax, %rdi
je 0x24a56
movq 0x5f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x14f0(%rsp), %rdi
callq 0x4bcac
movq 0x608(%rsp), %rdi
leaq 0x618(%rsp), %rax
cmpq %rax, %rdi
je 0x24a88
movq 0x618(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x27f8(%rsp), %rdi
callq 0x4adca
movq 0x628(%rsp), %rdi
leaq 0x638(%rsp), %rax
cmpq %rax, %rdi
je 0x24aba
movq 0x638(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x648(%rsp), %rdi
leaq 0x658(%rsp), %rax
cmpq %rax, %rdi
je 0x24adf
movq 0x658(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1820(%rsp), %rdi
callq 0x4bcac
leaq 0x2798(%rsp), %rdi
callq 0x4adca
movq 0x668(%rsp), %rdi
leaq 0x678(%rsp), %rax
cmpq %rax, %rdi
je 0x24b1e
movq 0x678(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x688(%rsp), %rdi
leaq 0x698(%rsp), %rax
cmpq %rax, %rdi
je 0x24b43
movq 0x698(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xd48(%rsp), %rdi
callq 0x4bcac
movq 0xa8(%rsp), %rdi
leaq 0xb8(%rsp), %rax
cmpq %rax, %rdi
je 0x24b75
movq 0xb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2738(%rsp), %rdi
callq 0x4adca
movq 0x6a8(%rsp), %rdi
leaq 0x6b8(%rsp), %rax
cmpq %rax, %rdi
je 0x24ba7
movq 0x6b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x6c8(%rsp), %rdi
leaq 0x6d8(%rsp), %rax
cmpq %rax, %rdi
je 0x24bcc
movq 0x6d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1898(%rsp), %rdi
callq 0x4bcac
leaq 0x26d8(%rsp), %rdi
callq 0x4adca
movq 0x6e8(%rsp), %rdi
leaq 0x6f8(%rsp), %rax
cmpq %rax, %rdi
je 0x24c0b
movq 0x6f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x708(%rsp), %rdi
leaq 0x718(%rsp), %rax
cmpq %rax, %rdi
je 0x24c30
movq 0x718(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xdc0(%rsp), %rdi
callq 0x4bcac
movq 0xc8(%rsp), %rdi
leaq 0xd8(%rsp), %rax
cmpq %rax, %rdi
je 0x24c62
movq 0xd8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2678(%rsp), %rdi
callq 0x4adca
movq 0x728(%rsp), %rdi
leaq 0x738(%rsp), %rax
cmpq %rax, %rdi
je 0x24c94
movq 0x738(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x748(%rsp), %rdi
leaq 0x758(%rsp), %rax
cmpq %rax, %rdi
je 0x24cb9
movq 0x758(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xe38(%rsp), %rdi
callq 0x4bcac
movq 0xe8(%rsp), %rdi
leaq 0xf8(%rsp), %rax
cmpq %rax, %rdi
je 0x24ceb
movq 0xf8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2618(%rsp), %rdi
callq 0x4adca
movq 0x768(%rsp), %rdi
leaq 0x778(%rsp), %rax
cmpq %rax, %rdi
je 0x24d1d
movq 0x778(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x788(%rsp), %rdi
leaq 0x798(%rsp), %rax
cmpq %rax, %rdi
je 0x24d42
movq 0x798(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xeb0(%rsp), %rdi
callq 0x4bcac
movq 0x108(%rsp), %rdi
leaq 0x118(%rsp), %rax
cmpq %rax, %rdi
je 0x24d74
movq 0x118(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x25b8(%rsp), %rdi
callq 0x4adca
movq 0x7a8(%rsp), %rdi
leaq 0x7b8(%rsp), %rax
cmpq %rax, %rdi
je 0x24da6
movq 0x7b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x7c8(%rsp), %rdi
leaq 0x7d8(%rsp), %rax
cmpq %rax, %rdi
je 0x24dcb
movq 0x7d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1910(%rsp), %rdi
callq 0x4bcac
leaq 0x2558(%rsp), %rdi
callq 0x4adca
movq 0x7e8(%rsp), %rdi
leaq 0x7f8(%rsp), %rax
cmpq %rax, %rdi
je 0x24e0a
movq 0x7f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x808(%rsp), %rdi
leaq 0x818(%rsp), %rax
cmpq %rax, %rdi
je 0x24e2f
movq 0x818(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1988(%rsp), %rdi
callq 0x4bcac
leaq 0x24f8(%rsp), %rdi
callq 0x4adca
movq 0x828(%rsp), %rdi
leaq 0x838(%rsp), %rax
cmpq %rax, %rdi
je 0x24e6e
movq 0x838(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x848(%rsp), %rdi
leaq 0x858(%rsp), %rax
cmpq %rax, %rdi
je 0x24e93
movq 0x858(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1a00(%rsp), %rdi
callq 0x4bcac
leaq 0x2498(%rsp), %rdi
callq 0x4adca
movq 0x868(%rsp), %rdi
leaq 0x878(%rsp), %rax
cmpq %rax, %rdi
je 0x24ed2
movq 0x878(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x888(%rsp), %rdi
leaq 0x898(%rsp), %rax
cmpq %rax, %rdi
je 0x24ef7
movq 0x898(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x3b8(%rsp), %r13
leaq 0x398(%rsp), %rbp
leaq 0x1a78(%rsp), %rdi
callq 0x4bcac
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x8d8(%rsp), %r15
leaq 0x2438(%rsp), %rdi
callq 0x4adca
movq 0x8a8(%rsp), %rdi
leaq 0x8b8(%rsp), %rax
cmpq %rax, %rdi
je 0x24f58
movq 0x8b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x138(%rsp), %rax
movq 0x128(%rsp), %rdi
cmpq %rax, %rdi
je 0x24f7d
movq 0x138(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x8c8(%rsp), %rdi
cmpq %r15, %rdi
je 0x24f9a
movq 0x8d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x50(%rsp), %r15
leaq 0xf28(%rsp), %rdi
callq 0x4bcac
movq 0x148(%rsp), %rdi
leaq 0x158(%rsp), %rax
cmpq %rax, %rdi
je 0x24fd1
movq 0x158(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x23d8(%rsp), %rdi
callq 0x4adca
movq 0x8e8(%rsp), %rdi
leaq 0x8f8(%rsp), %rax
cmpq %rax, %rdi
je 0x25003
movq 0x8f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x908(%rsp), %rdi
leaq 0x918(%rsp), %rax
cmpq %rax, %rdi
je 0x25028
movq 0x918(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x928(%rsp), %rdi
leaq 0x938(%rsp), %rax
cmpq %rax, %rdi
je 0x2504d
movq 0x938(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1568(%rsp), %rdi
callq 0x4bcac
movq 0x948(%rsp), %rdi
leaq 0x958(%rsp), %rax
cmpq %rax, %rdi
je 0x2507f
movq 0x958(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2378(%rsp), %rdi
callq 0x4adca
movq 0x968(%rsp), %rdi
leaq 0x978(%rsp), %rax
cmpq %rax, %rdi
je 0x250b1
movq 0x978(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x988(%rsp), %rdi
leaq 0x998(%rsp), %rax
cmpq %rax, %rdi
je 0x250d6
movq 0x998(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x178(%rsp), %rax
movq 0x168(%rsp), %rdi
cmpq %rax, %rdi
je 0x250fb
movq 0x178(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1af0(%rsp), %rdi
callq 0x4bcac
leaq 0x2318(%rsp), %rdi
callq 0x4adca
movq 0x9a8(%rsp), %rdi
leaq 0x9b8(%rsp), %rax
cmpq %rax, %rdi
je 0x2513a
movq 0x9b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x9c8(%rsp), %rdi
leaq 0x9d8(%rsp), %rax
cmpq %rax, %rdi
je 0x2515f
movq 0x9d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x198(%rsp), %rax
movq 0x188(%rsp), %rdi
cmpq %rax, %rdi
je 0x25184
movq 0x198(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xfa0(%rsp), %rdi
callq 0x4bcac
movq 0x1a8(%rsp), %rdi
leaq 0x1b8(%rsp), %rax
cmpq %rax, %rdi
je 0x251b6
movq 0x1b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x22b8(%rsp), %rdi
callq 0x4adca
movq 0x9e8(%rsp), %rdi
leaq 0x9f8(%rsp), %rax
cmpq %rax, %rdi
je 0x251e8
movq 0x9f8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xa08(%rsp), %rdi
leaq 0xa18(%rsp), %rax
cmpq %rax, %rdi
je 0x2520d
movq 0xa18(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xa28(%rsp), %rdi
leaq 0xa38(%rsp), %rax
cmpq %rax, %rdi
je 0x25232
movq 0xa38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x15e0(%rsp), %rdi
callq 0x4bcac
movq 0xa48(%rsp), %rdi
leaq 0xa58(%rsp), %rax
cmpq %rax, %rdi
je 0x25264
movq 0xa58(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2258(%rsp), %rdi
callq 0x4adca
movq 0xa68(%rsp), %rdi
leaq 0xa78(%rsp), %rax
cmpq %rax, %rdi
je 0x25296
movq 0xa78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1d8(%rsp), %rax
movq 0x1c8(%rsp), %rdi
cmpq %rax, %rdi
je 0x252bb
movq 0x1d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1f8(%rsp), %rax
movq 0x1e8(%rsp), %rdi
cmpq %rax, %rdi
je 0x252e0
movq 0x1f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1018(%rsp), %rdi
callq 0x4bcac
movq 0x208(%rsp), %rdi
leaq 0x218(%rsp), %rax
cmpq %rax, %rdi
je 0x25312
movq 0x218(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x21f8(%rsp), %rdi
callq 0x4adca
movq 0xa88(%rsp), %rdi
leaq 0xa98(%rsp), %rax
cmpq %rax, %rdi
je 0x25344
movq 0xa98(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xaa8(%rsp), %rdi
leaq 0xab8(%rsp), %rax
cmpq %rax, %rdi
je 0x25369
movq 0xab8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xac8(%rsp), %rdi
leaq 0xad8(%rsp), %rax
cmpq %rax, %rdi
je 0x2538e
movq 0xad8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1090(%rsp), %rdi
callq 0x4bcac
movq 0x228(%rsp), %rdi
leaq 0x238(%rsp), %rax
cmpq %rax, %rdi
je 0x253c0
movq 0x238(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2198(%rsp), %rdi
callq 0x4adca
movq 0xae8(%rsp), %rdi
leaq 0xaf8(%rsp), %rax
cmpq %rax, %rdi
je 0x253f2
movq 0xaf8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x258(%rsp), %rax
movq 0x248(%rsp), %rdi
cmpq %rax, %rdi
je 0x25417
movq 0x258(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb08(%rsp), %rdi
leaq 0xb18(%rsp), %rax
cmpq %rax, %rdi
je 0x2543c
movq 0xb18(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1108(%rsp), %rdi
callq 0x4bcac
movq 0x268(%rsp), %rdi
leaq 0x278(%rsp), %rax
cmpq %rax, %rdi
je 0x2546e
movq 0x278(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2138(%rsp), %rdi
callq 0x4adca
movq 0xb28(%rsp), %rdi
leaq 0xb38(%rsp), %rax
cmpq %rax, %rdi
je 0x254a0
movq 0xb38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb48(%rsp), %rdi
leaq 0xb58(%rsp), %rax
cmpq %rax, %rdi
je 0x254c5
movq 0xb58(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xb68(%rsp), %rdi
leaq 0xb78(%rsp), %rax
cmpq %rax, %rdi
je 0x254ea
movq 0xb78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1180(%rsp), %rdi
callq 0x4bcac
leaq 0x20d8(%rsp), %rdi
callq 0x4adca
movq 0xb88(%rsp), %rdi
leaq 0xb98(%rsp), %rax
cmpq %rax, %rdi
je 0x25529
movq 0xb98(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x298(%rsp), %rax
movq 0x288(%rsp), %rdi
cmpq %rax, %rdi
je 0x2554e
movq 0x298(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xba8(%rsp), %rdi
leaq 0xbb8(%rsp), %rax
cmpq %rax, %rdi
je 0x25573
movq 0xbb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1658(%rsp), %rdi
callq 0x4bcac
movq 0xbc8(%rsp), %rdi
leaq 0xbd8(%rsp), %rax
cmpq %rax, %rdi
je 0x255a5
movq 0xbd8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2078(%rsp), %rdi
callq 0x4adca
movq 0xbe8(%rsp), %rdi
leaq 0xbf8(%rsp), %rax
cmpq %rax, %rdi
je 0x255d7
movq 0xbf8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xc08(%rsp), %rdi
leaq 0xc18(%rsp), %rax
cmpq %rax, %rdi
je 0x255fc
movq 0xc18(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2b8(%rsp), %rax
movq 0x2a8(%rsp), %rdi
cmpq %rax, %rdi
je 0x25621
movq 0x2b8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x11f8(%rsp), %rdi
callq 0x4bcac
movq 0x2c8(%rsp), %rdi
leaq 0x2d8(%rsp), %rax
cmpq %rax, %rdi
je 0x25653
movq 0x2d8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2018(%rsp), %rdi
callq 0x4adca
movq 0xc28(%rsp), %rdi
leaq 0xc38(%rsp), %rax
cmpq %rax, %rdi
je 0x25685
movq 0xc38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0xc48(%rsp), %rdi
leaq 0xc58(%rsp), %rax
cmpq %rax, %rdi
je 0x256aa
movq 0xc58(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x2f8(%rsp), %rax
movq 0x2e8(%rsp), %rdi
cmpq %rax, %rdi
je 0x256cf
movq 0x2f8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x16d0(%rsp), %rdi
callq 0x4bcac
movq 0xc68(%rsp), %rdi
leaq 0xc78(%rsp), %rax
cmpq %rax, %rdi
je 0x25701
movq 0xc78(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1fb8(%rsp), %rdi
callq 0x4adca
movq 0xc88(%rsp), %rdi
leaq 0xc98(%rsp), %rax
cmpq %rax, %rdi
je 0x25733
movq 0xc98(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x318(%rsp), %rax
movq 0x308(%rsp), %rdi
cmpq %rax, %rdi
je 0x25758
movq 0x318(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x338(%rsp), %rax
movq 0x328(%rsp), %rdi
cmpq %rax, %rdi
je 0x2577d
movq 0x338(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1b68(%rsp), %rdi
callq 0x4bcac
leaq 0x1f58(%rsp), %rdi
callq 0x4adca
movq 0xca8(%rsp), %rdi
leaq 0xcb8(%rsp), %rax
cmpq %rax, %rdi
je 0x257bc
movq 0xcb8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x358(%rsp), %rax
movq 0x348(%rsp), %rdi
cmpq %rax, %rdi
je 0x257e1
movq 0x358(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x378(%rsp), %rax
movq 0x368(%rsp), %rdi
cmpq %rax, %rdi
je 0x25806
movq 0x378(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1be0(%rsp), %rdi
callq 0x4bcac
leaq 0x1ef8(%rsp), %rdi
callq 0x4adca
movq 0xcc8(%rsp), %rdi
leaq 0xcd8(%rsp), %rax
cmpq %rax, %rdi
je 0x25845
movq 0xcd8(%rsp), %rsi
incq %rsi
callq 0x12570
movq (%rsp), %rdi
cmpq %r14, %rdi
je 0x2585b
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x20(%rsp), %rdi
cmpq %r12, %rdi
je 0x25872
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1c58(%rsp), %rdi
callq 0x4bcac
leaq 0x1e98(%rsp), %rdi
callq 0x4adca
movq 0x388(%rsp), %rdi
cmpq %rbp, %rdi
je 0x258a9
movq 0x398(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x3a8(%rsp), %rdi
cmpq %r13, %rdi
je 0x258c6
movq 0x3b8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x40(%rsp), %rdi
cmpq %r15, %rdi
je 0x258dd
movq 0x50(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x1cd0(%rsp), %rdi
callq 0x4bcac
leaq 0x1e38(%rsp), %rdi
callq 0x4adca
leaq 0x1dc0(%rsp), %rdi
callq 0x4bcac
leaq 0x1360(%rsp), %rdi
callq 0x4bd0e
movq %rbx, %rdi
callq 0x129d0
movq %rax, %rbx
jmp 0x24704
movq %rax, %rbx
jmp 0x24784
movq %rax, %rbx
jmp 0x2480d
movq %rax, %rbx
jmp 0x24896
movq %rax, %rbx
jmp 0x248fa
movq %rax, %rbx
jmp 0x24983
movq %rax, %rbx
jmp 0x24a0c
movq %rax, %rbx
jmp 0x24a95
movq %rax, %rbx
jmp 0x24af9
movq %rax, %rbx
jmp 0x24b82
movq %rax, %rbx
jmp 0x24be6
movq %rax, %rbx
jmp 0x24c6f
movq %rax, %rbx
jmp 0x24cf8
movq %rax, %rbx
jmp 0x24d81
movq %rax, %rbx
jmp 0x24de5
movq %rax, %rbx
jmp 0x24e49
movq %rax, %rbx
jmp 0x24ead
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x24f33
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x24fde
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2508c
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25115
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x251c3
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x25271
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2531f
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x253cd
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
leaq 0x50(%rsp), %r15
jmp 0x2547b
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x25504
movq %rax, %rbx
leaq 0x30(%rsp), %r12
leaq 0x10(%rsp), %r14
jmp 0x255b2
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25660
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x2570e
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25797
movq %rax, %rbx
leaq 0x10(%rsp), %r14
jmp 0x25820
movq %rax, %rbx
jmp 0x2588c
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Session::showHelp() const
|
void Session::showHelp() const {
Catch::cout()
<< "\nCatch2 v" << libraryVersion() << '\n'
<< m_cli << '\n'
<< "For more detailed usage please see the project docs\n\n" << std::flush;
}
|
pushq %rbp
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
movq 0x1223d2(%rip), %r14 # 0x147f98
leaq 0xd2b73(%rip), %rsi # 0xf8740
movl $0x9, %edx
movq %r14, %rdi
callq 0x12630
callq 0x25d44
leaq 0x1241b2(%rip), %rsi # 0x149d98
movq %r14, %rdi
callq 0x25c51
movb $0xa, %bpl
leaq 0xe(%rsp), %rsi
movb %bpl, (%rsi)
movl $0x1, %edx
movq %r14, %rdi
callq 0x12630
movq %rax, %r14
movq %rbx, %rdi
movq %rax, %rsi
callq 0x2bf3e
leaq 0xf(%rsp), %rsi
movb %bpl, (%rsi)
movl $0x1, %edx
movq %r14, %rdi
callq 0x12630
movq %rax, %rbx
leaq 0xd2b17(%rip), %rsi # 0xf874a
movl $0x35, %edx
movq %rax, %rdi
callq 0x12630
movq %rbx, %rdi
callq 0x12390
addq $0x10, %rsp
popq %rbx
popq %r14
popq %rbp
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Session::applyCommandLine(int, char const* const*)
|
int Session::applyCommandLine( int argc, char const * const * argv ) {
if( m_startupExceptions )
return 1;
auto result = m_cli.parse( Clara::Args( argc, argv ) );
if( !result ) {
config();
getCurrentMutableContext().setConfig(m_config.get());
auto errStream = makeStream( "%stderr" );
auto colour = makeColourImpl( ColourMode::PlatformDefault, errStream.get() );
errStream->stream()
<< colour->guardColour( Colour::Red )
<< "\nError(s) in input:\n"
<< TextFlow::Column( result.errorMessage() ).indent( 2 )
<< "\n\n";
errStream->stream() << "Run with -? for usage\n\n" << std::flush;
return MaxExitCode;
}
if( m_configData.showHelp )
showHelp();
if( m_configData.libIdentify )
libIdentify();
m_config.reset();
return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0xc0, %rsp
movl $0x1, %ebp
cmpb $0x0, 0x178(%rdi)
jne 0x26152
movq %rdi, %rbx
leaq 0x28(%rsp), %r14
movq %r14, %rdi
callq 0x2cf78
leaq 0x60(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq 0x2624e
leaq 0x48(%rsp), %r14
movq %r14, %rdi
callq 0x4c0f8
movq -0x20(%r14), %rdi
leaq 0x38(%rsp), %r12
cmpq %r12, %rdi
je 0x25f1d
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
cmpl $0x0, 0x68(%rsp)
je 0x260ce
movq %rbx, %rdi
callq 0x1e62a
cmpq $0x0, 0x123e88(%rip) # 0x149dc0
jne 0x25f51
movl $0x10, %edi
callq 0x12540
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq %rax, 0x123e6f(%rip) # 0x149dc0
movq 0x123e68(%rip), %rax # 0x149dc0
movq 0x170(%rbx), %rcx
movq %rcx, (%rax)
leaq 0x28(%rsp), %rdi
movq %r12, (%rdi)
leaq 0xd27a7(%rip), %rsi # 0xf8718
leaq 0xd27a7(%rip), %rdx # 0xf871f
callq 0x53aee
leaq 0x10(%rsp), %rdi
leaq 0x28(%rsp), %rsi
callq 0x1e6ec
movq 0x28(%rsp), %rdi
cmpq %r12, %rdi
je 0x25fa3
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10(%rsp), %rbx
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq 0x1ea59
testq %rbx, %rbx
je 0x26164
movq (%rbx), %rax
movq %rbx, %rdi
callq *0x10(%rax)
movq 0x8(%rsp), %r14
testq %r14, %r14
je 0x2617b
movq %rax, %r15
leaq 0x18(%rsp), %rdi
movq %r14, (%rdi)
movl $0x2, 0x8(%rdi)
movb $0x0, 0xc(%rdi)
movq %rax, %rsi
callq 0x2d292
leaq 0xd27e4(%rip), %rsi # 0xf87e0
movl $0x14, %edx
movq %r15, %rdi
callq 0x12630
leaq 0x28(%rsp), %rdi
movq %r12, (%rdi)
movq 0xa0(%rsp), %rsi
movq 0xa8(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
leaq 0x28(%rsp), %rsi
movq $0x4f, 0x20(%rsi)
movq $-0x1, 0x30(%rsi)
movq $0x2, 0x28(%rsi)
movq %r15, %rdi
callq 0x1eb78
leaq 0xd4069(%rip), %rsi # 0xfa0be
movl $0x2, %edx
movq %r15, %rdi
callq 0x12630
movq 0x28(%rsp), %rdi
cmpq %r12, %rdi
je 0x26079
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
cmpb $0x1, 0x24(%rsp)
jne 0x2608d
movq 0x18(%rsp), %rdi
movq (%rdi), %rax
xorl %esi, %esi
callq *0x10(%rax)
movq (%rbx), %rax
movq %rbx, %rdi
callq *0x10(%rax)
movq %rax, %r15
leaq 0xd2755(%rip), %rsi # 0xf87f5
movl $0x17, %edx
movq %rax, %rdi
callq 0x12630
movq %r15, %rdi
callq 0x12390
movq (%r14), %rax
movq %r14, %rdi
callq *0x8(%rax)
movq (%rbx), %rax
movq %rbx, %rdi
callq *0x8(%rax)
movl $0xff, %ebp
jmp 0x26106
cmpb $0x1, 0x67(%rbx)
jne 0x260dc
movq %rbx, %rdi
callq 0x25bb4
cmpb $0x1, 0x6a(%rbx)
jne 0x260e7
callq 0x25d62
movq 0x170(%rbx), %rdi
testq %rdi, %rdi
je 0x260f9
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x170(%rbx)
xorl %ebp, %ebp
leaq 0x120dbb(%rip), %rax # 0x146ec8
leaq 0xb0(%rsp), %rcx
movq %rax, -0x50(%rcx)
movq -0x10(%rcx), %rdi
cmpq %rcx, %rdi
je 0x26132
movq 0xb0(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x120de7(%rip), %rax # 0x146f20
movq %rax, 0x60(%rsp)
cmpl $0x0, 0x68(%rsp)
jne 0x26152
leaq 0x88(%rsp), %rdi
callq 0x4b2a6
movl %ebp, %eax
addq $0xc0, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
leaq 0xd4dd4(%rip), %rdi # 0xfaf3f
leaq 0xd4ac5(%rip), %rsi # 0xfac37
leaq 0xd5cf3(%rip), %rcx # 0xfbe6c
jmp 0x26190
leaq 0xd4dbd(%rip), %rdi # 0xfaf3f
leaq 0xd4aae(%rip), %rsi # 0xfac37
leaq 0xd5c86(%rip), %rcx # 0xfbe16
movl $0x385, %edx # imm = 0x385
callq 0x12340
jmp 0x26211
jmp 0x261f8
movq %rax, %rbx
jmp 0x2622c
movq %rax, %rbx
movq 0x28(%rsp), %rdi
cmpq %r12, %rdi
je 0x2623c
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x2623c
jmp 0x261d7
movq %rax, %rbx
leaq 0x28(%rsp), %rdi
callq 0x4ae14
jmp 0x26246
movq %rax, %rbx
jmp 0x2623c
movq %rax, %rbx
movq 0x28(%rsp), %rdi
cmpq %r12, %rdi
je 0x261fb
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x261fb
movq %rax, %rbx
cmpb $0x1, 0x24(%rsp)
jne 0x2621c
movq 0x18(%rsp), %rdi
movq (%rdi), %rax
xorl %esi, %esi
callq *0x10(%rax)
jmp 0x2621c
movq %rax, %rdi
callq 0x1a51a
movq %rax, %rbx
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x2622c
movq (%rdi), %rax
callq *0x8(%rax)
movq 0x10(%rsp), %rdi
testq %rdi, %rdi
je 0x2623c
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x60(%rsp), %rdi
callq 0x4ae3c
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Session::useConfigData(Catch::ConfigData const&)
|
void Session::useConfigData( ConfigData const& configData ) {
m_configData = configData;
m_config.reset();
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %rbx
addq $0x60, %rdi
movl $0xc, %ecx
rep movsq (%rsi), %es:(%rdi)
movl 0x60(%r14), %eax
movl %eax, 0xc0(%rbx)
movl $0xc8, %r15d
leaq (%rbx,%r15), %rdi
leaq 0x68(%r14), %rsi
callq 0x12290
leaq 0xe8(%rbx), %rdi
leaq 0x88(%r14), %rsi
callq 0x12290
leaq 0x108(%rbx), %rdi
leaq 0xa8(%r14), %rsi
callq 0x12290
leaq 0x128(%rbx), %rdi
addq %r14, %r15
movq %r15, %rsi
callq 0x4fcbc
leaq 0x140(%rbx), %rdi
leaq 0xe0(%r14), %rsi
callq 0x4fd92
leaq 0x158(%rbx), %rdi
addq $0xf8, %r14
movq %r14, %rsi
callq 0x4fd92
movq 0x170(%rbx), %rdi
testq %rdi, %rdi
je 0x2635d
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x170(%rbx)
popq %rbx
popq %r14
popq %r15
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Session::runInternal()
|
int Session::runInternal() {
if( m_startupExceptions )
return 1;
if (m_configData.showHelp || m_configData.libIdentify) {
return 0;
}
if ( m_configData.shardIndex >= m_configData.shardCount ) {
Catch::cerr() << "The shard count (" << m_configData.shardCount
<< ") must be greater than the shard index ("
<< m_configData.shardIndex << ")\n"
<< std::flush;
return 1;
}
CATCH_TRY {
config(); // Force config to be constructed
seedRng( *m_config );
if (m_configData.filenamesAsTags) {
applyFilenamesAsTags();
}
// Set up global config instance before we start calling into other functions
getCurrentMutableContext().setConfig(m_config.get());
// Create reporter(s) so we can route listings through them
auto reporter = prepareReporters(m_config.get());
auto const& invalidSpecs = m_config->testSpec().getInvalidSpecs();
if ( !invalidSpecs.empty() ) {
for ( auto const& spec : invalidSpecs ) {
reporter->reportInvalidTestSpec( spec );
}
return 1;
}
// Handle list request
if (list(*reporter, *m_config)) {
return 0;
}
TestGroup tests { CATCH_MOVE(reporter), m_config.get() };
auto const totals = tests.execute();
if ( tests.hadUnmatchedTestSpecs()
&& m_config->warnAboutUnmatchedTestSpecs() ) {
return 3;
}
if ( totals.testCases.total() == 0
&& !m_config->zeroTestsCountAsSuccess() ) {
return 2;
}
if ( totals.testCases.total() > 0 &&
totals.testCases.total() == totals.testCases.skipped
&& !m_config->zeroTestsCountAsSuccess() ) {
return 4;
}
// Note that on unices only the lower 8 bits are usually used, clamping
// the return value to 255 prevents false negative when some multiple
// of 256 tests has failed
return (std::min) (MaxExitCode, static_cast<int>(totals.assertions.failed));
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x298, %rsp # imm = 0x298
movl $0x1, %ebp
cmpb $0x0, 0x178(%rdi)
jne 0x26a18
movq %rdi, %r12
xorl %ebp, %ebp
cmpb $0x0, 0x67(%rdi)
jne 0x26a18
cmpb $0x0, 0x6a(%r12)
jne 0x26a18
movl 0x78(%r12), %eax
cmpl 0x74(%r12), %eax
jae 0x264e9
movq %r12, %rdi
callq 0x1e62a
movq 0x170(%r12), %r14
testq %r14, %r14
je 0x26afe
callq 0x29eb9
movq (%r14), %rax
movq %r14, %rdi
callq *0x88(%rax)
movl %eax, %eax
movabsq $0x5851f42d4c957f2d, %rcx # imm = 0x5851F42D4C957F2D
imulq %rax, %rcx
movabsq $0x6f1903917447ed96, %rax # imm = 0x6F1903917447ED96
addq %rcx, %rax
movq %rax, 0x123920(%rip) # 0x149dc8
cmpb $0x1, 0x69(%r12)
jne 0x2655e
callq 0x461bc
movq (%rax), %rcx
movq %rax, %rdi
callq *0x18(%rcx)
movq (%rax), %rcx
movq %rax, %rdi
callq *0x10(%rcx)
movq (%rax), %rbx
movq 0x8(%rax), %r14
cmpq %r14, %rbx
je 0x2655e
movq (%rbx), %rdi
callq 0x282a8
addq $0x8, %rbx
jmp 0x264d2
movq 0x121af0(%rip), %r14 # 0x147fe0
leaq 0xd237d(%rip), %rsi # 0xf8874
movl $0x11, %edx
movq %r14, %rdi
callq 0x12630
movl 0x74(%r12), %esi
movq %r14, %rdi
callq 0x12380
movq %rax, %r14
leaq 0xd236b(%rip), %rsi # 0xf8886
movl $0x28, %edx
movq %rax, %rdi
callq 0x12630
movl 0x78(%r12), %esi
movq %r14, %rdi
callq 0x12380
movq %rax, %rbx
leaq 0xd2370(%rip), %rsi # 0xf88af
movl $0x2, %edx
movq %rax, %rdi
callq 0x12630
movq %rbx, %rdi
callq 0x12390
movl $0x1, %ebp
jmp 0x26a18
cmpq $0x0, 0x12385a(%rip) # 0x149dc0
jne 0x2657f
movl $0x10, %edi
callq 0x12540
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq %rax, 0x123841(%rip) # 0x149dc0
movq 0x12383a(%rip), %rax # 0x149dc0
movq 0x170(%r12), %r14
movq %r14, (%rax)
callq 0x461bc
movq (%rax), %rcx
movq %rax, %rdi
callq *0x10(%rcx)
movq (%rax), %rax
testq %rax, %rax
je 0x26ae7
movq (%rax), %rcx
cmpq 0x8(%rax), %rcx
jne 0x26669
movq 0x118(%r14), %r15
movq 0x120(%r14), %rax
subq %r15, %rax
cmpq $0x78, %rax
jne 0x26669
leaq 0x20(%r15), %rsi
movq %rsp, %rdi
callq 0x1e6ec
movb 0x40(%r15), %bl
leaq 0x48(%r15), %rsi
leaq 0x20(%rsp), %rdi
callq 0x4f852
movq (%rsp), %rcx
leaq 0x90(%rsp), %rax
movq %rcx, -0x20(%rax)
movq $0x0, (%rsp)
movq %r14, -0x18(%rax)
movb %bl, -0x10(%rax)
movq 0x30(%rsp), %rcx
testq %rcx, %rcx
je 0x26a2c
leaq 0x28(%rsp), %rdx
movl (%rdx), %esi
movl %esi, 0x90(%rsp)
movq %rcx, 0x98(%rsp)
movups 0x10(%rdx), %xmm0
movups %xmm0, 0xa0(%rsp)
movq %rax, 0x8(%rcx)
leaq 0x48(%rsp), %rcx
movq (%rcx), %rax
movq %rax, 0xb0(%rsp)
movq $0x0, -0x18(%rcx)
movq %rdx, -0x10(%rcx)
movq %rdx, -0x8(%rcx)
jmp 0x26a4b
movl $0x40, %edi
callq 0x12540
movw $0x0, 0x8(%rax)
movq %r14, 0x10(%rax)
leaq 0x11f6fc(%rip), %rcx # 0x145d80
movq %rcx, (%rax)
movq $0x0, 0x38(%rax)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rax)
movups %xmm0, 0x21(%rax)
movq %rax, (%rsp)
callq 0x461bc
movq (%rax), %rcx
movq %rax, %rdi
callq *0x10(%rcx)
movq (%rax), %rax
testq %rax, %rax
je 0x26ae7
movq %r12, 0x60(%rsp)
movq (%rax), %rbx
movq 0x8(%rax), %r13
cmpq %r13, %rbx
je 0x26717
leaq 0x70(%rsp), %r15
movq (%rsp), %r12
testq %r12, %r12
je 0x26a92
movq (%rbx), %rsi
testq %rsi, %rsi
je 0x26ac8
movq (%rsi), %rax
movq %r15, %rdi
movq %r14, %rdx
callq *0x10(%rax)
movq %r12, %rdi
movq %r15, %rsi
callq 0x402ea
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x2670e
movq (%rdi), %rax
callq *0x8(%rax)
addq $0x8, %rbx
cmpq %r13, %rbx
jne 0x266ce
movq 0x118(%r14), %r12
movq 0x120(%r14), %rax
movq %rax, 0x68(%rsp)
cmpq %rax, %r12
je 0x26895
addq $0x48, %r12
leaq 0x20(%rsp), %rbp
leaq 0x18(%rsp), %r15
movq (%rsp), %r13
testq %r13, %r13
je 0x26a92
leaq -0x28(%r12), %rsi
leaq 0x10(%rsp), %rdi
callq 0x1e6ec
movb -0x8(%r12), %bl
movq %rbp, %rdi
movq %r12, %rsi
callq 0x4f852
movq 0x10(%rsp), %rax
movq %rax, 0x70(%rsp)
movq $0x0, 0x10(%rsp)
movq %r14, 0x78(%rsp)
movb %bl, 0x80(%rsp)
movq 0x30(%rsp), %rax
testq %rax, %rax
je 0x267ee
movl 0x28(%rsp), %ecx
movl %ecx, 0x90(%rsp)
movq %rax, 0x98(%rsp)
movups 0x38(%rsp), %xmm0
movups %xmm0, 0xa0(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0x8(%rax)
movq 0x48(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq $0x0, 0x30(%rsp)
leaq 0x28(%rsp), %rax
movq %rax, 0x38(%rsp)
movq %rax, 0x40(%rsp)
leaq 0x48(%rsp), %rax
jmp 0x26825
movl $0x0, 0x90(%rsp)
movq $0x0, 0x98(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0xa0(%rsp)
movq %rax, 0xa8(%rsp)
leaq 0xb0(%rsp), %rax
leaq -0x48(%r12), %rsi
movq $0x0, (%rax)
movq %r15, %rdi
leaq 0x70(%rsp), %rdx
callq 0x4550b
movq %r13, %rdi
movq %r15, %rsi
callq 0x40340
movq 0x18(%rsp), %rdi
testq %rdi, %rdi
je 0x26859
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x88(%rsp), %rdi
callq 0x4f6f6
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x26876
movq (%rdi), %rax
callq *0x8(%rax)
movq %rbp, %rdi
callq 0x4f6f6
leaq 0x78(%r12), %rax
addq $0x30, %r12
cmpq 0x68(%rsp), %r12
movq %rax, %r12
jne 0x26741
movq %rsp, %rdi
movq (%rdi), %rax
movq $0x0, (%rdi)
movq %rax, 0x8(%rsp)
callq 0x511c6
movq 0x60(%rsp), %r12
movq 0x170(%r12), %rdi
testq %rdi, %rdi
je 0x26b15
movq (%rdi), %rax
callq *0x68(%rax)
movq 0x18(%rax), %rbx
movq 0x20(%rax), %r14
cmpq %r14, %rbx
je 0x26903
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x26aa9
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq (%rdi), %rax
callq *0x18(%rax)
addq $0x20, %rbx
cmpq %r14, %rbx
jne 0x268d5
movl $0x1, %ebp
jmp 0x26a08
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x26b2c
movq 0x170(%r12), %rsi
testq %rsi, %rsi
je 0x26afe
callq 0x26d67
xorl %ebp, %ebp
testb %al, %al
jne 0x26a08
movq 0x170(%r12), %rdx
leaq 0x70(%rsp), %rdi
leaq 0x8(%rsp), %rsi
callq 0x2735c
leaq 0x20(%rsp), %rdi
leaq 0x70(%rsp), %rsi
callq 0x27654
cmpb $0x1, 0x290(%rsp)
jne 0x26981
movq 0x170(%r12), %rdi
testq %rdi, %rdi
je 0x26b15
movq (%rdi), %rax
callq *0x38(%rax)
movl $0x3, %ebp
testb %al, %al
jne 0x269fe
movq 0x48(%rsp), %rax
addq 0x40(%rsp), %rax
addq 0x50(%rsp), %rax
addq 0x58(%rsp), %rax
jne 0x269b7
movq 0x170(%r12), %rdi
testq %rdi, %rdi
je 0x26b15
movq (%rdi), %rax
callq *0x40(%rax)
movl $0x2, %ebp
testb %al, %al
je 0x269fe
movq 0x48(%rsp), %rax
addq 0x40(%rsp), %rax
addq 0x50(%rsp), %rax
jne 0x269f0
cmpq $0x0, 0x58(%rsp)
je 0x269f0
movq 0x170(%r12), %rdi
testq %rdi, %rdi
je 0x26b15
movq (%rdi), %rax
callq *0x40(%rax)
movl $0x4, %ebp
testb %al, %al
je 0x269fe
movl 0x28(%rsp), %eax
movl $0xff, %ebp
cmpl %ebp, %eax
cmovll %eax, %ebp
leaq 0x70(%rsp), %rdi
callq 0x27750
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x26a18
movq (%rdi), %rax
callq *0x8(%rax)
movl %ebp, %eax
addq $0x298, %rsp # imm = 0x298
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0xb0(%rsp), %rcx
movl $0x0, -0x20(%rcx)
movq $0x0, -0x18(%rcx)
movq %rax, -0x10(%rcx)
movq %rax, -0x8(%rcx)
movq $0x0, (%rcx)
leaq 0x8(%rsp), %rdi
leaq 0x70(%rsp), %rdx
movq %r15, %rsi
callq 0x4550b
leaq 0x88(%rsp), %r14
movq %r14, %rdi
callq 0x4f6f6
movq -0x18(%r14), %rdi
testq %rdi, %rdi
je 0x26a83
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x20(%rsp), %rdi
callq 0x4f6f6
jmp 0x268b1
leaq 0xd44a6(%rip), %rdi # 0xfaf3f
leaq 0xd4197(%rip), %rsi # 0xfac37
leaq 0xd449e(%rip), %rcx # 0xfaf45
jmp 0x26abe
leaq 0xd448f(%rip), %rdi # 0xfaf3f
leaq 0xd4180(%rip), %rsi # 0xfac37
leaq 0xd5499(%rip), %rcx # 0xfbf57
movl $0x385, %edx # imm = 0x385
callq 0x12340
leaq 0xd4470(%rip), %rdi # 0xfaf3f
leaq 0xd4161(%rip), %rsi # 0xfac37
leaq 0xd44c4(%rip), %rcx # 0xfafa1
movl $0x389, %edx # imm = 0x389
callq 0x12340
leaq 0xd4451(%rip), %rdi # 0xfaf3f
leaq 0xd4142(%rip), %rsi # 0xfac37
leaq 0xd5f6b(%rip), %rcx # 0xfca67
jmp 0x26add
leaq 0xd443a(%rip), %rdi # 0xfaf3f
leaq 0xd412b(%rip), %rsi # 0xfac37
leaq 0xd53a9(%rip), %rcx # 0xfbebc
jmp 0x26b41
leaq 0xd4423(%rip), %rdi # 0xfaf3f
leaq 0xd4114(%rip), %rsi # 0xfac37
leaq 0xd53df(%rip), %rcx # 0xfbf09
jmp 0x26abe
leaq 0xd440c(%rip), %rdi # 0xfaf3f
leaq 0xd40fd(%rip), %rsi # 0xfac37
leaq 0xd5474(%rip), %rcx # 0xfbfb5
movl $0x37d, %edx # imm = 0x37D
callq 0x12340
jmp 0x26c9e
movq %rdx, %r14
movq %rax, %rbx
leaq 0x88(%rsp), %r15
movq %r15, %rdi
callq 0x4f6f6
movq -0x18(%r15), %rdi
testq %rdi, %rdi
je 0x26b75
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x20(%rsp), %rdi
callq 0x4f6f6
jmp 0x26b87
movq %rdx, %r14
movq %rax, %rbx
movq (%rsp), %rdi
jmp 0x26ca9
movq %rdx, %r14
movq %rax, %rbx
leaq 0x70(%rsp), %rdi
callq 0x27750
jmp 0x26ca4
jmp 0x26c9e
jmp 0x26c0e
jmp 0x26bb0
jmp 0x26bb0
movq %rdx, %r14
movq %rax, %rbx
jmp 0x26c34
jmp 0x26c0e
movq %rdx, %r14
movq %rax, %rbx
jmp 0x26c07
movq %rdx, %r14
movq %rax, %rbx
jmp 0x26be0
movq %rdx, %r14
movq %rax, %rbx
movq 0x18(%rsp), %rdi
testq %rdi, %rdi
je 0x26be0
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x88(%rsp), %rdi
callq 0x4f6f6
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x26bfd
movq (%rdi), %rax
callq *0x8(%rax)
leaq 0x20(%rsp), %rdi
callq 0x4f6f6
movq 0x10(%rsp), %rdi
jmp 0x26c21
movq %rdx, %r14
movq %rax, %rbx
jmp 0x26c2c
movq %rdx, %r14
movq %rax, %rbx
movq 0x70(%rsp), %rdi
testq %rdi, %rdi
je 0x26c2c
movq (%rdi), %rax
callq *0x8(%rax)
movq %rsp, %rdi
callq 0x511c6
cmpl $0x1, %r14d
jne 0x26c8e
movq %rbx, %rdi
callq 0x121e0
movq (%rax), %rcx
movq %rax, %rdi
callq *0x10(%rcx)
movq 0x12138e(%rip), %rdi # 0x147fe0
movq %rax, %rsi
callq 0x12530
leaq 0x18(%rsp), %rsi
movb $0xa, (%rsi)
movl $0x1, %edx
movq %rax, %rdi
callq 0x12630
movq %rax, %rdi
callq 0x12390
callq 0x128f0
movl $0xff, %ebp
jmp 0x26a18
movq %rax, %rbx
callq 0x128f0
movq %rbx, %rdi
callq 0x129d0
movq %rax, %rdi
callq 0x1a51a
movq %rdx, %r14
movq %rax, %rbx
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x26c34
movq (%rdi), %rax
callq *0x8(%rax)
jmp 0x26c34
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::(anonymous namespace)::TestGroup::TestGroup(Catch::Detail::unique_ptr<Catch::IEventListener>&&, Catch::Config const*)
|
explicit TestGroup(IEventListenerPtr&& reporter, Config const* config):
m_reporter(reporter.get()),
m_config{config},
m_context{config, CATCH_MOVE(reporter)} {
assert( m_config->testSpec().getInvalidSpecs().empty() &&
"Invalid test specs should be handled before running tests" );
auto const& allTestCases = getAllTestCasesSorted(*m_config);
auto const& testSpec = m_config->testSpec();
if ( !testSpec.hasFilters() ) {
for ( auto const& test : allTestCases ) {
if ( !test.getTestCaseInfo().isHidden() ) {
m_tests.emplace( &test );
}
}
} else {
m_matches =
testSpec.matchesByFilter( allTestCases, *m_config );
for ( auto const& match : m_matches ) {
m_tests.insert( match.tests.begin(),
match.tests.end() );
}
}
m_tests = createShard(m_tests, m_config->shardCount(), m_config->shardIndex());
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rsi, %rax
movq %rdi, %rbx
movq (%rsi), %rcx
movq %rcx, (%rdi)
movq %rdx, 0x8(%rdi)
addq $0x10, %rdi
movq %rdi, 0x10(%rsp)
movq %rdx, %rsi
movq %rax, %rdx
callq 0x2fbba
leaq 0x1d8(%rbx), %r15
leaq 0x1e0(%rbx), %r12
movl $0x0, 0x1e0(%rbx)
movq $0x0, 0x1e8(%rbx)
movq %r12, 0x1f0(%rbx)
movq %r12, 0x1f8(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x200(%rbx)
movups %xmm0, 0x210(%rbx)
movb $0x0, 0x220(%rbx)
movq %rbx, 0x8(%rsp)
movq 0x8(%rbx), %rdi
movq (%rdi), %rax
callq *0x68(%rax)
movq 0x18(%rax), %rcx
cmpq 0x20(%rax), %rcx
jne 0x275fa
movq 0x8(%rsp), %rax
movq 0x8(%rax), %r13
callq 0x461bc
movq (%rax), %rcx
movq %rax, %rdi
callq *0x18(%rcx)
movq (%rax), %rcx
movq %rax, %rdi
movq %r13, %rsi
callq *0x20(%rcx)
movq %rax, %r13
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
callq *0x68(%rax)
movq (%rax), %rcx
cmpq 0x8(%rax), %rcx
je 0x27514
movq 0x8(%rsp), %rbx
movq 0x8(%rbx), %rcx
leaq 0x50(%rsp), %rdi
movq %rax, %rsi
movq %r13, %rdx
callq 0x28cf6
xorps %xmm0, %xmm0
leaq 0x20(%rsp), %rdi
movaps %xmm0, (%rdi)
movq $0x0, 0x10(%rdi)
movq 0x208(%rbx), %rax
movups 0x210(%rbx), %xmm0
leaq 0x50(%rsp), %r13
movq (%r13), %rcx
movq %rcx, 0x208(%rbx)
movq 0x8(%r13), %rcx
movq %rcx, 0x210(%rbx)
movq 0x10(%r13), %rcx
movq %rcx, 0x218(%rbx)
movq (%rdi), %rcx
movq 0x8(%rdi), %rdx
movq 0x10(%rdi), %rsi
movq %rax, (%rdi)
movups %xmm0, 0x8(%rdi)
movq %rcx, (%r13)
movq %rdx, 0x8(%r13)
movq %rsi, 0x10(%r13)
callq 0x4b0e8
movq %r13, %rdi
callq 0x4b0e8
movq 0x208(%rbx), %r14
movq 0x210(%rbx), %rax
movq %rax, 0x18(%rsp)
cmpq %rax, %r14
je 0x27548
leaq 0x20(%rsp), %r13
movq 0x20(%r14), %rbp
movq 0x28(%r14), %rbx
movq %r15, 0x20(%rsp)
cmpq %rbx, %rbp
je 0x27507
movq %r15, %rdi
movq %r12, %rsi
movq %rbp, %rdx
movq %r13, %rcx
callq 0x5141e
addq $0x8, %rbp
jmp 0x274eb
addq $0x38, %r14
cmpq 0x18(%rsp), %r14
jne 0x274de
jmp 0x27548
movq (%r13), %rbx
movq 0x8(%r13), %r14
cmpq %r14, %rbx
je 0x27548
leaq 0x20(%rsp), %r13
movq (%rbx), %rax
testb $0x2, 0x78(%rax)
jne 0x2753f
movq %rbx, 0x20(%rsp)
movq %r15, %rdi
movq %r13, %rsi
callq 0x512da
addq $0x10, %rbx
cmpq %r14, %rbx
jne 0x27526
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
callq *0x90(%rax)
movl %eax, %ebp
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rdi
movq (%rdi), %rax
callq *0x98(%rax)
movl %ebp, %edx
movl %eax, %ecx
leaq 0x20(%rsp), %rdi
movq %r15, %rsi
callq 0x511e8
movq %r15, %rdi
callq 0x5180e
movq 0x30(%rsp), %rax
testq %rax, %rax
je 0x275e1
leaq 0x28(%rsp), %rcx
movl (%rcx), %edx
movq 0x8(%rsp), %rsi
movl %edx, 0x1e0(%rsi)
movq %rax, 0x1e8(%rsi)
movq 0x10(%rcx), %rdx
movq %rdx, 0x1f0(%rsi)
movq 0x18(%rcx), %rdx
movq %rdx, 0x1f8(%rsi)
movq %r12, 0x8(%rax)
movq 0x20(%rcx), %rax
movq %rax, 0x200(%rsi)
xorl %eax, %eax
movq %rax, 0x8(%rcx)
movq %rcx, 0x10(%rcx)
movq %rcx, 0x18(%rcx)
movq %rax, 0x20(%rcx)
leaq 0x20(%rsp), %rdi
callq 0x517fa
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0xd3a16(%rip), %rdi # 0xfb017
leaq 0xd0e0a(%rip), %rsi # 0xf8412
leaq 0xd3a76(%rip), %rcx # 0xfb085
movl $0x434, %edx # imm = 0x434
callq 0x12340
jmp 0x27625
jmp 0x27625
jmp 0x27625
jmp 0x27625
jmp 0x27625
jmp 0x27625
movq %rax, %r12
movq 0x8(%rsp), %rdi
addq $0x208, %rdi # imm = 0x208
callq 0x4b0e8
movq %r15, %rdi
callq 0x517fa
movq 0x10(%rsp), %rdi
callq 0x2fdee
movq %r12, %rdi
callq 0x129d0
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::toLower(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
std::string toLower( std::string const& s ) {
std::string lc = s;
toLowerInPlace( lc );
return lc;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x10(%rdi), %rax
movq %rax, (%rdi)
movq (%rsi), %rax
movq 0x8(%rsi), %rdx
addq %rax, %rdx
movq %rax, %rsi
callq 0x53b7e
movq 0x8(%rbx), %r14
testq %r14, %r14
je 0x286ae
movq (%rbx), %r15
xorl %r12d, %r12d
movzbl (%r15,%r12), %edi
callq 0x127d0
movb %al, (%r15,%r12)
incq %r12
cmpq %r12, %r14
jne 0x28698
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::TestSpec::matches(Catch::TestCaseInfo const&) const
|
bool TestSpec::matches( TestCaseInfo const& testCase ) const {
return std::any_of( m_filters.begin(), m_filters.end(), [&]( Filter const& f ){ return f.matches( testCase ); } );
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %r14
movq (%rdi), %rbx
movq 0x8(%rdi), %r12
movq %r12, %rbp
subq %rbx, %rbp
sarq $0x4, %rbp
movabsq $-0x5555555555555555, %r13 # imm = 0xAAAAAAAAAAAAAAAB
imulq %r13, %rbp
sarq $0x2, %rbp
testq %rbp, %rbp
jle 0x28c84
incq %rbp
movq %rbx, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28cde
leaq 0x30(%rbx), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28ce1
leaq 0x60(%rbx), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28ce1
leaq 0x90(%rbx), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28ce1
addq $0xc0, %rbx
decq %rbp
cmpq $0x1, %rbp
jg 0x28c1d
movq %r12, %rax
subq %rbx, %rax
sarq $0x4, %rax
imulq %r13, %rax
cmpq $0x1, %rax
je 0x28ccd
cmpq $0x2, %rax
je 0x28cba
movq %r12, %r15
cmpq $0x3, %rax
jne 0x28ce1
movq %rbx, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28cde
addq $0x30, %rbx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
jne 0x28cde
addq $0x30, %rbx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x289e2
testb %al, %al
cmoveq %r12, %rbx
movq %rbx, %r15
cmpq %r12, %r15
setne %al
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Detail::convertIntoString[abi:cxx11](Catch::StringRef)
|
std::string convertIntoString(StringRef string) {
return convertIntoString(string, getCurrentContext().getConfig()->showInvisibles());
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
cmpq $0x0, 0x120cfd(%rip) # 0x149dc0
jne 0x290dc
movl $0x10, %edi
callq 0x12540
xorps %xmm0, %xmm0
movups %xmm0, (%rax)
movq %rax, 0x120ce4(%rip) # 0x149dc0
movq 0x120cdd(%rip), %rax # 0x149dc0
movq (%rax), %rdi
movq (%rdi), %rax
callq *0x50(%rax)
movzbl %al, %ecx
movq %rbx, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x28fb8
movq %rbx, %rax
popq %rbx
popq %r14
popq %r15
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::StringMaker<char const*, void>::convert[abi:cxx11](char const*)
|
std::string StringMaker<char const*>::convert(char const* str) {
if (str) {
return Detail::convertIntoString( str );
} else {
return{ "{null string}" };
}
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
testq %rsi, %rsi
je 0x2926d
movq %rsi, %r14
movq %rsi, %rdi
callq 0x12210
movq %rbx, %rdi
movq %r14, %rsi
movq %rax, %rdx
callq 0x290ad
jmp 0x2928a
leaq 0x10(%rbx), %rax
movq %rax, (%rbx)
leaq 0xcf781(%rip), %rsi # 0xf89fc
leaq 0xcf787(%rip), %rdx # 0xf8a09
movq %rbx, %rdi
callq 0x53aee
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Section::Section(Catch::SourceLineInfo const&, Catch::StringRef, char const*)
|
Section::Section( SourceLineInfo const& _lineInfo,
StringRef _name,
const char* const ):
m_info( { "invalid", static_cast<std::size_t>( -1 ) }, std::string{} ),
m_sectionIncluded(
getResultCapture().sectionStarted( _name, _lineInfo, m_assertions ) ) {
// We delay initialization the SectionInfo member until we know
// this section needs it, so we avoid allocating std::string for name.
// We also delay timer start to avoid the potential syscall unless we
// will actually use the result.
if ( m_sectionIncluded ) {
m_info.name = static_cast<std::string>( _name );
m_info.lineInfo = _lineInfo;
m_timer.start();
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rcx, %r15
movq %rdx, %r12
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x38(%rsp), %rax
movq %rax, -0x10(%rax)
xorl %ecx, %ecx
movq %rcx, -0x8(%rax)
movb %cl, (%rax)
leaq 0x10(%rdi), %r13
movq %r13, (%rdi)
movb (%rax), %dl
movb %dl, 0x10(%rdi)
movq 0x1(%rax), %rdx
movq %rdx, 0x11(%rdi)
movl 0x9(%rax), %edx
movl %edx, 0x19(%rdi)
movzwl 0xd(%rax), %edx
movw %dx, 0x1d(%rdi)
movb 0xf(%rax), %dl
movb %dl, 0x1f(%rdi)
movq %rcx, 0x8(%rdi)
movq %rax, -0x10(%rax)
movq %rcx, -0x8(%rax)
movb %cl, (%rax)
leaq 0xc69dc(%rip), %rax # 0xf99f2
movq %rax, 0x20(%rdi)
movq $-0x1, 0x28(%rdi)
xorps %xmm0, %xmm0
movups %xmm0, 0x30(%rdi)
movups %xmm0, 0x40(%rdi)
callq 0x1d5ae
leaq 0x30(%rbx), %r8
movq (%rax), %r9
movq %rax, %rdi
movq %r12, %rsi
movq %r15, %rdx
movq %r14, %rcx
callq *0x18(%r9)
movb %al, 0x50(%rbx)
movq $0x0, 0x58(%rbx)
testb %al, %al
je 0x330af
leaq 0x18(%rsp), %rbp
movq %rbp, -0x10(%rbp)
addq %r12, %r15
leaq 0x8(%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
callq 0x53aee
leaq 0x20(%rbx), %r12
leaq 0x8(%rsp), %r15
movq %rbx, %rdi
movq %r15, %rsi
callq 0x126b0
movq (%r15), %rdi
cmpq %rbp, %rdi
je 0x3309d
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movups (%r14), %xmm0
movups %xmm0, (%r12)
callq 0x12090
movq %rax, 0x58(%rbx)
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x330c0
movq %rax, %r14
movq (%rbx), %rdi
cmpq %r13, %rdi
je 0x330d7
movq (%r13), %rsi
incq %rsi
callq 0x12570
movq %r14, %rdi
callq 0x129d0
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Section::~Section()
|
Section::~Section() {
if( m_sectionIncluded ) {
SectionEndInfo endInfo{ CATCH_MOVE(m_info), m_assertions, m_timer.getElapsedSeconds() };
if ( uncaught_exceptions() ) {
getResultCapture().sectionEndedEarly( CATCH_MOVE(endInfo) );
} else {
getResultCapture().sectionEnded( CATCH_MOVE( endInfo ) );
}
}
}
|
pushq %r14
pushq %rbx
subq $0x58, %rsp
movq %rdi, %rbx
cmpb $0x1, 0x50(%rdi)
jne 0x331c6
leaq 0x10(%rsp), %r14
movq %r14, -0x10(%r14)
movq (%rbx), %rcx
leaq 0x10(%rbx), %rax
cmpq %rax, %rcx
je 0x33117
movq %rcx, (%rsp)
movq (%rax), %rcx
movq %rcx, 0x10(%rsp)
jmp 0x3311e
movups (%rax), %xmm0
movups %xmm0, (%r14)
movq 0x8(%rbx), %rcx
movq %rcx, 0x8(%rsp)
movq %rax, (%rbx)
movq $0x0, 0x8(%rbx)
movb $0x0, 0x10(%rbx)
movups 0x20(%rbx), %xmm0
movups %xmm0, 0x20(%rsp)
movups 0x30(%rbx), %xmm0
movups 0x40(%rbx), %xmm1
movups %xmm1, 0x40(%rsp)
movups %xmm0, 0x30(%rsp)
callq 0x12090
subq 0x58(%rbx), %rax
shrq $0x3, %rax
movabsq $0x20c49ba5e353f7cf, %rcx # imm = 0x20C49BA5E353F7CF
mulq %rcx
shrq $0x4, %rdx
xorps %xmm0, %xmm0
cvtsi2sd %rdx, %xmm0
divsd 0xc34e1(%rip), %xmm0 # 0xf6660
movsd %xmm0, 0x50(%rsp)
callq 0x12680
testl %eax, %eax
jle 0x3319a
callq 0x1d5ae
movl $0x28, %ecx
jmp 0x331a4
callq 0x1d5ae
movl $0x20, %ecx
movq (%rax), %rdx
movq %rsp, %rsi
movq %rax, %rdi
callq *(%rdx,%rcx)
movq (%rsp), %rdi
cmpq %r14, %rdi
je 0x331c6
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x12570
movq (%rbx), %rdi
addq $0x10, %rbx
cmpq %rbx, %rdi
je 0x331dd
movq (%rbx), %rsi
incq %rsi
callq 0x12570
addq $0x58, %rsp
popq %rbx
popq %r14
retq
movq %rax, %rdi
callq 0x1a51a
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::operator<<(std::ostream&, Catch::SourceLineInfo const&)
|
std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) {
#ifndef __GNUG__
os << info.file << '(' << info.line << ')';
#else
os << info.file << ':' << info.line;
#endif
return os;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movq (%rsi), %r15
testq %r15, %r15
je 0x332e8
movq %r15, %rdi
callq 0x12210
movq %rbx, %rdi
movq %r15, %rsi
movq %rax, %rdx
callq 0x12630
jmp 0x332ff
movq (%rbx), %rax
movq -0x18(%rax), %rax
leaq (%rbx,%rax), %rdi
movl 0x20(%rbx,%rax), %esi
orl $0x1, %esi
callq 0x12940
leaq 0xf(%rsp), %rsi
movb $0x3a, (%rsi)
movl $0x1, %edx
movq %rbx, %rdi
callq 0x12630
movq 0x8(%r14), %rsi
movq %rax, %rdi
callq 0x12380
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::replaceInPlace(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) {
bool replaced = false;
std::size_t i = str.find( replaceThis );
while( i != std::string::npos ) {
replaced = true;
str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() );
if( i < str.size()-withThis.size() )
i = str.find( replaceThis, i+withThis.size() );
else
i = std::string::npos;
}
return replaced;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rdx, %rbx
movq %rsi, %rax
movq %rdi, %r15
movq (%rsi), %rsi
movq %rax, 0x50(%rsp)
movq 0x8(%rax), %rcx
xorl %edx, %edx
callq 0x12980
movq %rax, 0x48(%rsp)
cmpq $-0x1, %rax
je 0x3367f
leaq 0x38(%rsp), %rbp
leaq 0x68(%rsp), %r13
movq 0x48(%rsp), %r12
movq %rbp, %r14
movq %r13, %rbp
leaq 0x78(%rsp), %r13
movq %r13, %rdi
movq %r15, %rsi
xorl %edx, %edx
movq %r12, %rcx
callq 0x125c0
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %r13, %rdi
callq 0x12160
movq %r14, 0x28(%rsp)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x334e0
movq %rdx, 0x28(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x38(%rsp)
jmp 0x334e7
movups (%rcx), %xmm0
movups %xmm0, (%r14)
movq %rbp, %r13
movq 0x8(%rax), %rdx
movq %rdx, 0x30(%rsp)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq 0x50(%rsp), %rax
movq 0x8(%rax), %rdx
addq %r12, %rdx
leaq 0x8(%rsp), %rdi
movq %r15, %rsi
movq $-0x1, %rcx
callq 0x125c0
movq 0x28(%rsp), %rcx
movl $0xf, %esi
movq %r14, %rbp
cmpq %r14, %rcx
je 0x33539
movq 0x38(%rsp), %rsi
movq 0x30(%rsp), %r8
movq 0x10(%rsp), %rdx
leaq (%rdx,%r8), %rax
cmpq %rsi, %rax
jbe 0x33567
movl $0xf, %esi
leaq 0x18(%rsp), %rdi
cmpq %rdi, 0x8(%rsp)
je 0x33562
movq 0x18(%rsp), %rsi
cmpq %rsi, %rax
jbe 0x33578
movq 0x8(%rsp), %rsi
leaq 0x28(%rsp), %rdi
callq 0x12160
jmp 0x33586
leaq 0x8(%rsp), %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x12610
movq %r13, 0x58(%rsp)
movq (%rax), %rdx
leaq 0x10(%rax), %rcx
cmpq %rcx, %rdx
je 0x335a6
movq %rdx, 0x58(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x68(%rsp)
jmp 0x335ae
movups (%rcx), %xmm0
movups %xmm0, (%r13)
movq %rax, %rdx
addq $0x8, %rdx
movq 0x8(%rax), %rsi
movq %rsi, 0x60(%rsp)
movq %rcx, (%rax)
movq $0x0, (%rdx)
movb $0x0, (%rcx)
movq %r15, %rdi
leaq 0x58(%rsp), %rsi
callq 0x126b0
movq 0x58(%rsp), %rdi
cmpq %r13, %rdi
je 0x335ef
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x8(%rsp), %rdi
leaq 0x18(%rsp), %rax
cmpq %rax, %rdi
je 0x3360b
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x28(%rsp), %rdi
cmpq %rbp, %rdi
je 0x33622
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x78(%rsp), %rdi
leaq 0x88(%rsp), %rax
cmpq %rax, %rdi
je 0x33644
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x8(%r15), %rcx
movq 0x8(%rbx), %rdx
subq %rdx, %rcx
movq $-0x1, %rax
cmpq %rcx, %r12
jae 0x33672
addq %r12, %rdx
movq 0x50(%rsp), %rax
movq (%rax), %rsi
movq 0x8(%rax), %rcx
movq %r15, %rdi
callq 0x12980
movq %rax, %r12
cmpq $-0x1, %rax
jne 0x33493
cmpq $-0x1, 0x48(%rsp)
setne %al
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
jmp 0x336da
movq %rax, %rbx
jmp 0x336c3
movq %rax, %rbx
movq 0x8(%rsp), %rdi
leaq 0x18(%rsp), %rax
cmpq %rax, %rdi
je 0x336c3
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x28(%rsp), %rdi
cmpq %r14, %rdi
je 0x336da
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x78(%rsp), %rdi
leaq 0x88(%rsp), %rax
cmpq %rax, %rdi
je 0x336fc
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::TestSpecParser::processNoneChar(char)
|
bool TestSpecParser::processNoneChar( char c ) {
switch( c ) {
case ' ':
return true;
case '~':
m_exclusion = true;
return false;
case '[':
startNewMode( Tag );
return false;
case '"':
startNewMode( QuotedName );
return false;
default:
startNewMode( Name );
return false;
}
}
|
cmpl $0x5a, %esi
jg 0x3539f
cmpl $0x20, %esi
je 0x353b7
cmpl $0x22, %esi
jne 0x353af
movl $0x2, (%rdi)
jmp 0x353c0
cmpl $0x5b, %esi
je 0x353ba
cmpl $0x7e, %esi
jne 0x353af
movb $0x1, 0x8(%rdi)
jmp 0x353c0
movl $0x1, (%rdi)
jmp 0x353c0
movb $0x1, %al
retq
movl $0x3, (%rdi)
xorl %eax, %eax
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Matchers::WithinULP(double, unsigned long)
|
WithinUlpsMatcher WithinULP(double target, uint64_t maxUlpDiff) {
return WithinUlpsMatcher(target, maxUlpDiff, Detail::FloatingPointKind::Double);
}
|
movq %rdi, %rax
leaq 0x18(%rdi), %rcx
movq %rcx, 0x8(%rdi)
movq $0x0, 0x10(%rdi)
movb $0x0, 0x18(%rdi)
leaq 0x10c4e1(%rip), %rcx # 0x144298
movq %rcx, (%rdi)
movsd %xmm0, 0x28(%rdi)
movq %rsi, 0x30(%rdi)
movb $0x1, 0x38(%rdi)
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Matchers::AllTrue()
|
AllTrueMatcher AllTrue() { return AllTrueMatcher{}; }
|
movq %rdi, %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rdi)
movups %xmm0, 0x10(%rdi)
xorl %ecx, %ecx
movq %rcx, 0x20(%rdi)
leaq 0x18(%rdi), %rdx
movq %rdx, 0x8(%rdi)
movq %rcx, 0x10(%rdi)
movb $0x0, 0x18(%rdi)
leaq 0x10da8b(%rip), %rcx # 0x145a60
movq %rcx, (%rdi)
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Matchers::AnyTrue()
|
AnyTrueMatcher AnyTrue() { return AnyTrueMatcher{}; }
|
movq %rdi, %rax
xorps %xmm0, %xmm0
movups %xmm0, (%rdi)
movups %xmm0, 0x10(%rdi)
xorl %ecx, %ecx
movq %rcx, 0x20(%rdi)
leaq 0x18(%rdi), %rdx
movq %rdx, 0x8(%rdi)
movq %rcx, 0x10(%rdi)
movb $0x0, 0x18(%rdi)
leaq 0x10da67(%rip), %rcx # 0x145ae0
movq %rcx, (%rdi)
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Matchers::ContainsSubstring(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, Catch::CaseSensitive)
|
StringContainsMatcher ContainsSubstring( std::string const& str, CaseSensitive caseSensitivity ) {
return StringContainsMatcher( CasedString( str, caseSensitivity) );
}
|
pushq %r14
pushq %rbx
subq $0x28, %rsp
movq %rsi, %rax
movq %rdi, %rbx
leaq 0x8(%rsp), %rdi
movl %edx, -0x8(%rdi)
movq %rsp, %r14
movq %r14, %rsi
movq %rax, %rdx
callq 0x38096
leaq 0xc1841(%rip), %rsi # 0xf9f7b
movl $0x8, %edx
movq %rbx, %rdi
movq %r14, %rcx
callq 0x380ec
leaq 0x10bc07(%rip), %rax # 0x144358
movq %rax, (%rbx)
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x3876f
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rax
addq $0x28, %rsp
popq %rbx
popq %r14
retq
movq %rax, %rbx
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x38798
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::ReporterBase::listTests(std::vector<Catch::TestCaseHandle, std::allocator<Catch::TestCaseHandle>> const&)
|
void ReporterBase::listTests(std::vector<TestCaseHandle> const& tests) {
defaultListTests(m_stream,
m_colour.get(),
tests,
m_config->hasTestFilters(),
m_config->verbosity());
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movq %rsi, %rbx
movq %rdi, %r14
movq 0x10(%rdi), %rdi
movq 0x20(%r14), %r15
movq 0x28(%r14), %r12
movq (%rdi), %rax
callq *0x70(%rax)
movl %eax, %ebp
movq 0x10(%r14), %rdi
movq (%rdi), %rax
callq *0xb0(%rax)
movzbl %bpl, %ecx
movq %r15, %rdi
movq %r12, %rsi
movq %rbx, %rdx
movl %eax, %r8d
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0x394a2
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::defaultListTags(std::ostream&, std::vector<Catch::TagInfo, std::allocator<Catch::TagInfo>> const&, bool)
|
void defaultListTags( std::ostream& out,
std::vector<TagInfo> const& tags,
bool isFiltered ) {
if ( isFiltered ) {
out << "Tags for matching test cases:\n";
} else {
out << "All available tags:\n";
}
for ( auto const& tagCount : tags ) {
ReusableStringStream rss;
rss << " " << std::setw( 2 ) << tagCount.count << " ";
auto str = rss.str();
auto wrapper = TextFlow::Column( tagCount.all() )
.initialIndent( 0 )
.indent( str.size() )
.width( CATCH_CONFIG_CONSOLE_WIDTH - 10 );
out << str << wrapper << '\n';
}
out << pluralise(tags.size(), "tag"_sr) << "\n\n" << std::flush;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xd8, %rsp
movq %rsi, %rbx
testl %edx, %edx
movq %rdi, 0x8(%rsp)
je 0x3997a
leaq 0xc0aca(%rip), %rsi # 0xfa43d
movl $0x1e, %edx
jmp 0x39986
leaq 0xc0adb(%rip), %rsi # 0xfa45c
movl $0x14, %edx
callq 0x12630
movq (%rbx), %r15
movq %rbx, 0x50(%rsp)
movq 0x8(%rbx), %r12
cmpq %r12, %r15
je 0x39b75
leaq 0x68(%rsp), %rbx
leaq 0xb0(%rsp), %r14
leaq 0xa0(%rsp), %r13
callq 0x5b04c
leaq 0x8(%rax), %rdi
callq 0x4c260
movq %rax, %rbp
movq %rax, 0x90(%rsp)
callq 0x5b04c
movq 0x8(%rax), %rax
movq (%rax,%rbp,8), %rbp
movq %rbp, 0x98(%rsp)
movl $0x2, %edx
movq %rbp, %rdi
leaq 0xc0e69(%rip), %rsi # 0xfa85b
callq 0x12630
movq (%rbp), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rbp,%rax)
movq 0x30(%r15), %rsi
movq %rbp, %rdi
callq 0x12380
movl $0x2, %edx
movq %rbp, %rdi
leaq 0xc0e38(%rip), %rsi # 0xfa85b
callq 0x12630
addq $0x8, %rbp
leaq 0x10(%rsp), %rdi
movq %rbp, %rsi
callq 0x12870
leaq 0x30(%rsp), %rdi
movq %r15, %rsi
callq 0x2e21a
movq %rbx, 0x58(%rsp)
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %rdx
addq %rsi, %rdx
leaq 0x58(%rsp), %rdi
callq 0x53b7e
movq $0x4f, 0x78(%rsp)
xorps %xmm0, %xmm0
movups %xmm0, 0x18(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x80(%rsp)
movq $0x46, 0x78(%rsp)
movq %r14, 0xa0(%rsp)
movq 0x58(%rsp), %rsi
movq 0x60(%rsp), %rdx
addq %rsi, %rdx
movq %r13, %rdi
callq 0x53b7e
movq 0x20(%rbx), %rax
movq %rax, 0x20(%r14)
movups 0x10(%rbx), %xmm0
movups %xmm0, 0x10(%r14)
movq 0x58(%rsp), %rdi
cmpq %rbx, %rdi
je 0x39acd
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x30(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x39ae9
movq 0x40(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
movq 0x8(%rsp), %rdi
callq 0x12630
movq %rax, %rbp
movq %rax, %rdi
movq %r13, %rsi
callq 0x1eb78
movb $0xa, 0x58(%rsp)
movl $0x1, %edx
movq %rbp, %rdi
leaq 0x58(%rsp), %rsi
callq 0x12630
movq 0xa0(%rsp), %rdi
cmpq %r14, %rdi
je 0x39b3f
movq 0xb0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10(%rsp), %rdi
leaq 0x20(%rsp), %rax
cmpq %rax, %rdi
je 0x39b5b
movq 0x20(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x90(%rsp), %rdi
callq 0x2fb24
addq $0x38, %r15
cmpq %r12, %r15
jne 0x399b5
movq 0x50(%rsp), %rcx
movq 0x8(%rcx), %rax
subq (%rcx), %rax
sarq $0x3, %rax
movabsq $0x6db6db6db6db6db7, %rcx # imm = 0x6DB6DB6DB6DB6DB7
imulq %rax, %rcx
leaq 0xa0(%rsp), %rsi
movq %rcx, (%rsi)
leaq 0xc08cc(%rip), %rax # 0xfa471
movq %rax, 0x8(%rsi)
movq $0x3, 0x10(%rsi)
movq 0x8(%rsp), %rbx
movq %rbx, %rdi
callq 0x33704
leaq 0xc04f9(%rip), %rsi # 0xfa0be
movl $0x2, %edx
movq %rbx, %rdi
callq 0x12630
movq %rbx, %rdi
callq 0x12390
addq $0xd8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x39c2a
jmp 0x39c2a
movq %rax, %r15
jmp 0x39c4f
movq %rax, %r15
jmp 0x39c14
movq %rax, %r15
movq 0x58(%rsp), %rdi
cmpq %rbx, %rdi
je 0x39c14
movq 0x68(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x30(%rsp), %rdi
leaq 0x40(%rsp), %rax
cmpq %rax, %rdi
je 0x39c4f
movq 0x40(%rsp), %rsi
jmp 0x39c47
movq %rax, %r15
jmp 0x39c6b
movq %rax, %r15
movq 0xa0(%rsp), %rdi
cmpq %r14, %rdi
je 0x39c4f
movq 0xb0(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x10(%rsp), %rdi
leaq 0x20(%rsp), %rax
cmpq %rax, %rdi
je 0x39c6b
movq 0x20(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x90(%rsp), %rdi
callq 0x2fb24
movq %r15, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::(anonymous namespace)::SummaryColumn::addRow(unsigned long) &&
|
SummaryColumn&& addRow( std::uint64_t count ) && {
std::string row = std::to_string(count);
auto const new_width = std::max( m_width, row.size() );
if ( new_width > m_width ) {
for ( auto& oldRow : m_rows ) {
oldRow.insert( 0, new_width - m_width, ' ' );
}
} else {
row.insert( 0, m_width - row.size(), ' ' );
}
m_width = new_width;
m_rows.push_back( row );
return std::move( *this );
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x20, %rsp
movq %rsi, %r14
movq %rdi, %rbx
movl $0x1, %esi
cmpq $0xa, %r14
jb 0x3e4e7
movl $0x4, %esi
movabsq $0x346dc5d63886594b, %rdi # imm = 0x346DC5D63886594B
movq %r14, %rcx
cmpq $0x63, %rcx
jbe 0x3e4e0
cmpq $0x3e7, %rcx # imm = 0x3E7
jbe 0x3e4e5
cmpq $0x2710, %rcx # imm = 0x2710
jb 0x3e4e7
movq %rcx, %rax
mulq %rdi
shrq $0xb, %rdx
addl $0x4, %esi
cmpq $0x1869f, %rcx # imm = 0x1869F
movq %rdx, %rcx
ja 0x3e4aa
addl $-0x3, %esi
jmp 0x3e4e7
addl $-0x2, %esi
jmp 0x3e4e7
decl %esi
movl %esi, %esi
leaq 0x10(%rsp), %r12
movq %r12, -0x10(%r12)
movq %rsp, %r15
movq %r15, %rdi
xorl %edx, %edx
callq 0x12660
movq (%r15), %rdi
movl 0x8(%r15), %esi
movq %r14, %rdx
callq 0x5395f
movq 0x8(%r15), %r15
movq 0x28(%rbx), %rax
movq %rax, %rdx
subq %r15, %rdx
cmovaq %rax, %r15
jae 0x3e54c
movq 0x30(%rbx), %r14
movq 0x38(%rbx), %r13
cmpq %r13, %r14
je 0x3e55b
movq %r15, %rdx
subq 0x28(%rbx), %rdx
movq %r14, %rdi
xorl %esi, %esi
movl $0x20, %ecx
callq 0x12ac0
addq $0x20, %r14
jmp 0x3e52b
movq %rsp, %rdi
xorl %esi, %esi
movl $0x20, %ecx
callq 0x12ac0
movq %r15, 0x28(%rbx)
leaq 0x30(%rbx), %rdi
movq %rsp, %rsi
callq 0x4cae4
movq (%rsp), %rdi
cmpq %r12, %rdi
je 0x3e581
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rax
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
jmp 0x3e594
movq %rax, %rbx
movq (%rsp), %rdi
cmpq %r12, %rdi
je 0x3e5ad
movq 0x10(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::(anonymous namespace)::TapAssertionPrinter::printOriginalExpression() const
|
void printOriginalExpression() const {
if (result.hasExpression()) {
stream << ' ' << result.getExpression();
}
}
|
pushq %r14
pushq %rbx
subq $0x28, %rsp
movq 0x8(%rdi), %rax
cmpq $0x0, 0x28(%rax)
je 0x45d7c
movq %rdi, %rbx
movq (%rdi), %rdi
leaq 0x8(%rsp), %rsi
movb $0x20, (%rsi)
movl $0x1, %edx
callq 0x12630
movq %rax, %r14
movq 0x8(%rbx), %rsi
leaq 0x8(%rsp), %rbx
movq %rbx, %rdi
callq 0x1be3c
movq (%rbx), %rsi
movq 0x8(%rbx), %rdx
movq %r14, %rdi
callq 0x12630
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x45d7c
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
addq $0x28, %rsp
popq %rbx
popq %r14
retq
movq %rax, %rbx
leaq 0x18(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x45da2
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::(anonymous namespace)::TapAssertionPrinter::printMessage()
|
void printMessage() {
if (itMessage != messages.end()) {
stream << " '" << itMessage->message << '\'';
++itMessage;
}
}
|
movq 0x10(%rdi), %rax
movq 0x18(%rdi), %rcx
cmpq 0x8(%rax), %rcx
je 0x46107
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq (%rdi), %r14
leaq 0xb6395(%rip), %rsi # 0xfc45a
movl $0x2, %edx
movq %r14, %rdi
callq 0x12630
movq 0x18(%rbx), %rax
movq 0x10(%rax), %rsi
movq 0x18(%rax), %rdx
movq %r14, %rdi
callq 0x12630
leaq 0x7(%rsp), %rsi
movb $0x27, (%rsi)
movl $0x1, %edx
movq %rax, %rdi
callq 0x12630
addq $0x48, 0x18(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::(anonymous namespace)::TapAssertionPrinter::printExpressionWas()
|
void printExpressionWas() {
if (result.hasExpression()) {
stream << ';';
stream << colourImpl->guardColour( tapDimColour )
<< " expression was:";
printOriginalExpression();
}
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq 0x8(%rdi), %rax
cmpq $0x0, 0x28(%rax)
je 0x46187
movq %rdi, %rbx
movq (%rdi), %rdi
movq %rsp, %r15
movb $0x3b, (%r15)
movl $0x1, %edx
movq %r15, %rsi
callq 0x12630
movq (%rbx), %r14
movq 0x30(%rbx), %rax
movq %rax, (%r15)
movl $0x17, 0x8(%r15)
movb $0x0, 0xc(%r15)
movq %rsp, %rdi
movq %r14, %rsi
callq 0x2d292
leaq 0xb57a3(%rip), %rsi # 0xfb902
movl $0x10, %edx
movq %r14, %rdi
callq 0x12630
cmpb $0x1, 0xc(%rsp)
jne 0x4617f
movq (%rsp), %rdi
movq (%rdi), %rax
xorl %esi, %esi
callq *0x10(%rax)
movq %rbx, %rdi
callq 0x45d14
addq $0x10, %rsp
popq %rbx
popq %r14
popq %r15
retq
jmp 0x461b1
movq %rax, %rbx
cmpb $0x1, 0xc(%rsp)
jne 0x461a9
movq (%rsp), %rdi
movq (%rdi), %rax
xorl %esi, %esi
callq *0x10(%rax)
movq %rbx, %rdi
callq 0x129d0
movq %rax, %rdi
callq 0x1a51a
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
Catch::Singleton<Catch::(anonymous namespace)::RegistryHub, Catch::IRegistryHub, Catch::IMutableRegistryHub>::~Singleton()
|
static auto getInternal() -> Singleton* {
static Singleton* s_instance = nullptr;
if( !s_instance ) {
s_instance = new Singleton;
addSingleton( s_instance );
}
return s_instance;
}
|
pushq %rbx
movq %rdi, %rbx
leaq 0xffc3b(%rip), %rax # 0x145ff8
movq %rax, (%rdi)
leaq 0xffcb1(%rip), %rax # 0x146078
movq %rax, 0x8(%rdi)
leaq 0xff3ce(%rip), %rax # 0x1457a0
movq %rax, 0x110(%rdi)
addq $0x118, %rdi # imm = 0x118
callq 0x539da
leaq 0xf8(%rbx), %rdi
callq 0x550fe
leaq 0xfdd40(%rip), %rax # 0x144138
movq %rax, 0xc0(%rbx)
leaq 0xc8(%rbx), %rdi
callq 0x52f98
leaq 0xfdbb6(%rip), %rax # 0x143fc8
movq %rax, 0xa0(%rbx)
leaq 0xa8(%rbx), %rdi
callq 0x4bfac
leaq 0x98(%rbx), %rdi
callq 0x4c094
leaq 0x10(%rbx), %rdi
callq 0x4e0e4
movl $0x138, %esi # imm = 0x138
movq %rbx, %rdi
popq %rbx
jmp 0x12570
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Catch::(anonymous namespace)::RegistryHub::registerReporter(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, Catch::Detail::unique_ptr<Catch::IReporterFactory>)
|
void registerReporter( std::string const& name, IReporterFactoryPtr factory ) override {
m_reporterRegistry.registerReporter( name, CATCH_MOVE(factory) );
}
|
pushq %rbx
subq $0x10, %rsp
addq $0x98, %rdi
movq (%rdx), %rcx
leaq 0x8(%rsp), %rax
movq %rcx, (%rax)
movq $0x0, (%rdx)
movq %rax, %rdx
callq 0x2edf2
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x464a4
movq (%rdi), %rax
callq *0x8(%rax)
addq $0x10, %rsp
popq %rbx
retq
movq %rax, %rbx
movq 0x8(%rsp), %rdi
testq %rdi, %rdi
je 0x464bd
movq (%rdi), %rax
callq *0x8(%rax)
movq %rbx, %rdi
callq 0x129d0
nop
|
/jbaldwin[P]libcappuccino/test/catch.cpp
|
non-virtual thunk to Catch::Singleton<Catch::(anonymous namespace)::RegistryHub, Catch::IRegistryHub, Catch::IMutableRegistryHub>::~Singleton()
|
static auto getInternal() -> Singleton* {
static Singleton* s_instance = nullptr;
if( !s_instance ) {
s_instance = new Singleton;
addSingleton( s_instance );
}
return s_instance;
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %r14
leaq -0x130(%rdi), %rbx
leaq 0xff7db(%rip), %rax # 0x145ff8
movq %rax, -0x130(%rdi)
leaq 0xff84d(%rip), %rax # 0x146078
movq %rax, -0x128(%rdi)
leaq 0xfef67(%rip), %rax # 0x1457a0
movq %rax, -0x20(%rdi)
addq $-0x18, %rdi
callq 0x539da
leaq -0x38(%r14), %rdi
callq 0x550fe
leaq 0xfd8e2(%rip), %rax # 0x144138
movq %rax, -0x70(%r14)
leaq -0x68(%r14), %rdi
callq 0x52f98
leaq 0xfd75e(%rip), %rax # 0x143fc8
movq %rax, -0x90(%r14)
leaq -0x88(%r14), %rdi
callq 0x4bfac
leaq -0x98(%r14), %rdi
callq 0x4c094
addq $-0x120, %r14 # imm = 0xFEE0
movq %r14, %rdi
callq 0x4e0e4
movl $0x138, %esi # imm = 0x138
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
jmp 0x12570
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Catch::Clara::Detail::BoundLambda<Catch::makeCommandLineParser(Catch::ConfigData&)::$_7>::setValue(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
auto setValue( std::string const& arg )
-> ParserResult override {
return invokeLambda<typename UnaryLambdaTraits<L>::ArgType>(
m_lambda, arg );
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x128, %rsp # imm = 0x128
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x58(%rsp), %r12
movq %r12, -0x10(%r12)
movq $0x0, -0x8(%r12)
movb $0x0, (%r12)
leaq 0x48(%rsp), %rdi
movq %rdx, %rsi
callq 0x12290
xorl %eax, %eax
leaq 0x88(%rsp), %r13
movq %rax, -0x18(%r13)
leaq 0x100313(%rip), %rbp # 0x146fb8
movq %rbp, -0x20(%r13)
movq %r13, -0x10(%r13)
movq %rax, -0x8(%r13)
movb $0x0, (%r13)
movq 0x50(%rsp), %rdx
testq %rdx, %rdx
je 0x46d6b
movq 0x48(%rsp), %rsi
leaq 0x98(%rsp), %rdi
callq 0x2f315
movq 0x98(%rsp), %r15
testq %r15, %r15
je 0x46df9
callq 0x461bc
movq (%rax), %rcx
movq %rax, %rdi
callq *0x10(%rcx)
movq (%rax), %rbp
testq %rbp, %rbp
je 0x47053
leaq 0x18(%rbp), %rdi
movq %r15, %rsi
callq 0x579dc
addq $0x20, %rbp
cmpq %rbp, %rax
je 0x46e4e
movq 0x98(%rsp), %rsi
testq %rsi, %rsi
leaq 0x10028c(%rip), %rbp # 0x146fb8
je 0x47072
movq 0x20(%r15), %r15
movl $0xc8, %edi
addq 0x8(%r14), %rdi
callq 0x54188
testq %r15, %r15
je 0x46ea8
xorl %eax, %eax
movq %rax, 0x8(%rbx)
movq %rbp, (%rbx)
leaq 0x20(%rbx), %rcx
movq %rcx, 0x10(%rbx)
movq %rax, 0x18(%rbx)
movb $0x0, 0x20(%rbx)
jmp 0x46ffb
leaq 0xa8(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x8(%rsp), %rsi
movq $0x1d, (%rsi)
leaq 0x98(%rsp), %rdi
xorl %edx, %edx
callq 0x129e0
leaq 0x98(%rsp), %rdx
movq %rax, (%rdx)
movq 0x8(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movups 0xb5666(%rip), %xmm0 # 0xfc413
movups %xmm0, 0xd(%rax)
movups 0xb564e(%rip), %xmm0 # 0xfc406
movups %xmm0, (%rax)
movq %rcx, 0x8(%rdx)
movq (%rdx), %rax
movb $0x0, (%rax,%rcx)
movq %rbx, %rdi
movl $0x2, %esi
callq 0x5662e
movq 0x98(%rsp), %rdi
cmpq %r14, %rdi
je 0x47008
movq 0xa8(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47008
leaq 0xb5624(%rip), %rsi # 0xfc424
leaq 0x28(%rsp), %rdi
leaq 0x48(%rsp), %rdx
callq 0x4b301
leaq 0xb5645(%rip), %rsi # 0xfc45b
leaq 0x28(%rsp), %rdi
callq 0x12a80
leaq 0x18(%rsp), %r14
movq %r14, -0x10(%r14)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x46f67
movq %rdx, 0x8(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x18(%rsp)
jmp 0x46f6e
leaq 0xb55ef(%rip), %rsi # 0xfc444
leaq 0x28(%rsp), %rdi
movq %r15, %rdx
leaq 0x100154(%rip), %rbp # 0x146fb8
callq 0x4b301
leaq 0xb55ed(%rip), %rsi # 0xfc45d
leaq 0x28(%rsp), %rdi
callq 0x12a80
leaq 0x18(%rsp), %r14
movq %r14, -0x10(%r14)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x46f99
movq %rdx, 0x8(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x18(%rsp)
jmp 0x46fa0
movq 0x8(%r14), %rcx
movq 0xc8(%rcx), %rax
movq 0xd0(%rcx), %rcx
cmpq %rcx, %rax
je 0x46d4d
xorl %edx, %edx
cmpq $0x1, 0x20(%rax)
adcl $0x0, %edx
addq $0x88, %rax
cmpq %rcx, %rax
jne 0x46ec5
cmpl $0x2, %edx
jb 0x46d4d
leaq 0x18(%rsp), %r14
movq %r14, -0x10(%r14)
leaq 0x28(%rsp), %rsi
movq $0x33, (%rsi)
leaq 0x8(%rsp), %rdi
xorl %edx, %edx
callq 0x129e0
leaq 0x8(%rsp), %rdx
movq %rax, (%rdx)
movq 0x28(%rsp), %rcx
movq %rcx, 0x10(%rdx)
movups 0xb558c(%rip), %xmm0 # 0xfc4a6
movups %xmm0, 0x20(%rax)
movups 0xb5571(%rip), %xmm0 # 0xfc496
movups %xmm0, 0x10(%rax)
movups 0xb5556(%rip), %xmm0 # 0xfc486
movups %xmm0, (%rax)
movl $0x2e656c69, 0x2f(%rax) # imm = 0x2E656C69
movq %rcx, 0x8(%rdx)
movb $0x0, (%rax,%rcx)
movq %rbx, %rdi
movl $0x2, %esi
callq 0x5662e
movq 0x8(%rsp), %rdi
cmpq %r14, %rdi
je 0x46ffb
movq 0x18(%rsp), %rsi
jmp 0x46ff3
movups (%rcx), %xmm0
movups %xmm0, (%r14)
movq 0x8(%rax), %rsi
leaq 0x8(%rsp), %rdx
movq %rsi, 0x8(%rdx)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq %rbx, %rdi
movl $0x2, %esi
callq 0x5662e
jmp 0x46fc9
movups (%rcx), %xmm0
movups %xmm0, (%r14)
movq 0x8(%rax), %rsi
leaq 0x8(%rsp), %rdx
movq %rsi, 0x8(%rdx)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq %rbx, %rdi
movl $0x2, %esi
callq 0x5662e
movq 0x8(%rsp), %rdi
cmpq %r14, %rdi
je 0x46fe0
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x38(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x46ffb
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x98(%rsp), %rdi
callq 0x58280
movq %rbp, 0x68(%rsp)
movq 0x78(%rsp), %rdi
cmpq %r13, %rdi
je 0x47027
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x48(%rsp), %rdi
cmpq %r12, %rdi
je 0x4703e
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rax
addq $0x128, %rsp # imm = 0x128
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0xb3ee5(%rip), %rdi # 0xfaf3f
leaq 0xb3bd6(%rip), %rsi # 0xfac37
leaq 0xb59ff(%rip), %rcx # 0xfca67
movl $0x389, %edx # imm = 0x389
callq 0x12340
leaq 0xb3bb0(%rip), %rdi # 0xfac29
leaq 0xb3bb7(%rip), %rsi # 0xfac37
leaq 0xb5433(%rip), %rcx # 0xfc4ba
movl $0xe93, %edx # imm = 0xE93
callq 0x12340
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r14, %rdi
je 0x4711b
movq 0x18(%rsp), %rsi
jmp 0x470df
jmp 0x47118
jmp 0x470ad
jmp 0x470c9
jmp 0x47118
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r14, %rdi
je 0x470cc
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x470cc
movq %rax, %rbx
leaq 0x38(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x4711b
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x4711b
jmp 0x47118
jmp 0x47118
movq %rax, %rbx
movq 0x98(%rsp), %rdi
cmpq %r14, %rdi
je 0x47128
movq 0xa8(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47128
jmp 0x47113
jmp 0x47118
movq %rax, %rbx
jmp 0x47128
movq %rax, %rbx
leaq 0x98(%rsp), %rdi
callq 0x58280
leaq 0xffe89(%rip), %rax # 0x146fb8
movq %rax, 0x68(%rsp)
movq 0x78(%rsp), %rdi
cmpq %r13, %rdi
je 0x4714e
movq 0x88(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xffebb(%rip), %rax # 0x147010
movq %rax, 0x68(%rsp)
jmp 0x4715f
movq %rax, %rbx
movq 0x48(%rsp), %rdi
cmpq %r12, %rdi
je 0x47176
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Catch::Clara::Detail::BoundFlagLambda<Catch::makeCommandLineParser(Catch::ConfigData&)::$_10>::setFlag(bool)
|
auto setFlag( bool flag ) -> ParserResult override {
return LambdaInvoker<typename UnaryLambdaTraits<
L>::ReturnType>::invoke( m_lambda, flag );
}
|
movq %rdi, %rax
movq 0x8(%rsi), %rcx
movl $0x1, 0xc(%rcx)
xorl %ecx, %ecx
movq %rcx, 0x8(%rdi)
leaq 0xffda5(%rip), %rdx # 0x146fb8
movq %rdx, (%rdi)
leaq 0x20(%rdi), %rdx
movq %rdx, 0x10(%rdi)
movq %rcx, 0x18(%rdi)
movb $0x0, 0x20(%rdi)
retq
nop
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Catch::Clara::Detail::BoundLambda<Catch::makeCommandLineParser(Catch::ConfigData&)::$_12>::setValue(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
auto setValue( std::string const& arg )
-> ParserResult override {
return invokeLambda<typename UnaryLambdaTraits<L>::ArgType>(
m_lambda, arg );
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x40, %rsp
movq %rsi, %r14
movq %rdi, %rbx
leaq 0xf(%rsp), %rax
movb $0x0, (%rax)
leaq 0x10(%rsp), %r15
movq %r15, %rdi
movq %rdx, %rsi
movq %rax, %rdx
callq 0x2ab3c
movl 0x8(%r15), %eax
testl %eax, %eax
je 0x4771b
movl %eax, 0x8(%rbx)
leaq 0xff8c0(%rip), %r15 # 0x146fb8
movq %r15, (%rbx)
leaq 0x10(%rbx), %rdi
leaq 0x20(%rbx), %rax
movq %rax, 0x10(%rbx)
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %rdx
addq %rsi, %rdx
callq 0x53b7e
jmp 0x4774e
movzbl 0xf(%rsp), %eax
movl $0x2, %ecx
subl %eax, %ecx
movq 0x8(%r14), %rax
movl %ecx, 0x48(%rax)
xorl %eax, %eax
movq %rax, 0x8(%rbx)
leaq 0xff87d(%rip), %rcx # 0x146fb8
movq %rcx, (%rbx)
leaq 0x20(%rbx), %rcx
movq %rcx, 0x10(%rbx)
movq %rax, 0x18(%rbx)
movb $0x0, 0x20(%rbx)
leaq 0xff863(%rip), %rax # 0x146fb8
leaq 0x30(%rsp), %rcx
movq %rax, -0x20(%rcx)
movq -0x10(%rcx), %rdi
cmpq %rcx, %rdi
je 0x47774
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rax
addq $0x40, %rsp
popq %rbx
popq %r14
popq %r15
retq
movq %rax, %r14
leaq 0xff885(%rip), %rax # 0x147010
movq %rax, (%rbx)
leaq 0x30(%rsp), %rax
movq %r15, -0x20(%rax)
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x477ad
movq 0x30(%rsp), %rsi
incq %rsi
callq 0x12570
movq %r14, %rdi
callq 0x129d0
nop
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Catch::Clara::Detail::BoundLambda<Catch::makeCommandLineParser(Catch::ConfigData&)::$_6>::setValue(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
|
auto setValue( std::string const& arg )
-> ParserResult override {
return invokeLambda<typename UnaryLambdaTraits<L>::ArgType>(
m_lambda, arg );
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x18(%rsp), %r15
movq %r15, -0x10(%r15)
movq $0x0, -0x8(%r15)
movb $0x0, (%r15)
leaq 0x8(%rsp), %rdi
movq %rdx, %rsi
callq 0x12290
xorl %ebp, %ebp
leaq 0xa8(%rsp), %r12
movq %rbp, -0x18(%r12)
leaq 0xff21f(%rip), %r13 # 0x146fb8
movq %r13, -0x20(%r12)
movq %r12, -0x10(%r12)
movq %rbp, -0x8(%r12)
movb $0x0, (%r12)
leaq 0x48(%rsp), %rdi
leaq 0x8(%rsp), %rsi
callq 0x28665
leaq 0xb47cd(%rip), %rsi # 0xfc590
leaq 0x48(%rsp), %rdi
callq 0x12140
testl %eax, %eax
je 0x47e6a
leaq 0xb47ba(%rip), %rsi # 0xfc596
leaq 0x48(%rsp), %rdi
callq 0x12140
testl %eax, %eax
je 0x47e5e
leaq 0xb10f5(%rip), %rsi # 0xf8ee6
leaq 0x48(%rsp), %rdi
callq 0x12140
testl %eax, %eax
je 0x47e65
leaq 0xb4797(%rip), %rsi # 0xfc59d
leaq 0x68(%rsp), %rdi
leaq 0x8(%rsp), %rdx
callq 0x4b301
leaq 0x68(%rsp), %rdi
movq 0x8(%rdi), %rsi
movl $0x1, %ecx
xorl %edx, %edx
movl $0x27, %r8d
callq 0x12a30
leaq 0x38(%rsp), %r14
movq %r14, -0x10(%r14)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x47ef6
movq %rdx, 0x28(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x38(%rsp)
jmp 0x47efd
movl $0x1, %ebp
jmp 0x47e6a
movl $0x2, %ebp
movq 0x8(%r14), %rax
movl %ebp, 0x40(%rax)
xorl %eax, %eax
movq %rax, 0x8(%rbx)
movq %r13, (%rbx)
leaq 0x20(%rbx), %rcx
movq %rcx, 0x10(%rbx)
movq %rax, 0x18(%rbx)
movb $0x0, 0x20(%rbx)
leaq 0x58(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x47ea5
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x12570
movq %r13, 0x88(%rsp)
movq 0x98(%rsp), %rdi
cmpq %r12, %rdi
je 0x47eca
movq 0xa8(%rsp), %rsi
incq %rsi
callq 0x12570
movq 0x8(%rsp), %rdi
cmpq %r15, %rdi
je 0x47ee1
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movups (%rcx), %xmm0
movups %xmm0, (%r14)
movq 0x8(%rax), %rsi
leaq 0x28(%rsp), %rdx
movq %rsi, 0x8(%rdx)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq %rbx, %rdi
movl $0x2, %esi
callq 0x5662e
movq 0x28(%rsp), %rdi
cmpq %r14, %rdi
je 0x47f3d
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0x78(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x47e8a
movq 0x78(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47e8a
movq %rax, %rbx
movq 0x28(%rsp), %rdi
cmpq %r14, %rdi
je 0x47f80
movq 0x38(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47f80
movq %rax, %rbx
leaq 0x78(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x47fa0
movq 0x78(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47fa0
movq %rax, %rbx
leaq 0x58(%rsp), %rax
movq -0x10(%rax), %rdi
cmpq %rax, %rdi
je 0x47fc0
movq 0x58(%rsp), %rsi
incq %rsi
callq 0x12570
jmp 0x47fc0
movq %rax, %rbx
movq %r13, 0x88(%rsp)
movq 0x98(%rsp), %rdi
cmpq %r12, %rdi
je 0x47fe5
movq 0xa8(%rsp), %rsi
incq %rsi
callq 0x12570
leaq 0xff024(%rip), %rax # 0x147010
movq %rax, 0x88(%rsp)
jmp 0x47ff9
movq %rax, %rbx
movq 0x8(%rsp), %rdi
cmpq %r15, %rdi
je 0x48010
movq 0x18(%rsp), %rsi
incq %rsi
callq 0x12570
movq %rbx, %rdi
callq 0x129d0
|
/jbaldwin[P]libcappuccino/test/catch.hpp
|
Subsets and Splits
SQL Console for LLM4Binary/decompile-bench
Filters out entries with file names ending in .cpp, providing a basic subset of the dataset that excludes C++ files.