name
string
code
string
asm
string
file
string
bool run_test_type_conversion<std::shared_ptr<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const>>(chaiscript::Boxed_Value const&, bool)
bool run_test_type_conversion(const Boxed_Value &bv, bool expectedpass) { try { To ret = chaiscript::boxed_cast<To>(bv); use(ret); } catch (const chaiscript::exception::bad_boxed_cast &e) { if (expectedpass) { std::cerr << "Failure in run_test_type_conversion: " << e.what() << '\n'; return false; } return true; } catch (const std::exception &e) { std::cerr << "Unexpected standard exception when attempting cast_conversion: " << e.what() << '\n'; return false; } catch (...) { std::cerr << "Unexpected unknown exception when attempting cast_conversion.\n"; return false; } return expectedpass; }
pushq %r14 pushq %rbx subq $0x18, %rsp movl %esi, %ebx movq %rdi, %rsi leaq 0x8(%rsp), %rdi xorl %edx, %edx callq 0x76368 movq 0x10(%rsp), %rdi testq %rdi, %rdi je 0x76252 movq 0x27d40(%rip), %rax # 0x9df70 cmpb $0x0, (%rax) je 0x7623a incl 0x8(%rdi) jmp 0x7623e lock incl 0x8(%rdi) callq 0x376e2 movq 0x10(%rsp), %rdi testq %rdi, %rdi je 0x76252 callq 0x376e2 movl %ebx, %eax addq $0x18, %rsp popq %rbx popq %r14 retq movq %rdx, %r14 movq %rax, %rdi cmpl $0x3, %r14d jne 0x762c3 callq 0x2e060 testb %bl, %bl je 0x762b9 movq %rax, %r14 movq 0x27d5d(%rip), %rdi # 0x9dfd8 leaq 0x5ed2(%rip), %rsi # 0x7c154 movl $0x25, %edx callq 0x2e170 movq (%r14), %rax movq %r14, %rdi callq *0x10(%rax) movq 0x27d3c(%rip), %rdi # 0x9dfd8 movq %rax, %rsi callq 0x2e140 leaq 0x8(%rsp), %rsi movb $0xa, (%rsi) movl $0x1, %edx movq %rax, %rdi callq 0x2e170 xorb $0x1, %bl callq 0x2e1f0 jmp 0x76252 callq 0x2e060 cmpl $0x2, %r14d jne 0x76318 movq %rax, %rbx movq 0x27d00(%rip), %rdi # 0x9dfd8 leaq 0x5e35(%rip), %rsi # 0x7c114 movl $0x3f, %edx callq 0x2e170 movq (%rbx), %rax movq %rbx, %rdi callq *0x10(%rax) movq 0x27cdf(%rip), %rdi # 0x9dfd8 movq %rax, %rsi callq 0x2e140 leaq 0x8(%rsp), %rsi movb $0xa, (%rsi) movl $0x1, %edx movq %rax, %rdi callq 0x2e170 jmp 0x76330 movq 0x27cb9(%rip), %rdi # 0x9dfd8 leaq 0x5daf(%rip), %rsi # 0x7c0d5 movl $0x3e, %edx callq 0x2e170 callq 0x2e1f0 xorl %ebx, %ebx jmp 0x76252 movq %rax, %rbx callq 0x2e1f0 jmp 0x76358 movq %rax, %rbx callq 0x2e1f0 jmp 0x76358 movq %rax, %rbx callq 0x2e1f0 movq %rbx, %rdi callq 0x2e220 movq %rax, %rdi callq 0x3306e
/ChaiScript[P]ChaiScript/unittests/boxed_cast_test.cpp
bool chaiscript::Type_Conversions::convertable_type<std::reference_wrapper<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const>>() const
bool convertable_type() const noexcept { const auto type = user_type<T>().bare_type_info(); return thread_cache().count(type) != 0; }
pushq %rbx subq $0x10, %rsp leaq 0x25686(%rip), %rax # 0x9d9c0 movq %rax, 0x8(%rsp) callq 0x36726 movq %rax, %rbx leaq 0x8(%rsp), %rsi movq %rax, %rdi callq 0x36e60 addq $0x8, %rbx cmpq %rbx, %rax setne %al addq $0x10, %rsp popq %rbx retq movq %rax, %rdi callq 0x3306e
/ChaiScript[P]ChaiScript/include/chaiscript/utility/../dispatchkit/../dispatchkit/type_conversions.hpp
bool chaiscript::Type_Conversions::convertable_type<std::reference_wrapper<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const> const>() const
bool convertable_type() const noexcept { const auto type = user_type<T>().bare_type_info(); return thread_cache().count(type) != 0; }
pushq %rbx subq $0x10, %rsp leaq 0x247ce(%rip), %rax # 0x9daa0 movq %rax, 0x8(%rsp) callq 0x36726 movq %rax, %rbx leaq 0x8(%rsp), %rsi movq %rax, %rdi callq 0x36e60 addq $0x8, %rbx cmpq %rbx, %rax setne %al addq $0x10, %rsp popq %rbx retq movq %rax, %rdi callq 0x3306e
/ChaiScript[P]ChaiScript/include/chaiscript/utility/../dispatchkit/../dispatchkit/type_conversions.hpp
bool chaiscript::Type_Conversions::convertable_type<std::reference_wrapper<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const> const&>() const
bool convertable_type() const noexcept { const auto type = user_type<T>().bare_type_info(); return thread_cache().count(type) != 0; }
pushq %rbx subq $0x10, %rsp leaq 0x23efa(%rip), %rax # 0x9d9c0 movq %rax, 0x8(%rsp) callq 0x36726 movq %rax, %rbx leaq 0x8(%rsp), %rsi movq %rax, %rdi callq 0x36e60 addq $0x8, %rbx cmpq %rbx, %rax setne %al addq $0x10, %rsp popq %rbx retq movq %rax, %rdi callq 0x3306e
/ChaiScript[P]ChaiScript/include/chaiscript/utility/../dispatchkit/../dispatchkit/type_conversions.hpp
decltype(auto) chaiscript::boxed_cast<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>*&>(chaiscript::Boxed_Value const&, chaiscript::Type_Conversions_State const*)
decltype(auto) boxed_cast(const Boxed_Value &bv, const Type_Conversions_State *t_conversions = nullptr) { if (!t_conversions || bv.get_type_info().bare_equal(user_type<Type>()) || (t_conversions && !(*t_conversions)->convertable_type<Type>())) { try { return detail::Cast_Helper<Type>::cast(bv, t_conversions); } catch (const chaiscript::detail::exception::bad_any_cast &) { } } if (t_conversions && (*t_conversions)->convertable_type<Type>()) { try { // We will not catch any bad_boxed_dynamic_cast that is thrown, let the user get it // either way, we are not responsible if it doesn't work return (detail::Cast_Helper<Type>::cast((*t_conversions)->boxed_type_conversion<Type>(t_conversions->saves(), bv), t_conversions)); } catch (...) { try { // try going the other way return (detail::Cast_Helper<Type>::cast((*t_conversions)->boxed_type_down_conversion<Type>(t_conversions->saves(), bv), t_conversions)); } catch (const chaiscript::detail::exception::bad_any_cast &) { throw exception::bad_boxed_cast(bv.get_type_info(), typeid(Type)); } } } else { // If it's not convertable, just throw the error, don't waste the time on the // attempted dynamic_cast throw exception::bad_boxed_cast(bv.get_type_info(), typeid(Type)); } }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x50, %rsp movq %rsi, %r14 movq %rdi, %rbx testq %rsi, %rsi je 0x79cd9 movq (%rbx), %rdi leaq 0x23d8f(%rip), %rax # 0x9d9d0 movq %rsp, %rsi movq %rax, (%rsi) leaq 0x23d72(%rip), %rax # 0x9d9c0 movq %rax, 0x8(%rsi) movl $0x2, 0x10(%rsi) callq 0x366b4 testb %al, %al jne 0x79cd9 movq (%r14), %rdi callq 0x79eb4 testb %al, %al je 0x79cd9 movq (%r14), %rdi callq 0x79eb4 testb %al, %al je 0x79e40 movq (%r14), %rsi movq 0x8(%r14), %rcx leaq 0x23d44(%rip), %r12 # 0x9d9d0 leaq 0x20(%rsp), %rdx movq %r12, (%rdx) leaq 0x23d25(%rip), %r13 # 0x9d9c0 movq %r13, 0x8(%rdx) movl $0x2, 0x10(%rdx) movq %rsp, %rdi movq %rbx, %r8 callq 0x36fe0 movq (%rsp), %rdi movq 0x20(%rdi), %rdx leaq 0x23d10(%rip), %rsi # 0x9d9d0 callq 0x2e59c movq %rax, %r15 movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x79cef callq 0x376e2 jmp 0x79cef movq (%rbx), %rdi movq 0x20(%rdi), %rdx leaq 0x23ce9(%rip), %rsi # 0x9d9d0 callq 0x2e59c movq %rax, %r15 movq %r15, %rax addq $0x50, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %r15 movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x79d17 callq 0x376e2 jmp 0x79d17 movq %rax, %r15 movq %r15, %rdi callq 0x2e060 movq (%r14), %rsi movq 0x8(%r14), %rcx leaq 0x38(%rsp), %rdx movq %r12, (%rdx) movq %r13, 0x8(%rdx) movl $0x2, 0x10(%rdx) movq %rsp, %rdi movq %rbx, %r8 callq 0x3790c movq (%rsp), %rdi movq 0x20(%rdi), %rdx leaq 0x23c7d(%rip), %rsi # 0x9d9d0 callq 0x2e59c movq %rax, %r15 movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x79d6a callq 0x376e2 callq 0x2e1f0 jmp 0x79cef movq %rdx, %r14 movq %rax, %r15 movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x79d91 callq 0x376e2 jmp 0x79d91 movq %rdx, %r14 movq %rax, %r15 cmpl $0x1, %r14d jne 0x79e0c movq %r15, %rdi callq 0x2e060 movl $0x38, %edi callq 0x2e080 movq (%rbx), %rcx movq 0x10(%rcx), %rdx movq %rdx, 0x10(%rsp) movups (%rcx), %xmm0 movaps %xmm0, (%rsp) leaq 0x22ccd(%rip), %rcx # 0x9ca90 movq %rcx, (%rax) movaps (%rsp), %xmm0 movups %xmm0, 0x8(%rax) movq 
0x10(%rsp), %rcx movq %rcx, 0x18(%rax) movq %r12, 0x20(%rax) movq $0x19, 0x28(%rax) leaq 0x2453(%rip), %rcx # 0x7c23d movq %rcx, 0x30(%rax) leaq 0x22bf3(%rip), %rsi # 0x9c9e8 movq 0x2419c(%rip), %rdx # 0x9df98 movq %rax, %rdi callq 0x2e200 movq %rax, %r15 callq 0x2e1f0 callq 0x2e1f0 jmp 0x79eac movq %rax, %rdi callq 0x3306e movq %rax, %r15 cmpl $0x1, %edx jne 0x79eac movq %r15, %rdi callq 0x2e060 callq 0x2e1f0 testq %r14, %r14 jne 0x79c6e movl $0x38, %edi callq 0x2e080 movq (%rbx), %rcx movq 0x10(%rcx), %rdx movq %rdx, 0x10(%rsp) movups (%rcx), %xmm0 movaps %xmm0, (%rsp) leaq 0x22c2c(%rip), %rcx # 0x9ca90 movq %rcx, (%rax) movaps (%rsp), %xmm0 movups %xmm0, 0x8(%rax) movq 0x10(%rsp), %rcx movq %rcx, 0x18(%rax) leaq 0x23b51(%rip), %rcx # 0x9d9d0 movq %rcx, 0x20(%rax) movq $0x19, 0x28(%rax) leaq 0x23ab(%rip), %rcx # 0x7c23d movq %rcx, 0x30(%rax) leaq 0x22b4b(%rip), %rsi # 0x9c9e8 movq 0x240f4(%rip), %rdx # 0x9df98 movq %rax, %rdi callq 0x2e200 movq %r15, %rdi callq 0x2e220
/ChaiScript[P]ChaiScript/include/chaiscript/utility/../dispatchkit/../dispatchkit/boxed_cast.hpp
os_tunnel_linux_add
int os_tunnel_linux_add(struct os_tunnel *tunnel) { int result; if (avl_is_node_added(&tunnel->_node)) { return -1; } result = _handle_tunnel(tunnel, true); if (!result) { tunnel->_node.key = tunnel->p.tunnel_if; avl_insert(&_tunnel_tree, &tunnel->_node); tunnel->if_index = if_nametoindex(tunnel->p.tunnel_if); } else { tunnel->if_index = 0; } return result; }
pushq %rbp movq %rsp, %rbp subq $0x30, %rsp movq %rdi, -0x20(%rbp) movq -0x20(%rbp), %rax addq $0x50, %rax movq %rax, -0x10(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x8(%rbp) movq -0x8(%rbp), %rcx xorl %eax, %eax cmpq $0x0, (%rcx) movb %al, -0x25(%rbp) je 0x125e movq -0x8(%rbp), %rax cmpq $0x0, 0x8(%rax) setne %al movb %al, -0x25(%rbp) movb -0x25(%rbp), %al testb $0x1, %al jne 0x1267 jmp 0x1270 movl $0xffffffff, -0x14(%rbp) # imm = 0xFFFFFFFF jmp 0x12cc movq -0x20(%rbp), %rdi movl $0x1, %esi callq 0x12e0 movl %eax, -0x24(%rbp) cmpl $0x0, -0x24(%rbp) jne 0x12bb movq -0x20(%rbp), %rcx movq -0x20(%rbp), %rax movq %rcx, 0x78(%rax) movq -0x20(%rbp), %rsi addq $0x50, %rsi leaq 0x2ef6(%rip), %rdi # 0x4198 callq 0x1040 movq -0x20(%rbp), %rdi callq 0x10b0 movl %eax, %ecx movq -0x20(%rbp), %rax movl %ecx, 0x4c(%rax) jmp 0x12c6 movq -0x20(%rbp), %rax movl $0x0, 0x4c(%rax) movl -0x24(%rbp), %eax movl %eax, -0x14(%rbp) movl -0x14(%rbp), %eax addq $0x30, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
/OLSR[P]OONF/src/base/os_linux/os_tunnel_linux.c
handle_ipv4_tunnel
static int _handle_ipv4_tunnel(struct os_tunnel *tunnel, bool add) { struct ip_tunnel_parm p; enum _tunnel_if_type type; struct ifreq ifr; int err; memset(&p, 0, sizeof(p)); memset(&ifr, 0, sizeof(ifr)); p.iph.version = 4; p.iph.ihl = 5; p.iph.frag_off = htons(IP_DF); strscpy(p.name, tunnel->p.tunnel_if, IF_NAMESIZE); if (tunnel->p.base_if[0]) { p.link = if_nametoindex(tunnel->p.base_if); } ifr.ifr_ifru.ifru_data = (void *)&p; switch (tunnel->p.inner_type) { case OS_TUNNEL_IPV4: p.iph.protocol = IPPROTO_IPIP; type = _TUNNEL_IP_IN_IP; break; case OS_TUNNEL_IPV6: p.iph.protocol = IPPROTO_IPV6; type = _TUNNEL_IP_IN_IP; break; case OS_TUNNEL_GRE: p.iph.protocol = IPPROTO_GRE; type = _TUNNEL_IP_IN_IP; break; default: return -1; } /* inherit TTL by default */ p.iph.ttl = tunnel->p.tunnel_ttl; /* try to inherit TOS */ if (tunnel->p.inhert_tos) { p.iph.tos = 1; } strscpy(ifr.ifr_name, _tunnel_base_if[type], IF_NAMESIZE); netaddr_to_binary(&p.iph.saddr, &tunnel->p.local, sizeof(p.iph.saddr)); netaddr_to_binary(&p.iph.daddr, &tunnel->p.remote, sizeof(p.iph.daddr)); err = ioctl(os_system_linux_linux_get_ioctl_fd(AF_INET), add ? SIOCADDTUNNEL : SIOCDELTUNNEL, &ifr); if (err) { if (add && (errno == EEXIST)) { /* tunnel with this name already exists, try to remove it! */ err = ioctl(os_system_linux_linux_get_ioctl_fd(AF_INET), SIOCDELTUNNEL, &ifr); if (err) { OONF_WARN(LOG_OS_TUNNEL, "Error while %s tunnel %s: tunnel already exists and could not be removed", add ? "adding" : "removing", tunnel->p.tunnel_if); return -1; } return _handle_ipv4_tunnel(tunnel, true); } OONF_WARN(LOG_OS_TUNNEL, "Error while %s tunnel %s: %s (%d)", add ? "adding" : "removing", tunnel->p.tunnel_if, strerror(errno), errno); return -1; } if (add) { _set_base_tunnel_up(type); } return 0; }
pushq %rbp movq %rsp, %rbp pushq %r14 pushq %rbx subq $0xe0, %rsp movb %sil, %al movq %rdi, -0x40(%rbp) andb $0x1, %al movb %al, -0x41(%rbp) leaq -0x78(%rbp), %rdi xorl %esi, %esi movl $0x34, %edx callq 0x1080 leaq -0xa8(%rbp), %rdi xorl %esi, %esi movl $0x28, %edx callq 0x1080 movb -0x58(%rbp), %al andb $0xf, %al orb $0x40, %al movb %al, -0x58(%rbp) movb -0x58(%rbp), %al andb $-0x10, %al orb $0x5, %al movb %al, -0x58(%rbp) movl $0x4000, %edi # imm = 0x4000 callq 0x1060 movw %ax, -0x52(%rbp) leaq -0x78(%rbp), %rdi movq -0x40(%rbp), %rsi movl $0x10, %edx callq 0x1110 movq -0x40(%rbp), %rax cmpb $0x0, 0x14(%rax) je 0x171b movq -0x40(%rbp), %rdi addq $0x14, %rdi callq 0x10b0 movl %eax, -0x68(%rbp) leaq -0x78(%rbp), %rax movq %rax, -0x98(%rbp) movq -0x40(%rbp), %rax movl 0x10(%rax), %eax movl %eax, -0xb0(%rbp) testl %eax, %eax je 0x1753 jmp 0x1739 movl -0xb0(%rbp), %eax subl $0x1, %eax je 0x1760 jmp 0x1746 movl -0xb0(%rbp), %eax subl $0x2, %eax je 0x176d jmp 0x177a movb $0x4, -0x4f(%rbp) movl $0x0, -0x7c(%rbp) jmp 0x1786 movb $0x29, -0x4f(%rbp) movl $0x0, -0x7c(%rbp) jmp 0x1786 movb $0x2f, -0x4f(%rbp) movl $0x0, -0x7c(%rbp) jmp 0x1786 movl $0xffffffff, -0x34(%rbp) # imm = 0xFFFFFFFF jmp 0x1a2c movq -0x40(%rbp), %rax movb 0x4a(%rax), %al movb %al, -0x50(%rbp) movq -0x40(%rbp), %rax testb $0x1, 0x48(%rax) je 0x179e movb $0x1, -0x57(%rbp) leaq -0xa8(%rbp), %rdi movl -0x7c(%rbp), %eax movl %eax, %ecx leaq 0x29af(%rip), %rax # 0x4160 movq (%rax,%rcx,8), %rsi movl $0x10, %edx callq 0x1110 leaq -0x78(%rbp), %rdi addq $0x20, %rdi addq $0xc, %rdi movq -0x40(%rbp), %rsi addq $0x24, %rsi movl $0x4, %edx callq 0x1100 leaq -0x78(%rbp), %rdi addq $0x20, %rdi addq $0x10, %rdi movq -0x40(%rbp), %rsi addq $0x36, %rsi movl $0x4, %edx callq 0x1100 movl $0x2, %edi callq 0x1120 movl %eax, %edi movb -0x41(%rbp), %dl movl $0x89f2, %eax # imm = 0x89F2 movl $0x89f1, %ecx # imm = 0x89F1 testb $0x1, %dl cmovnel %ecx, %eax movslq %eax, %rsi leaq -0xa8(%rbp), %rdx movb $0x0, %al callq 0x1090 movl 
%eax, -0xac(%rbp) cmpl $0x0, -0xac(%rbp) je 0x1a17 testb $0x1, -0x41(%rbp) je 0x1938 callq 0x1030 cmpl $0x11, (%rax) jne 0x1938 movl $0x2, %edi callq 0x1120 movl %eax, %edi movl $0x89f2, %esi # imm = 0x89F2 leaq -0xa8(%rbp), %rdx movb $0x0, %al callq 0x1090 movl %eax, -0xac(%rbp) cmpl $0x0, -0xac(%rbp) je 0x1922 jmp 0x188a movl 0x2874(%rip), %eax # 0x4104 movq 0x2759(%rip), %rcx # 0x3ff0 movq %rcx, -0x18(%rbp) movl %eax, -0x1c(%rbp) movl $0x4, -0x20(%rbp) movq -0x18(%rbp), %rax movl -0x1c(%rbp), %ecx movzbl (%rax,%rcx), %eax andl -0x20(%rbp), %eax cmpl $0x0, %eax je 0x1914 movl 0x2846(%rip), %esi # 0x4104 movb -0x41(%rbp), %cl leaq 0x863(%rip), %r10 # 0x212b leaq 0x855(%rip), %rax # 0x2124 testb $0x1, %cl cmovneq %rax, %r10 movq -0x40(%rbp), %rax movl $0x4, %edi leaq 0x72e(%rip), %rdx # 0x2014 addq $0x3e, %rdx movl $0x141, %ecx # imm = 0x141 xorl %r8d, %r8d movl %r8d, %r9d leaq 0x7df(%rip), %r11 # 0x20db movq %r9, %r8 movq %r11, (%rsp) movq %r10, 0x8(%rsp) movq %rax, 0x10(%rsp) movb $0x0, %al callq 0x10a0 jmp 0x1916 movl $0xffffffff, -0x34(%rbp) # imm = 0xFFFFFFFF jmp 0x1a2c movq -0x40(%rbp), %rdi movl $0x1, %esi callq 0x1690 movl %eax, -0x34(%rbp) jmp 0x1a2c jmp 0x193a movl 0x27c4(%rip), %eax # 0x4104 movq 0x26a9(%rip), %rcx # 0x3ff0 movq %rcx, -0x28(%rbp) movl %eax, -0x2c(%rbp) movl $0x4, -0x30(%rbp) movq -0x28(%rbp), %rax movl -0x2c(%rbp), %ecx movzbl (%rax,%rcx), %eax andl -0x30(%rbp), %eax cmpl $0x0, %eax je 0x1a0c movl 0x2792(%rip), %eax # 0x4104 movl %eax, -0xc4(%rbp) movb -0x41(%rbp), %cl leaq 0x7a9(%rip), %rbx # 0x212b leaq 0x79b(%rip), %rax # 0x2124 testb $0x1, %cl cmovneq %rax, %rbx movq -0x40(%rbp), %rax movq %rax, -0xc0(%rbp) callq 0x1030 movl (%rax), %edi callq 0x10f0 movq %rax, -0xb8(%rbp) callq 0x1030 movl -0xc4(%rbp), %esi movq -0xc0(%rbp), %r11 movq -0xb8(%rbp), %r10 movl (%rax), %eax movl $0x4, %edi leaq 0x63f(%rip), %rdx # 0x2014 addq $0x3e, %rdx movl $0x147, %ecx # imm = 0x147 xorl %r8d, %r8d movl %r8d, %r9d leaq 0x749(%rip), %r14 # 0x2134 movq 
%r9, %r8 movq %r14, (%rsp) movq %rbx, 0x8(%rsp) movq %r11, 0x10(%rsp) movq %r10, 0x18(%rsp) movl %eax, 0x20(%rsp) movb $0x0, %al callq 0x10a0 jmp 0x1a0e movl $0xffffffff, -0x34(%rbp) # imm = 0xFFFFFFFF jmp 0x1a2c testb $0x1, -0x41(%rbp) je 0x1a25 movl -0x7c(%rbp), %edi callq 0x1da0 movl $0x0, -0x34(%rbp) movl -0x34(%rbp), %eax addq $0xe0, %rsp popq %rbx popq %r14 popq %rbp retq nopl (%rax,%rax)
/OLSR[P]OONF/src/base/os_linux/os_tunnel_linux.c
handle_ipv6_tunnel
static int _handle_ipv6_tunnel(struct os_tunnel *tunnel, bool add) { struct my_ip6_tnl_parm2 p; enum _tunnel_if_type type; struct ifreq ifr; int err; struct netaddr_str nbuf1, nbuf2; memset(&p, 0, sizeof(p)); memset(&ifr, 0, sizeof(ifr)); ifr.ifr_ifru.ifru_data = (void *)&p; if (tunnel->p.base_if[0]) { p.link = if_nametoindex(tunnel->p.base_if); } strscpy(p.name, tunnel->p.tunnel_if, IF_NAMESIZE); switch (tunnel->p.inner_type) { case OS_TUNNEL_IPV4: p.proto = IPPROTO_IPIP; type = _TUNNEL_IP_IN_IP6; break; case OS_TUNNEL_IPV6: p.proto = IPPROTO_IPV6; type = _TUNNEL_IP6_IN_IP6; break; case OS_TUNNEL_GRE: p.proto = IPPROTO_GRE; type = _TUNNEL_GRE_IN_IP6; break; default: return -1; } /* set tunnel flags */ if (tunnel->p.inhert_tos) { p.flags |= IP6_TNL_F_USE_ORIG_TCLASS; } if (tunnel->p.inhert_flowlabel) { p.flags |= IP6_TNL_F_USE_ORIG_FLOWLABEL; } if (tunnel->p.tunnel_ttl) { p.hop_limit = tunnel->p.tunnel_ttl; } strscpy(ifr.ifr_name, _tunnel_base_if[type], IF_NAMESIZE); netaddr_to_binary(&p.laddr, &tunnel->p.local, sizeof(p.laddr)); netaddr_to_binary(&p.raddr, &tunnel->p.remote, sizeof(p.raddr)); err = ioctl(os_system_linux_linux_get_ioctl_fd(AF_INET6), add ? SIOCADDTUNNEL : SIOCDELTUNNEL, &ifr); if (err) { OONF_WARN(LOG_OS_TUNNEL, "Error while %s tunnel %s (%d,%s,%s): %s (%d)", add ? "add" : "remove", tunnel->p.tunnel_if, tunnel->p.inner_type, netaddr_to_string(&nbuf1, &tunnel->p.local), netaddr_to_string(&nbuf2, &tunnel->p.remote), strerror(errno), errno); return -1; } if (add) { _set_base_tunnel_up(type); } return 0; }
pushq %rbp movq %rsp, %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x1a8, %rsp # imm = 0x1A8 movb %sil, %al movq %rdi, -0x68(%rbp) andb $0x1, %al movb %al, -0x69(%rbp) leaq -0xb8(%rbp), %rdi xorl %esi, %esi movl $0x4c, %edx callq 0x1080 leaq -0xe8(%rbp), %rdi xorl %esi, %esi movl $0x28, %edx callq 0x1080 leaq -0xb8(%rbp), %rax movq %rax, -0xd8(%rbp) movq -0x68(%rbp), %rax cmpb $0x0, 0x14(%rax) je 0x1ab1 movq -0x68(%rbp), %rdi addq $0x14, %rdi callq 0x10b0 movl %eax, -0xa8(%rbp) movq -0x68(%rbp), %rsi leaq -0xb8(%rbp), %rdi movl $0x10, %edx callq 0x1110 movq -0x68(%rbp), %rax movl 0x10(%rax), %eax movl %eax, -0x16c(%rbp) testl %eax, %eax je 0x1af3 jmp 0x1ad9 movl -0x16c(%rbp), %eax subl $0x1, %eax je 0x1b06 jmp 0x1ae6 movl -0x16c(%rbp), %eax subl $0x2, %eax je 0x1b19 jmp 0x1b2c movb $0x4, -0xa4(%rbp) movl $0x1, -0xbc(%rbp) jmp 0x1b38 movb $0x29, -0xa4(%rbp) movl $0x3, -0xbc(%rbp) jmp 0x1b38 movb $0x2f, -0xa4(%rbp) movl $0x5, -0xbc(%rbp) jmp 0x1b38 movl $0xffffffff, -0x5c(%rbp) # imm = 0xFFFFFFFF jmp 0x1d7f movq -0x68(%rbp), %rax testb $0x1, 0x48(%rax) je 0x1b51 movl -0x9c(%rbp), %eax orl $0x2, %eax movl %eax, -0x9c(%rbp) movq -0x68(%rbp), %rax testb $0x1, 0x49(%rax) je 0x1b6a movl -0x9c(%rbp), %eax orl $0x4, %eax movl %eax, -0x9c(%rbp) movq -0x68(%rbp), %rax cmpb $0x0, 0x4a(%rax) je 0x1b81 movq -0x68(%rbp), %rax movb 0x4a(%rax), %al movb %al, -0xa2(%rbp) leaq -0xe8(%rbp), %rdi movl -0xbc(%rbp), %eax movl %eax, %ecx leaq 0x25c9(%rip), %rax # 0x4160 movq (%rax,%rcx,8), %rsi movl $0x10, %edx callq 0x1110 leaq -0xb8(%rbp), %rdi addq $0x20, %rdi movq -0x68(%rbp), %rsi addq $0x24, %rsi movl $0x10, %edx callq 0x1100 leaq -0xb8(%rbp), %rdi addq $0x30, %rdi movq -0x68(%rbp), %rsi addq $0x36, %rsi movl $0x10, %edx callq 0x1100 movl $0xa, %edi callq 0x1120 movl %eax, %edi movb -0x69(%rbp), %dl movl $0x89f2, %eax # imm = 0x89F2 movl $0x89f1, %ecx # imm = 0x89F1 testb $0x1, %dl cmovnel %ecx, %eax movslq %eax, %rsi leaq -0xe8(%rbp), %rdx movb $0x0, %al callq 
0x1090 movl %eax, -0xec(%rbp) cmpl $0x0, -0xec(%rbp) je 0x1d67 jmp 0x1c24 movl 0x24da(%rip), %eax # 0x4104 movq 0x23bf(%rip), %rcx # 0x3ff0 movq %rcx, -0x50(%rbp) movl %eax, -0x54(%rbp) movl $0x4, -0x58(%rbp) movq -0x50(%rbp), %rax movl -0x54(%rbp), %ecx movzbl (%rax,%rcx), %eax andl -0x58(%rbp), %eax cmpl $0x0, %eax je 0x1d5c movl 0x24a8(%rip), %eax # 0x4104 movl %eax, -0x184(%rbp) movb -0x69(%rbp), %cl leaq 0x5b1(%rip), %r12 # 0x221d leaq 0x5a6(%rip), %rax # 0x2219 testb $0x1, %cl cmovneq %rax, %r12 movq -0x68(%rbp), %r15 movq -0x68(%rbp), %rax movl 0x10(%rax), %r14d movq -0x68(%rbp), %rax addq $0x24, %rax leaq -0x12a(%rbp), %rcx movq %rcx, -0x30(%rbp) movq %rax, -0x38(%rbp) movq -0x30(%rbp), %rdi movq -0x38(%rbp), %rsi xorl %edx, %edx callq 0x10d0 movq %rax, %rbx movq -0x68(%rbp), %rax addq $0x36, %rax leaq -0x168(%rbp), %rcx movq %rcx, -0x40(%rbp) movq %rax, -0x48(%rbp) movq -0x40(%rbp), %rdi movq -0x48(%rbp), %rsi xorl %edx, %edx callq 0x10d0 movq %rax, -0x180(%rbp) callq 0x1030 movl (%rax), %edi callq 0x10f0 movq %rax, -0x178(%rbp) callq 0x1030 movl -0x184(%rbp), %esi movq -0x180(%rbp), %r11 movq -0x178(%rbp), %r10 movl (%rax), %eax movl $0x4, %edi leaq 0x2fe(%rip), %rdx # 0x2014 addq $0x3e, %rdx movl $0x18e, %ecx # imm = 0x18E xorl %r8d, %r8d movl %r8d, %r9d leaq 0x4c0(%rip), %r13 # 0x21ec movq %r9, %r8 movq %r13, (%rsp) movq %r12, 0x8(%rsp) movq %r15, 0x10(%rsp) movl %r14d, 0x18(%rsp) movq %rbx, 0x20(%rsp) movq %r11, 0x28(%rsp) movq %r10, 0x30(%rsp) movl %eax, 0x38(%rsp) movb $0x0, %al callq 0x10a0 jmp 0x1d5e movl $0xffffffff, -0x5c(%rbp) # imm = 0xFFFFFFFF jmp 0x1d7f testb $0x1, -0x69(%rbp) je 0x1d78 movl -0xbc(%rbp), %edi callq 0x1da0 movl $0x0, -0x5c(%rbp) movl -0x5c(%rbp), %eax addq $0x1a8, %rsp # imm = 0x1A8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq nopw %cs:(%rax,%rax)
/OLSR[P]OONF/src/base/os_linux/os_tunnel_linux.c
set_base_tunnel_up
static void _set_base_tunnel_up(enum _tunnel_if_type type) { struct ifreq ifr; int oldflags; if (!_tunnel_base_up[type]) { /* make sure base interface is up for incoming tunnel traffic */ memset(&ifr, 0, sizeof(ifr)); strscpy(ifr.ifr_name, _tunnel_base_if[type], IF_NAMESIZE); if (ioctl(os_system_linux_linux_get_ioctl_fd(AF_INET), SIOCGIFFLAGS, &ifr) < 0) { OONF_WARN(LOG_OS_TUNNEL, "ioctl SIOCGIFFLAGS (get flags) error on device %s: %s (%d)\n", _tunnel_base_if[type], strerror(errno), errno); return; } oldflags = ifr.ifr_flags; ifr.ifr_flags |= IFF_UP; if (oldflags == ifr.ifr_flags) { /* interface is already up/down */ return; } if (ioctl(os_system_linux_linux_get_ioctl_fd(AF_INET), SIOCSIFFLAGS, &ifr) < 0) { OONF_WARN(LOG_OS_TUNNEL, "ioctl SIOCSIFFLAGS (set flags up) error on device %s: %s (%d)\n", _tunnel_base_if[type], strerror(errno), errno); return; } _tunnel_base_up[type] = true; } }
pushq %rbp movq %rsp, %rbp pushq %rbx subq $0xa8, %rsp movl %edi, -0x2c(%rbp) movl -0x2c(%rbp), %eax movl %eax, %ecx leaq 0x2405(%rip), %rax # 0x41c0 testb $0x1, (%rax,%rcx) jne 0x1fe4 leaq -0x58(%rbp), %rdi xorl %esi, %esi movl $0x28, %edx callq 0x1080 leaq -0x58(%rbp), %rdi movl -0x2c(%rbp), %eax movl %eax, %ecx leaq 0x237b(%rip), %rax # 0x4160 movq (%rax,%rcx,8), %rsi movl $0x10, %edx callq 0x1110 movl $0x2, %edi callq 0x1120 movl %eax, %edi movl $0x8913, %esi # imm = 0x8913 leaq -0x58(%rbp), %rdx movb $0x0, %al callq 0x1090 cmpl $0x0, %eax jge 0x1ecc jmp 0x1e1a movl 0x22e4(%rip), %eax # 0x4104 movq 0x21c9(%rip), %rcx # 0x3ff0 movq %rcx, -0x10(%rbp) movl %eax, -0x14(%rbp) movl $0x4, -0x18(%rbp) movq -0x10(%rbp), %rax movl -0x14(%rbp), %ecx movzbl (%rax,%rcx), %eax andl -0x18(%rbp), %eax cmpl $0x0, %eax je 0x1ec5 movl 0x22b6(%rip), %eax # 0x4104 movl %eax, -0x74(%rbp) movl -0x2c(%rbp), %eax movl %eax, %ecx leaq 0x2303(%rip), %rax # 0x4160 movq (%rax,%rcx,8), %rax movq %rax, -0x70(%rbp) callq 0x1030 movl (%rax), %edi callq 0x10f0 movq %rax, -0x68(%rbp) callq 0x1030 movl -0x74(%rbp), %esi movq -0x70(%rbp), %r11 movq -0x68(%rbp), %r10 movl (%rax), %eax movl $0x4, %edi leaq 0x181(%rip), %rdx # 0x2014 addq $0x3e, %rdx movl $0xec, %ecx xorl %r8d, %r8d movl %r8d, %r9d leaq 0x2c8(%rip), %rbx # 0x2171 movq %r9, %r8 movq %rbx, (%rsp) movq %r11, 0x8(%rsp) movq %r10, 0x10(%rsp) movl %eax, 0x18(%rsp) movb $0x0, %al callq 0x10a0 jmp 0x1ec7 jmp 0x1fe4 movswl -0x48(%rbp), %eax movl %eax, -0x5c(%rbp) movswl -0x48(%rbp), %eax orl $0x1, %eax movw %ax, -0x48(%rbp) movl -0x5c(%rbp), %eax movswl -0x48(%rbp), %ecx cmpl %ecx, %eax jne 0x1eee jmp 0x1fe4 movl $0x2, %edi callq 0x1120 movl %eax, %edi movl $0x8914, %esi # imm = 0x8914 leaq -0x58(%rbp), %rdx movb $0x0, %al callq 0x1090 cmpl $0x0, %eax jge 0x1fd4 jmp 0x1f15 movl 0x21e9(%rip), %eax # 0x4104 movq 0x20ce(%rip), %rcx # 0x3ff0 movq %rcx, -0x20(%rbp) movl %eax, -0x24(%rbp) movl $0x4, -0x28(%rbp) movq -0x20(%rbp), %rax movl 
-0x24(%rbp), %ecx movzbl (%rax,%rcx), %eax andl -0x28(%rbp), %eax cmpl $0x0, %eax je 0x1fd0 movl 0x21b7(%rip), %eax # 0x4104 movl %eax, -0x8c(%rbp) movl -0x2c(%rbp), %eax movl %eax, %ecx leaq 0x2201(%rip), %rax # 0x4160 movq (%rax,%rcx,8), %rax movq %rax, -0x88(%rbp) callq 0x1030 movl (%rax), %edi callq 0x10f0 movq %rax, -0x80(%rbp) callq 0x1030 movl -0x8c(%rbp), %esi movq -0x88(%rbp), %r11 movq -0x80(%rbp), %r10 movl (%rax), %eax movl $0x4, %edi leaq 0x76(%rip), %rdx # 0x2014 addq $0x3e, %rdx movl $0xfa, %ecx xorl %r8d, %r8d movl %r8d, %r9d leaq 0x1f9(%rip), %rbx # 0x21ad movq %r9, %r8 movq %rbx, (%rsp) movq %r11, 0x8(%rsp) movq %r10, 0x10(%rsp) movl %eax, 0x18(%rsp) movb $0x0, %al callq 0x10a0 jmp 0x1fd2 jmp 0x1fe4 movl -0x2c(%rbp), %eax movl %eax, %ecx leaq 0x21e0(%rip), %rax # 0x41c0 movb $0x1, (%rax,%rcx) addq $0xa8, %rsp popq %rbx popq %rbp retq
/OLSR[P]OONF/src/base/os_linux/os_tunnel_linux.c
select_wrapper
int select_wrapper(int nfds, fd_set *rd, fd_set *wr, fd_set *exc, struct timeval *tv) { if(nfds < 0) { SET_SOCKERRNO(EINVAL); return -1; } #ifdef USE_WINSOCK /* * Winsock select() requires that at least one of the three fd_set * pointers is not NULL and points to a non-empty fdset. IOW Winsock * select() can not be used to sleep without a single fd_set. */ if(!nfds) { Sleep((1000*tv->tv_sec) + (DWORD)(((double)tv->tv_usec)/1000.0)); return 0; } #endif return select(nfds, rd, wr, exc, tv); }
testl %edi, %edi jns 0x1080 pushq %rax callq 0x1030 movl $0x16, (%rax) movl $0xffffffff, %eax # imm = 0xFFFFFFFF popq %rcx retq
/nomaster[P]curl/tests/libtest/first.c
HelpIRCCommand::trigger(IRC_Bot*, std::basic_string_view<char, std::char_traits<char>>, std::basic_string_view<char, std::char_traits<char>>, std::basic_string_view<char, std::char_traits<char>>)
void HelpIRCCommand::trigger(IRC_Bot *source, std::string_view in_channel, std::string_view nick, std::string_view parameters) { Jupiter::IRC::Client::Channel *channel = source->getChannel(in_channel); if (channel != nullptr) { int access = source->getAccessLevel(*channel, nick); if (parameters.empty()) { for (int i = 0; i <= access; i++) { auto cmds = source->getAccessCommands(channel, i); if (cmds.size() != 0) { std::string triggers = source->getTriggers(cmds); if (triggers.size() >= 0) { source->sendNotice(nick, string_printf("Access level %d commands: %.*s", i, triggers.size(), triggers.data())); } } } source->sendNotice(nick, "For command-specific help, use: help <command>"sv); } else { auto command_split = jessilib::word_split_once_view(std::string_view{parameters}, WHITESPACE_SV); IRCCommand *cmd = source->getCommand(command_split.first); if (cmd) { int command_access = cmd->getAccessLevel(channel); if (command_access < 0) source->sendNotice(nick, "Error: Command disabled."sv); else if (access < command_access) source->sendNotice(nick, "Access Denied."sv); else source->sendNotice(nick, cmd->getHelp(command_split.second)); } else source->sendNotice(nick, "Error: Command not found."sv); } } }
pushq %rbp movq %rsp, %rbp subq $0x250, %rsp # imm = 0x250 leaq 0x10(%rbp), %rax movq %rax, -0x208(%rbp) movq %rdx, -0x10(%rbp) movq %rcx, -0x8(%rbp) movq %r8, -0x20(%rbp) movq %r9, -0x18(%rbp) movq %rdi, -0x28(%rbp) movq %rsi, -0x30(%rbp) movq -0x30(%rbp), %rdi movq -0x10(%rbp), %rax movq %rax, -0x48(%rbp) movq -0x8(%rbp), %rax movq %rax, -0x40(%rbp) movq -0x48(%rbp), %rsi movq -0x40(%rbp), %rdx callq 0xa8f0 movq %rax, -0x38(%rbp) cmpq $0x0, -0x38(%rbp) je 0xb963 movq -0x30(%rbp), %rdi movq -0x38(%rbp), %rsi movq -0x20(%rbp), %rax movq %rax, -0x60(%rbp) movq -0x18(%rbp), %rax movq %rax, -0x58(%rbp) movq -0x60(%rbp), %rdx movq -0x58(%rbp), %rcx callq 0xa150 movq -0x208(%rbp), %rdi movl %eax, -0x4c(%rbp) callq 0xa9d0 testb $0x1, %al jne 0xb4bb jmp 0xb6d1 movl $0x0, -0x64(%rbp) movl -0x64(%rbp), %eax cmpl -0x4c(%rbp), %eax jg 0xb664 movq -0x30(%rbp), %rsi movq -0x38(%rbp), %rdx movl -0x64(%rbp), %ecx leaq -0x80(%rbp), %rdi callq 0xa5b0 leaq -0x80(%rbp), %rdi callq 0xa480 cmpq $0x0, %rax je 0xb63f leaq -0xa0(%rbp), %rdi leaq -0x80(%rbp), %rsi callq 0xa490 jmp 0xb507 leaq -0xa0(%rbp), %rdi callq 0xad30 cmpq $0x0, %rax jb 0xb623 movq -0x30(%rbp), %rax movq %rax, -0x228(%rbp) movups -0x20(%rbp), %xmm0 movaps %xmm0, -0xc0(%rbp) movl -0x64(%rbp), %eax movl %eax, -0x214(%rbp) leaq -0xa0(%rbp), %rdi movq %rdi, -0x220(%rbp) callq 0xad30 movq -0x220(%rbp), %rdi movq %rax, -0x210(%rbp) callq 0xab60 movl -0x214(%rbp), %edx movq -0x210(%rbp), %rcx movq %rax, %r8 leaq 0x4b4b(%rip), %rsi # 0x100c4 xorl %eax, %eax leaq -0xf0(%rbp), %rdi callq 0xaaa0 jmp 0xb589 leaq -0xf0(%rbp), %rdi callq 0xa290 movq -0x228(%rbp), %rdi movq %rax, -0xd0(%rbp) movq %rdx, -0xc8(%rbp) movq -0xc0(%rbp), %rsi movq -0xb8(%rbp), %rdx movq -0xd0(%rbp), %rcx movq -0xc8(%rbp), %r8 callq 0xacf0 jmp 0xb5cd leaq -0xf0(%rbp), %rdi callq 0xa4c0 jmp 0xb623 movq %rax, %rcx movl %edx, %eax movq %rcx, -0xa8(%rbp) movl %eax, -0xac(%rbp) jmp 0xb656 movq %rax, %rcx movl %edx, %eax movq %rcx, -0xa8(%rbp) movl %eax, 
-0xac(%rbp) jmp 0xb631 movq %rax, %rcx movl %edx, %eax movq %rcx, -0xa8(%rbp) movl %eax, -0xac(%rbp) leaq -0xf0(%rbp), %rdi callq 0xa4c0 jmp 0xb631 leaq -0xa0(%rbp), %rdi callq 0xa4c0 jmp 0xb63f leaq -0xa0(%rbp), %rdi callq 0xa4c0 jmp 0xb656 leaq -0x80(%rbp), %rdi callq 0xa250 movl -0x64(%rbp), %eax addl $0x1, %eax movl %eax, -0x64(%rbp) jmp 0xb4c2 leaq -0x80(%rbp), %rdi callq 0xa250 jmp 0xb96c movq -0x30(%rbp), %rax movq %rax, -0x230(%rbp) movq -0x20(%rbp), %rax movq %rax, -0x100(%rbp) movq -0x18(%rbp), %rax movq %rax, -0xf8(%rbp) leaq 0x49a1(%rip), %rdi # 0x1002d movl $0x2e, %esi callq 0xa950 movq -0x230(%rbp), %rdi movq %rax, -0x110(%rbp) movq %rdx, -0x108(%rbp) movq -0x100(%rbp), %rsi movq -0xf8(%rbp), %rdx movq -0x110(%rbp), %rcx movq -0x108(%rbp), %r8 callq 0xacf0 jmp 0xb961 movq -0x208(%rbp), %rax movq (%rax), %rcx movq %rcx, -0x140(%rbp) movq 0x8(%rax), %rax movq %rax, -0x138(%rbp) leaq -0x150(%rbp), %rdi leaq 0x4961(%rip), %rsi # 0x1005c callq 0xab80 leaq -0x130(%rbp), %rdi leaq -0x140(%rbp), %rsi leaq -0x150(%rbp), %rdx callq 0xa280 movq -0x30(%rbp), %rdi movq -0x130(%rbp), %rax movq %rax, -0x168(%rbp) movq -0x128(%rbp), %rax movq %rax, -0x160(%rbp) movq -0x168(%rbp), %rsi movq -0x160(%rbp), %rdx callq 0xa370 movq %rax, -0x158(%rbp) cmpq $0x0, -0x158(%rbp) je 0xb8f7 movq -0x158(%rbp), %rdi movq -0x38(%rbp), %rsi callq 0xadb0 movl %eax, -0x16c(%rbp) cmpl $0x0, -0x16c(%rbp) jge 0xb7ee movq -0x30(%rbp), %rax movq %rax, -0x238(%rbp) movq -0x20(%rbp), %rax movq %rax, -0x180(%rbp) movq -0x18(%rbp), %rax movq %rax, -0x178(%rbp) leaq 0x493a(%rip), %rdi # 0x100e3 movl $0x18, %esi callq 0xa950 movq -0x238(%rbp), %rdi movq %rax, -0x190(%rbp) movq %rdx, -0x188(%rbp) movq -0x180(%rbp), %rsi movq -0x178(%rbp), %rdx movq -0x190(%rbp), %rcx movq -0x188(%rbp), %r8 callq 0xacf0 jmp 0xb8f5 movl -0x4c(%rbp), %eax cmpl -0x16c(%rbp), %eax jge 0xb866 movq -0x30(%rbp), %rax movq %rax, -0x240(%rbp) movq -0x20(%rbp), %rax movq %rax, -0x1a0(%rbp) movq -0x18(%rbp), %rax movq %rax, 
-0x198(%rbp) leaq 0x48db(%rip), %rdi # 0x100fc movl $0xe, %esi callq 0xa950 movq -0x240(%rbp), %rdi movq %rax, -0x1b0(%rbp) movq %rdx, -0x1a8(%rbp) movq -0x1a0(%rbp), %rsi movq -0x198(%rbp), %rdx movq -0x1b0(%rbp), %rcx movq -0x1a8(%rbp), %r8 callq 0xacf0 jmp 0xb8f3 movq -0x30(%rbp), %rax movq %rax, -0x248(%rbp) movq -0x20(%rbp), %rax movq %rax, -0x1c0(%rbp) movq -0x18(%rbp), %rax movq %rax, -0x1b8(%rbp) movq -0x158(%rbp), %rdi movq -0x120(%rbp), %rax movq %rax, -0x1e0(%rbp) movq -0x118(%rbp), %rax movq %rax, -0x1d8(%rbp) movq -0x1e0(%rbp), %rsi movq -0x1d8(%rbp), %rdx movq (%rdi), %rax callq *(%rax) movq -0x248(%rbp), %rdi movq %rax, -0x1d0(%rbp) movq %rdx, -0x1c8(%rbp) movq -0x1c0(%rbp), %rsi movq -0x1b8(%rbp), %rdx movq -0x1d0(%rbp), %rcx movq -0x1c8(%rbp), %r8 callq 0xacf0 jmp 0xb8f5 jmp 0xb95f movq -0x30(%rbp), %rax movq %rax, -0x250(%rbp) movq -0x20(%rbp), %rax movq %rax, -0x1f0(%rbp) movq -0x18(%rbp), %rax movq %rax, -0x1e8(%rbp) leaq 0x47ec(%rip), %rdi # 0x1010b movl $0x19, %esi callq 0xa950 movq -0x250(%rbp), %rdi movq %rax, -0x200(%rbp) movq %rdx, -0x1f8(%rbp) movq -0x1f0(%rbp), %rsi movq -0x1e8(%rbp), %rdx movq -0x200(%rbp), %rcx movq -0x1f8(%rbp), %r8 callq 0xacf0 jmp 0xb961 jmp 0xb963 addq $0x250, %rsp # imm = 0x250 popq %rbp retq movq -0xa8(%rbp), %rdi callq 0xa700 nopl (%rax,%rax)
/JAJames[P]Jupiter-Bot/src/Plugins/CoreCommands/CoreCommands.cpp
RehashGenericCommand::trigger(std::basic_string_view<char, std::char_traits<char>>)
Jupiter::GenericCommand::ResponseLine *RehashGenericCommand::trigger(std::string_view parameters) { size_t hash_errors = Jupiter::rehash(); if (hash_errors == 0) return new Jupiter::GenericCommand::ResponseLine(string_printf("All %u objects were successfully rehashed.", Jupiter::getRehashableCount()), GenericCommand::DisplayType::PublicSuccess); return new Jupiter::GenericCommand::ResponseLine(string_printf("%u of %u objects failed to successfully rehash.", hash_errors, Jupiter::getRehashableCount()), GenericCommand::DisplayType::PublicError); }
pushq %rbp movq %rsp, %rbp subq $0xb0, %rsp movq %rsi, -0x18(%rbp) movq %rdx, -0x10(%rbp) movq %rdi, -0x20(%rbp) callq 0xa960 movq %rax, -0x28(%rbp) cmpq $0x0, -0x28(%rbp) jne 0xbe6b movl $0x30, %edi callq 0xa170 movq %rax, -0x90(%rbp) movb $0x1, -0x55(%rbp) callq 0xa550 movq %rax, -0x88(%rbp) jmp 0xbdde movq -0x88(%rbp), %rdx leaq 0x43b3(%rip), %rsi # 0x1019f xorl %eax, %eax leaq -0x48(%rbp), %rdi callq 0xaaa0 jmp 0xbdf9 movq -0x90(%rbp), %rdi leaq -0x48(%rbp), %rsi xorl %edx, %edx callq 0xa9c0 jmp 0xbe0d movq -0x90(%rbp), %rax movb $0x0, -0x55(%rbp) movq %rax, -0x8(%rbp) leaq -0x48(%rbp), %rdi callq 0xa4c0 jmp 0xbf2a movq %rax, %rcx movl %edx, %eax movq %rcx, -0x50(%rbp) movl %eax, -0x54(%rbp) jmp 0xbe4d movq %rax, %rcx movl %edx, %eax movq %rcx, -0x50(%rbp) movl %eax, -0x54(%rbp) leaq -0x48(%rbp), %rdi callq 0xa4c0 testb $0x1, -0x55(%rbp) jne 0xbe55 jmp 0xbe66 movq -0x90(%rbp), %rdi movl $0x30, %esi callq 0xa120 jmp 0xbf37 movl $0x30, %edi callq 0xa170 movq %rax, -0xa8(%rbp) movb $0x1, -0x79(%rbp) movq -0x28(%rbp), %rax movq %rax, -0xa0(%rbp) callq 0xa550 movq %rax, -0x98(%rbp) jmp 0xbe99 movq -0x98(%rbp), %rcx movq -0xa0(%rbp), %rdx leaq 0x431c(%rip), %rsi # 0x101ca xorl %eax, %eax leaq -0x78(%rbp), %rdi callq 0xaaa0 jmp 0xbebb movq -0xa8(%rbp), %rdi leaq -0x78(%rbp), %rsi movl $0x2, %edx callq 0xa9c0 jmp 0xbed2 movq -0xa8(%rbp), %rax movb $0x0, -0x79(%rbp) movq %rax, -0x8(%rbp) leaq -0x78(%rbp), %rdi callq 0xa4c0 jmp 0xbf2a movq %rax, %rcx movl %edx, %eax movq %rcx, -0x50(%rbp) movl %eax, -0x54(%rbp) jmp 0xbf0f movq %rax, %rcx movl %edx, %eax movq %rcx, -0x50(%rbp) movl %eax, -0x54(%rbp) leaq -0x78(%rbp), %rdi callq 0xa4c0 testb $0x1, -0x79(%rbp) jne 0xbf17 jmp 0xbf28 movq -0xa8(%rbp), %rdi movl $0x30, %esi callq 0xa120 jmp 0xbf37 movq -0x8(%rbp), %rax addq $0xb0, %rsp popq %rbp retq movq -0x50(%rbp), %rdi callq 0xa700
/JAJames[P]Jupiter-Bot/src/Plugins/CoreCommands/CoreCommands.cpp
Generic_Command_As_Console_Command<VersionGenericCommand>::Generic_Command_As_Console_Command()
Generic_Command_As_Console_Command<T>::Generic_Command_As_Console_Command() : ConsoleCommand() { size_t index = 0; while (index != T::instance.getTriggerCount()) { this->addTrigger(T::instance.getTrigger(index++)); } }
pushq %rbp movq %rsp, %rbp subq $0x60, %rsp movq %rdi, -0x8(%rbp) movq -0x8(%rbp), %rdi movq %rdi, -0x38(%rbp) callq 0xac30 movq -0x38(%rbp), %rax movq 0x88ec(%rip), %rcx # 0x14fb0 addq $0x10, %rcx movq %rcx, (%rax) movq $0x0, -0x10(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x48(%rbp) movq 0x8916(%rip), %rdi # 0x14ff8 callq 0xadc0 movq %rax, -0x40(%rbp) jmp 0xc6ed movq -0x48(%rbp), %rax movq -0x40(%rbp), %rcx cmpq %rcx, %rax je 0xc75a movq -0x10(%rbp), %rsi movq %rsi, %rax incq %rax movq %rax, -0x10(%rbp) movq 0x88e9(%rip), %rdi # 0x14ff8 callq 0xa850 movq %rdx, -0x58(%rbp) movq %rax, -0x50(%rbp) jmp 0xc71e movq -0x38(%rbp), %rdi movq -0x58(%rbp), %rax movq -0x50(%rbp), %rcx movq %rcx, -0x30(%rbp) movq %rax, -0x28(%rbp) movq -0x30(%rbp), %rsi movq -0x28(%rbp), %rdx callq 0xacb0 jmp 0xc741 jmp 0xc6d3 movq -0x38(%rbp), %rdi movq %rax, %rcx movl %edx, %eax movq %rcx, -0x18(%rbp) movl %eax, -0x1c(%rbp) callq 0xa300 jmp 0xc760 addq $0x60, %rsp popq %rbp retq movq -0x18(%rbp), %rdi callq 0xa700 nopl (%rax)
/JAJames[P]Jupiter-Bot/src/Bot/src/../include/Console_Command.h
auto jessilib::word_split_once<std::basic_string_view<char, std::char_traits<char>>, char const*, char const*, char>(char const*, char const*, char)
constexpr auto word_split_once(ItrT begin, EndT end, ElementT in_whitespace) { static_assert(sizeof...(OptionalMemberT) <= 1, "Too many member types specified for OptionalMemberT"); using MemberT = std::tuple_element_t<0, std::tuple<OptionalMemberT..., std::basic_string<ElementT>>>; std::pair<MemberT, MemberT> result; if (begin >= end) { // Nothing to word_split return result; } while (begin != end && *begin == in_whitespace) { ++begin; } for (auto itr = begin; itr != end; ++itr) { if (*itr == in_whitespace) { // in_whitespace found; word_split upon it result.first = make_word_split_member<MemberT>(begin, itr); ++itr; while (itr != end && *itr == in_whitespace) { ++itr; } result.second = make_word_split_member<MemberT>(itr, end); return result; } } // in_whitespace not found result.first = make_word_split_member<MemberT>(begin, end); return result; }
pushq %rbp movq %rsp, %rbp subq $0x90, %rsp movq %rdi, -0x80(%rbp) movb %cl, %al movq %rdi, -0x78(%rbp) movq %rsi, -0x8(%rbp) movq %rdx, -0x10(%rbp) movb %al, -0x11(%rbp) leaq -0x38(%rbp), %rdi callq 0xa350 movq -0x8(%rbp), %rax cmpq -0x10(%rbp), %rax jb 0xcf2b movq -0x80(%rbp), %rax movq -0x38(%rbp), %rcx movq %rcx, (%rax) movq -0x30(%rbp), %rcx movq %rcx, 0x8(%rax) movq -0x28(%rbp), %rcx movq %rcx, 0x10(%rax) movq -0x20(%rbp), %rcx movq %rcx, 0x18(%rax) jmp 0xd0b6 jmp 0xcf2d movq -0x8(%rbp), %rcx xorl %eax, %eax cmpq -0x10(%rbp), %rcx movb %al, -0x81(%rbp) je 0xcf55 movq -0x8(%rbp), %rax movsbl (%rax), %eax movsbl -0x11(%rbp), %ecx cmpl %ecx, %eax sete %al movb %al, -0x81(%rbp) movb -0x81(%rbp), %al testb $0x1, %al jne 0xcf61 jmp 0xcf6f movq -0x8(%rbp), %rax addq $0x1, %rax movq %rax, -0x8(%rbp) jmp 0xcf2d movq -0x8(%rbp), %rax movq %rax, -0x40(%rbp) movq -0x40(%rbp), %rax cmpq -0x10(%rbp), %rax je 0xd06b movq -0x40(%rbp), %rax movsbl (%rax), %eax movsbl -0x11(%rbp), %ecx cmpl %ecx, %eax jne 0xd058 movq -0x8(%rbp), %rdi movq -0x40(%rbp), %rsi callq 0xa140 movq %rax, -0x50(%rbp) movq %rdx, -0x48(%rbp) movq -0x50(%rbp), %rax movq %rax, -0x38(%rbp) movq -0x48(%rbp), %rax movq %rax, -0x30(%rbp) movq -0x40(%rbp), %rax addq $0x1, %rax movq %rax, -0x40(%rbp) movq -0x40(%rbp), %rcx xorl %eax, %eax cmpq -0x10(%rbp), %rcx movb %al, -0x82(%rbp) je 0xcff1 movq -0x40(%rbp), %rax movsbl (%rax), %eax movsbl -0x11(%rbp), %ecx cmpl %ecx, %eax sete %al movb %al, -0x82(%rbp) movb -0x82(%rbp), %al testb $0x1, %al jne 0xcffd jmp 0xd00b movq -0x40(%rbp), %rax addq $0x1, %rax movq %rax, -0x40(%rbp) jmp 0xcfc9 movq -0x40(%rbp), %rdi movq -0x10(%rbp), %rsi callq 0xa140 movq %rax, %rcx movq -0x80(%rbp), %rax movq %rcx, -0x60(%rbp) movq %rdx, -0x58(%rbp) movq -0x60(%rbp), %rcx movq %rcx, -0x28(%rbp) movq -0x58(%rbp), %rcx movq %rcx, -0x20(%rbp) movq -0x38(%rbp), %rcx movq %rcx, (%rax) movq -0x30(%rbp), %rcx movq %rcx, 0x8(%rax) movq -0x28(%rbp), %rcx movq %rcx, 0x10(%rax) movq -0x20(%rbp), 
%rcx movq %rcx, 0x18(%rax) jmp 0xd0b6 jmp 0xd05a movq -0x40(%rbp), %rax addq $0x1, %rax movq %rax, -0x40(%rbp) jmp 0xcf77 movq -0x8(%rbp), %rdi movq -0x10(%rbp), %rsi callq 0xa140 movq %rax, %rcx movq -0x80(%rbp), %rax movq %rcx, -0x70(%rbp) movq %rdx, -0x68(%rbp) movq -0x70(%rbp), %rcx movq %rcx, -0x38(%rbp) movq -0x68(%rbp), %rcx movq %rcx, -0x30(%rbp) movq -0x38(%rbp), %rcx movq %rcx, (%rax) movq -0x30(%rbp), %rcx movq %rcx, 0x8(%rax) movq -0x28(%rbp), %rcx movq %rcx, 0x10(%rax) movq -0x20(%rbp), %rcx movq %rcx, 0x18(%rax) movq -0x78(%rbp), %rax addq $0x90, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
/JAJames[P]Jupiter-Bot/src/Jupiter/src/jessilib/src/common/../include/jessilib/word_split.hpp
Generic_Command_As_Console_Command<RehashGenericCommand>::trigger(std::basic_string_view<char, std::char_traits<char>>)
void Generic_Command_As_Console_Command<T>::trigger(std::string_view parameters) { std::unique_ptr<Jupiter::GenericCommand::ResponseLine> response_line{ T::instance.trigger(parameters) }; while (response_line != nullptr) { auto& out_stream = response_line->type == Jupiter::GenericCommand::DisplayType::PublicError || response_line->type == Jupiter::GenericCommand::DisplayType::PrivateError ? std::cerr : std::cout; out_stream << response_line->response << std::endl; response_line.reset(response_line->next); } }
pushq %rbp movq %rsp, %rbp subq $0x60, %rsp movq %rsi, -0x10(%rbp) movq %rdx, -0x8(%rbp) movq %rdi, -0x18(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x30(%rbp) movq -0x8(%rbp), %rax movq %rax, -0x28(%rbp) movq -0x30(%rbp), %rsi movq -0x28(%rbp), %rdx movq 0x5ad5(%rip), %rax # 0x14f38 movq (%rax), %rax movq 0x5acb(%rip), %rdi # 0x14f38 callq *0x18(%rax) movq %rax, %rsi leaq -0x20(%rbp), %rdi callq 0xaa20 leaq -0x20(%rbp), %rdi xorl %eax, %eax movl %eax, %esi callq 0xad80 xorb $-0x1, %al testb $0x1, %al jne 0xf494 jmp 0xf53b leaq -0x20(%rbp), %rdi callq 0xac10 cmpl $0x2, 0x20(%rax) je 0xf4b2 leaq -0x20(%rbp), %rdi callq 0xac10 cmpl $0x3, 0x20(%rax) jne 0xf4bf movq 0x5b27(%rip), %rax # 0x14fe0 movq %rax, -0x50(%rbp) jmp 0xf4cc movq 0x5a6a(%rip), %rax # 0x14f30 movq %rax, -0x50(%rbp) jmp 0xf4cc movq -0x50(%rbp), %rax movq %rax, -0x38(%rbp) movq -0x38(%rbp), %rax movq %rax, -0x60(%rbp) leaq -0x20(%rbp), %rdi callq 0xac10 movq -0x60(%rbp), %rdi movq %rax, %rsi callq 0xa360 movq %rax, -0x58(%rbp) jmp 0xf4f7 movq -0x58(%rbp), %rdi movq 0x5ace(%rip), %rsi # 0x14fd0 callq 0xab70 jmp 0xf509 leaq -0x20(%rbp), %rdi callq 0xac10 movq 0x28(%rax), %rsi leaq -0x20(%rbp), %rdi callq 0xa680 jmp 0xf47c movq %rax, %rcx movl %edx, %eax movq %rcx, -0x40(%rbp) movl %eax, -0x44(%rbp) leaq -0x20(%rbp), %rdi callq 0xaa10 jmp 0xf54a leaq -0x20(%rbp), %rdi callq 0xaa10 addq $0x60, %rsp popq %rbp retq movq -0x40(%rbp), %rdi callq 0xa700
/JAJames[P]Jupiter-Bot/src/Bot/src/../include/Console_Command.h
bsplib::A2A::send(int, void const*, unsigned long)
void * A2A::send( int dst_pid, const void * data, std::size_t size ) { assert( dst_pid >= 0 ); assert( dst_pid < m_nprocs ); assert( m_send_cap == m_send_bufs.size() / m_nprocs ); std::size_t offset = m_send_sizes[ dst_pid ]; if ( m_send_cap < offset + size ) { std::size_t new_cap = std::max( 2 * m_send_cap , offset + size ); m_send_bufs.resize( m_nprocs * new_cap ); for ( int p = m_nprocs; p > 0; --p ) { std::size_t displ = new_cap - m_send_cap; for ( size_t i = p*new_cap; i > (p-1)*new_cap; --i) { m_send_bufs[i-1] = m_send_bufs[i-displ*(p-1)-1]; } } m_send_cap = new_cap; } m_send_sizes[ dst_pid ] += size; void * send_buf = m_send_bufs.data() + dst_pid * m_send_cap + offset; std::memcpy( send_buf , data, size ); return send_buf; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax testl %esi, %esi js 0xa29f movq %rcx, %rbx movl %esi, %ecx movq %rdi, %r15 movl 0x8(%rdi), %esi cmpl %ecx, %esi jle 0xa2be movq %rdx, %r8 movq 0x20(%r15), %r14 movq 0xa8(%r15), %rdi movq 0xb0(%r15), %rax subq %rdi, %rax xorl %edx, %edx divq %rsi cmpq %rax, %r14 jne 0xa2dd movl %ecx, %r13d movq 0x78(%r15), %rcx movq (%rcx,%r13,8), %rbp leaq (%rbx,%rbp), %rax cmpq %rax, %r14 jae 0xa277 movq %r8, (%rsp) leaq 0xa8(%r15), %r12 addq %r14, %r14 cmpq %rax, %r14 cmovbeq %rax, %r14 imulq %r14, %rsi movq %r12, %rdi callq 0xacde movslq 0x8(%r15), %rax testq %rax, %rax jle 0xa25d leaq -0x1(%rax), %rcx movq %rax, %rdx movq %r14, %rsi imulq %rax, %rsi decq %rax movq %r14, %rdi imulq %rax, %rdi cmpq %rdi, %rsi jbe 0xa254 movq 0x20(%r15), %r8 subq %r14, %r8 imulq %rcx, %r8 movq (%r12), %r9 leaq (%r9,%rsi), %r10 movb -0x1(%r8,%r10), %r10b movb %r10b, -0x1(%r9,%rsi) decq %rsi cmpq %rdi, %rsi ja 0xa23a decq %rcx cmpq $0x1, %rdx jg 0xa216 movq %r14, 0x20(%r15) movq 0x78(%r15), %rcx movq 0xa8(%r15), %rdi movq (%rcx,%r13,8), %rax addq %rbx, %rax movq (%rsp), %r8 movq %rax, (%rcx,%r13,8) imulq 0x20(%r15), %r13 addq %rbp, %rdi addq %r13, %rdi movq %r8, %rsi movq %rbx, %rdx addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x6250 leaq 0x92fa(%rip), %rdi # 0x135a0 leaq 0x9300(%rip), %rsi # 0x135ad leaq 0x9358(%rip), %rcx # 0x1360c movl $0x47, %edx callq 0x61c0 leaq 0x937f(%rip), %rdi # 0x13644 leaq 0x92e1(%rip), %rsi # 0x135ad leaq 0x9339(%rip), %rcx # 0x1360c movl $0x48, %edx callq 0x61c0 leaq 0x9373(%rip), %rdi # 0x13657 leaq 0x92c2(%rip), %rsi # 0x135ad leaq 0x931a(%rip), %rcx # 0x1360c movl $0x4a, %edx callq 0x61c0
/wijnand-suijlen[P]bsponmpi/src/a2a.cc
bsplib::A2A::exchange()
void A2A::exchange( ) { std::size_t max_recv = 0, max_bruck_vol = 0; std::size_t new_cap = m_send_cap; { #ifdef PROFILE TicToc t( TicToc::MPI_META_A2A, 4*sizeof(std::size_t)*m_nprocs ); #endif // exchange data sizes. MPI_Alltoall( m_send_sizes.data(), sizeof(std::size_t), MPI_BYTE, m_recv_sizes.data(), sizeof(std::size_t), MPI_BYTE, m_comm ); // determine ideal nr of bytes to send over MPI_Alltoall m_lincost.reset( m_nprocs ); for ( int i = 0; i < m_nprocs; ++i ) if ( m_send_sizes[i] > 0 ) m_lincost.send( m_send_sizes[i]); std::size_t pref_bruck_vol = m_lincost.get_bruck_vol(); m_lincost.reset( m_nprocs ); for ( int i = 0; i < m_nprocs; ++i ) if ( m_recv_sizes[i] > 0 ) m_lincost.send( m_recv_sizes[i]); // Determine max communication sizes pref_bruck_vol = std::max( pref_bruck_vol, m_lincost.get_bruck_vol() ); std::size_t max_send = *std::max_element( m_send_sizes.begin(), m_send_sizes.begin() + m_nprocs ); std::size_t global_comm_send[3] = { m_send_cap, max_send, pref_bruck_vol }; std::size_t global_comm_recv[3]; MPI_Allreduce( global_comm_send, global_comm_recv, 3, MY_MPI_SIZE_T, MPI_MAX, m_comm ); new_cap = global_comm_recv[0]; max_recv = global_comm_recv[1]; max_bruck_vol = global_comm_recv[2]; } // Ensure correct size memory of recv buffers if ( m_method == RMA && new_cap != m_recv_cap && m_recv_win != MPI_WIN_NULL ) MPI_Win_free( &m_recv_win ); m_recv_bufs.resize( new_cap * m_nprocs ); if ( m_method == RMA && new_cap != m_recv_cap ) { MPI_Win_create( m_recv_bufs.data(), new_cap * m_nprocs, 1, MPI_INFO_NULL, m_comm, &m_recv_win ); } m_recv_cap = new_cap; assert( m_recv_cap >= max_recv ); if ( max_recv == 0 ) { /* no need to do anything */ clear(); } else { std::size_t sm = std::min( max_bruck_vol, m_small_a2a_size_per_proc ) ; { #ifdef PROFILE TicToc t( TicToc::MPI_SMALL_A2A ); #endif for (int p = 0; p < m_nprocs; ++p ) { std::size_t size = std::min( sm, m_send_sizes[p]); memcpy( m_small_send_buf.data() + p * sm, m_send_bufs.data() + p * m_send_cap, size); 
#ifdef PROFILE t.add_bytes( sm ); #endif } // In small exchanges, Bruck's algorithm will be used again MPI_Alltoall( m_small_send_buf.data(), int(sm), MPI_BYTE, m_small_recv_buf.data(), int(sm), MPI_BYTE, m_comm ); for (int p = 0; p < m_nprocs; ++p ) { std::size_t size = std::min( sm, m_recv_sizes[p]); memcpy( m_recv_bufs.data() + p * m_recv_cap, m_small_recv_buf.data() + p * sm, size ); } } // end plain all-to-all // start normal message exchange if ( m_method == RMA) { #ifdef PROFILE TicToc t( TicToc::MPI_PUT ); #endif assert( m_recv_cap > 0 ); assert( m_recv_win != MPI_WIN_NULL ); MPI_Win_fence( 0, m_recv_win ); for (int p = 0; p < m_nprocs; ++p ) { std::size_t o = std::min( sm, m_send_sizes[p] ); std::size_t size = m_send_sizes[p] - o; std::size_t o1 = m_send_cap * p + o; std::size_t o2 = m_recv_cap * m_pid + o; #ifdef PROFILE t.add_bytes( size ); #endif while ( size > 0 ) { std::size_t s = std::min( m_max_msg_size, size ); //size_t s = size; MPI_Put( m_send_bufs.data() + o1, int(s), MPI_BYTE, p, o2, int(s), MPI_BYTE, m_recv_win ); size -= s; o1 += s; o2 += s; } } MPI_Win_fence( 0, m_recv_win ); } else if ( m_method == MSG ) { #ifdef PROFILE TicToc tr( TicToc::MPI_LARGE_RECV ); TicToc ts( TicToc::MPI_LARGE_SEND ); #endif for ( int p = 0 ; p < m_nprocs; ++p ) { std::size_t so = std::min( sm, m_send_sizes[p] ); std::size_t ro = std::min( sm, m_recv_sizes[p] ); m_send_sizes[p] -= so; m_send_pos[p] = p * m_send_cap + so; m_recv_sizes[p] -= ro; m_recv_pos[p] = p * m_recv_cap + ro; } // Do a personalized exchange int outcount = MPI_UNDEFINED; bool first_time = true; do { for (int p = 0; p < m_nprocs; ++p ) { if (m_reqs[p] != MPI_REQUEST_NULL) continue; std::size_t recv_size = std::min( m_max_msg_size, m_recv_sizes[p] ); #ifdef PROFILE tr.add_bytes( recv_size ); #endif int tag = 0; if (recv_size > 0 ) MPI_Irecv( m_recv_bufs.data() + m_recv_pos[p], int( recv_size ), MPI_BYTE, p, tag, m_comm, & m_reqs[p] ); m_recv_sizes[p] -= recv_size; m_recv_pos[p] += recv_size; } if 
(first_time) MPI_Barrier( m_comm ); // Using the barrier the first time // allows to use ready sends for (int p = 0; p < m_nprocs; ++p ) { if (m_reqs[m_nprocs + p] != MPI_REQUEST_NULL) continue; std::size_t send_size = std::min( m_max_msg_size, m_send_sizes[p] ); #ifdef PROFILE ts.add_bytes( send_size ); #endif int tag = 0; if (send_size > 0 ) { if (first_time) MPI_Irsend( m_send_bufs.data() + m_send_pos[p], int( send_size ), MPI_BYTE, p, tag, m_comm, & m_reqs[m_nprocs + p ] ); else MPI_Isend( m_send_bufs.data() + m_send_pos[p], int( send_size ), MPI_BYTE, p, tag, m_comm, & m_reqs[m_nprocs + p ] ); } m_send_sizes[p] -= send_size; m_send_pos[p] += send_size; } first_time = false; MPI_Waitsome( int( m_reqs.size() ), m_reqs.data(), &outcount, m_ready.data(), MPI_STATUSES_IGNORE ); } while (outcount != MPI_UNDEFINED ); for (int p = 0 ; p < m_nprocs; ++p ) { assert( m_recv_sizes[p] == 0 ); assert( m_send_sizes[p] == 0 ); assert( m_reqs[p] == MPI_REQUEST_NULL ); assert( m_reqs[m_nprocs+p] == MPI_REQUEST_NULL ); m_recv_sizes[p] = m_recv_pos[p] - p * m_recv_cap; } } // end of plain message exchange } // end of else m_send_cap = m_recv_cap; m_send_bufs.resize( m_nprocs * m_send_cap ); for (int p = 0 ; p < m_nprocs; ++p ) { m_send_sizes[p] = 0; m_send_pos[p] = 0; m_recv_pos[p] = 0; } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x78, %rsp movq %rdi, %rbx movslq 0x8(%rdi), %r15 shlq $0x5, %r15 movl $0x8, %edi callq 0x97be movl %eax, 0x10(%rsp) leaq 0x30(%rsp), %r14 movl $0x1, %edi movq %r14, %rsi callq 0x6290 movaps (%r14), %xmm0 movups %xmm0, 0x18(%rsp) movq %r15, 0x28(%rsp) movq 0x78(%rbx), %rdi movq 0xc0(%rbx), %rcx movq 0x138(%rbx), %rax movq %rax, (%rsp) movq 0xebbe(%rip), %rdx # 0x18f60 movl $0x8, %esi movl $0x8, %r8d movq %rdx, %r9 callq 0x6150 leaq 0x30(%rbx), %r14 movl 0x8(%rbx), %esi movq %r14, %rdi callq 0x1078c movl 0x8(%rbx), %eax testl %eax, %eax jle 0xa419 leaq 0x48(%rbx), %r15 xorl %r13d, %r13d leaq 0x30(%rsp), %r12 movq 0x78(%rbx), %rcx movq (%rcx,%r13,8), %rcx testq %rcx, %rcx je 0xa40e movq %rcx, 0x30(%rsp) movq 0x50(%rbx), %rsi cmpq 0x58(%rbx), %rsi je 0xa400 movq %rcx, (%rsi) addq $0x8, %rsi movq %rsi, 0x50(%rbx) jmp 0xa40e movq %r15, %rdi movq %r12, %rdx callq 0xada2 movl 0x8(%rbx), %eax incq %r13 movslq %eax, %rcx cmpq %rcx, %r13 jl 0xa3d7 movq %r14, %rdi callq 0x10814 movq %rax, %r15 movl 0x8(%rbx), %esi movq %r14, %rdi callq 0x1078c movl 0x8(%rbx), %eax testl %eax, %eax jle 0xa486 leaq 0x48(%rbx), %r12 xorl %ebp, %ebp leaq 0x30(%rsp), %r13 movq 0xc0(%rbx), %rcx movq (%rcx,%rbp,8), %rcx testq %rcx, %rcx je 0xa47b movq %rcx, 0x30(%rsp) movq 0x50(%rbx), %rsi cmpq 0x58(%rbx), %rsi je 0xa46d movq %rcx, (%rsi) addq $0x8, %rsi movq %rsi, 0x50(%rbx) jmp 0xa47b movq %r12, %rdi movq %r13, %rdx callq 0xada2 movl 0x8(%rbx), %eax incq %rbp movslq %eax, %rcx cmpq %rcx, %rbp jl 0xa441 movq %r14, %rdi callq 0x10814 cmpq %rax, %r15 cmovaq %r15, %rax movq 0x78(%rbx), %rcx movslq 0x8(%rbx), %rdx cmpq $0x2, %rdx jb 0xa4ca shlq $0x3, %rdx leaq 0x8(%rcx), %rsi movq (%rcx), %rdi addq $-0x8, %rdx movq (%rsi), %r8 cmpq %r8, %rdi cmovbeq %r8, %rdi cmovbq %rsi, %rcx addq $0x8, %rsi addq $-0x8, %rdx jne 0xa4b2 movq (%rcx), %rcx movq 0x20(%rbx), %rdx leaq 0x30(%rsp), %rdi movq %rdx, (%rdi) movq %rcx, 0x8(%rdi) movq %rax, 
0x10(%rdi) movq 0x138(%rbx), %r9 movq 0xead9(%rip), %rcx # 0x18fc8 movq 0xea92(%rip), %r8 # 0x18f88 leaq 0x50(%rsp), %rsi movl $0x3, %edx callq 0x6350 movq 0x50(%rsp), %r15 movq 0x58(%rsp), %r14 movq 0x60(%rsp), %rbp leaq 0x10(%rsp), %rdi callq 0x944c cmpl $0x0, (%rbx) jne 0xa545 cmpq 0x28(%rbx), %r15 je 0xa545 movq 0x140(%rbx), %rax cmpq 0xea39(%rip), %rax # 0x18f70 je 0xa545 leaq 0x140(%rbx), %rdi callq 0x6090 leaq 0xf0(%rbx), %rdi movslq 0x8(%rbx), %rsi imulq %r15, %rsi callq 0xacde cmpl $0x0, (%rbx) jne 0xa592 cmpq 0x28(%rbx), %r15 je 0xa592 movslq 0x8(%rbx), %rsi imulq %r15, %rsi movq 0xf0(%rbx), %rdi movq 0x138(%rbx), %r8 leaq 0x140(%rbx), %r9 movq 0xea50(%rip), %rcx # 0x18fd8 movl $0x1, %edx callq 0x6050 movq %r15, 0x28(%rbx) cmpq %r14, %r15 jb 0xac14 testq %r14, %r14 je 0xa9d5 movq 0x18(%rbx), %rax cmpq %rbp, %rax cmovbq %rax, %rbp movl $0x9, %edi callq 0x97be movl %eax, 0x10(%rsp) leaq 0x30(%rsp), %r15 movl $0x1, %edi movq %r15, %rsi callq 0x6290 movaps (%r15), %xmm0 movups %xmm0, 0x18(%rsp) movq $0x0, 0x28(%rsp) cmpl $0x0, 0x8(%rbx) jle 0xa632 xorl %r14d, %r14d xorl %r15d, %r15d movq 0x78(%rbx), %rax movq (%rax,%r15,8), %rdx cmpq %rbp, %rdx cmovaeq %rbp, %rdx movq 0x108(%rbx), %rdi addq %r14, %rdi movq 0x20(%rbx), %rsi imulq %r15, %rsi addq 0xa8(%rbx), %rsi callq 0x6250 addq %rbp, 0x28(%rsp) incq %r15 movslq 0x8(%rbx), %rax addq %rbp, %r14 cmpq %rax, %r15 jl 0xa5f1 movq 0x108(%rbx), %rdi movq 0x120(%rbx), %rcx movq 0x138(%rbx), %rax movq %rax, (%rsp) movq 0xe90e(%rip), %rdx # 0x18f60 movl %ebp, %esi movl %ebp, %r8d movq %rdx, %r9 callq 0x6150 cmpl $0x0, 0x8(%rbx) jle 0xa6aa xorl %r14d, %r14d xorl %r15d, %r15d movq 0xc0(%rbx), %rax movq (%rax,%r15,8), %rdx cmpq %rbp, %rdx cmovaeq %rbp, %rdx movq 0x28(%rbx), %rdi imulq %r15, %rdi addq 0xf0(%rbx), %rdi movq 0x120(%rbx), %rsi addq %r14, %rsi callq 0x6250 incq %r15 movslq 0x8(%rbx), %rax addq %rbp, %r14 cmpq %rax, %r15 jl 0xa66b leaq 0x10(%rsp), %rdi callq 0x944c movl (%rbx), %eax testl %eax, %eax je 0xaa1e 
cmpl $0x1, %eax jne 0xab39 movl $0xa, %edi callq 0x97be movl %eax, 0x10(%rsp) leaq 0x30(%rsp), %r15 movl $0x1, %edi movq %r15, %rsi callq 0x6290 movaps (%r15), %xmm0 movups %xmm0, 0x18(%rsp) movq $0x0, 0x28(%rsp) movl $0xb, %edi callq 0x97be movl %eax, 0x30(%rsp) leaq 0x50(%rsp), %r15 movl $0x1, %edi movq %r15, %rsi callq 0x6290 movaps (%r15), %xmm0 movups %xmm0, 0x38(%rsp) movq $0x0, 0x48(%rsp) movslq 0x8(%rbx), %rax testq %rax, %rax jle 0xa79a movq 0x78(%rbx), %rcx movq 0x90(%rbx), %rdx movq 0xc0(%rbx), %rsi movq 0xd8(%rbx), %rdi xorl %r8d, %r8d movq (%rcx,%r8,8), %r9 cmpq %rbp, %r9 movq %rbp, %r10 cmovbq %r9, %r10 movq (%rsi,%r8,8), %r11 cmpq %rbp, %r11 cmovaeq %rbp, %r11 subq %r10, %r9 movq %r9, (%rcx,%r8,8) movq 0x20(%rbx), %r9 imulq %r8, %r9 addq %r10, %r9 movq %r9, (%rdx,%r8,8) subq %r11, (%rsi,%r8,8) movq 0x28(%rbx), %r9 imulq %r8, %r9 addq %r11, %r9 movq %r9, (%rdi,%r8,8) incq %r8 cmpq %r8, %rax jne 0xa750 leaq 0x50(%rsp), %rax movl $0xffff8002, (%rax) # imm = 0xFFFF8002 movb $0x1, %r14b movq 0xe821(%rip), %rbp # 0x18fd0 movl 0x8(%rbx), %eax testl %eax, %eax jle 0xa84e xorl %r15d, %r15d xorl %r12d, %r12d movq 0x148(%rbx), %rcx cmpq %rbp, (%rcx,%r15) jne 0xa83b movq 0x10(%rbx), %r13 movq 0xc0(%rbx), %rsi movq (%rsi,%r15), %rdx cmpq %r13, %rdx cmovbq %rdx, %r13 addq %r13, 0x28(%rsp) movq 0xd8(%rbx), %rdx testq %r13, %r13 je 0xa833 addq %r15, %rcx movq 0xf0(%rbx), %rdi movq 0x138(%rbx), %r9 addq (%rdx,%r15), %rdi movq %rcx, (%rsp) movl %r13d, %esi movq 0xe749(%rip), %rdx # 0x18f60 movl %r12d, %ecx xorl %r8d, %r8d callq 0x63d0 movq 0xc0(%rbx), %rsi movq 0xd8(%rbx), %rdx movl 0x8(%rbx), %eax subq %r13, (%rsi,%r15) addq %r13, (%rdx,%r15) incq %r12 movslq %eax, %rcx addq $0x8, %r15 cmpq %rcx, %r12 jl 0xa7c0 testb $0x1, %r14b je 0xa863 movq 0x138(%rbx), %rdi callq 0x6210 movl 0x8(%rbx), %eax testl %eax, %eax jle 0xa91e xorl %r12d, %r12d movslq %eax, %rcx addq %r12, %rcx movq 0x148(%rbx), %rdx cmpq %rbp, (%rdx,%rcx,8) jne 0xa90f movq 0x10(%rbx), %r13 movq 
0x78(%rbx), %rax movq (%rax,%r12,8), %rax cmpq %r13, %rax cmovbq %rax, %r13 addq %r13, 0x48(%rsp) testq %r13, %r13 je 0xa8f9 leaq (%rdx,%rcx,8), %rax movq 0x90(%rbx), %rcx movq 0xa8(%rbx), %rdi addq (%rcx,%r12,8), %rdi movq 0x138(%rbx), %r9 testb $0x1, %r14b je 0xa8e0 movq %rax, (%rsp) movl %r13d, %esi movq 0xe68d(%rip), %rdx # 0x18f60 movl %r12d, %ecx xorl %r8d, %r8d callq 0x6070 jmp 0xa8f9 movq %rax, (%rsp) movl %r13d, %esi movq 0xe672(%rip), %rdx # 0x18f60 movl %r12d, %ecx xorl %r8d, %r8d callq 0x6100 movq 0x78(%rbx), %rax subq %r13, (%rax,%r12,8) movq 0x90(%rbx), %rax addq %r13, (%rax,%r12,8) movl 0x8(%rbx), %eax incq %r12 movslq %eax, %rcx cmpq %rcx, %r12 jl 0xa86e movq 0x148(%rbx), %rsi movq 0x150(%rbx), %rdi subq %rsi, %rdi shrq $0x3, %rdi movq 0x160(%rbx), %rcx leaq 0x50(%rsp), %rdx xorl %r8d, %r8d callq 0x62a0 xorl %r14d, %r14d cmpl $0xffff8002, 0x50(%rsp) # imm = 0xFFFF8002 jne 0xa7af movslq 0x8(%rbx), %rax testq %rax, %rax jle 0xa9c6 movq 0x78(%rbx), %rcx movq 0xc0(%rbx), %rdx movq 0x148(%rbx), %rsi movq 0xd8(%rbx), %rdi leaq (%rsi,%rax,8), %r8 xorl %r9d, %r9d cmpq $0x0, (%rdx,%r9,8) jne 0xabf5 cmpq $0x0, (%rcx,%r9,8) jne 0xabd6 cmpq %rbp, (%rsi,%r9,8) jne 0xabb7 cmpq %rbp, (%r8,%r9,8) jne 0xab98 movq (%rdi,%r9,8), %r10 movq 0x28(%rbx), %r11 imulq %r9, %r11 subq %r11, %r10 movq %r10, (%rdx,%r9,8) incq %r9 cmpq %r9, %rax jne 0xa981 leaq 0x30(%rsp), %rdi callq 0x944c jmp 0xab2f movslq 0x8(%rbx), %rax testq %rax, %rax jle 0xab39 movq 0xc0(%rbx), %rcx movq 0x78(%rbx), %rdx movq 0x90(%rbx), %rsi movq 0xd8(%rbx), %rdi xorl %r8d, %r8d xorl %r9d, %r9d movq %r8, (%rcx,%r9,8) movq %r8, (%rdx,%r9,8) movq %r8, (%rdi,%r9,8) movq %r8, (%rsi,%r9,8) incq %r9 cmpq %r9, %rax jne 0xaa01 jmp 0xab39 movl $0xc, %edi callq 0x97be movl %eax, 0x10(%rsp) leaq 0x30(%rsp), %r12 movl $0x1, %edi movq %r12, %rsi callq 0x6290 movaps (%r12), %xmm0 movups %xmm0, 0x18(%rsp) movq $0x0, 0x28(%rsp) cmpq $0x0, 0x28(%rbx) je 0xac33 movq 0x140(%rbx), %rsi cmpq 0xe506(%rip), %rsi # 0x18f70 je 
0xac52 xorl %edi, %edi callq 0x64f0 movl 0x8(%rbx), %eax testl %eax, %eax jle 0xab21 xorl %r12d, %r12d movq %rbp, 0x70(%rsp) movq 0x78(%rbx), %rcx movq (%rcx,%r12,8), %r14 cmpq %rbp, %r14 movq %rbp, %rcx cmovbq %r14, %rcx subq %rcx, %r14 movq 0x20(%rbx), %r15 movq 0x28(%rbx), %r13 movslq 0x4(%rbx), %rdx addq %r14, 0x28(%rsp) testq %r14, %r14 je 0xab12 imulq %rdx, %r13 addq %rcx, %r13 imulq %r12, %r15 addq %rcx, %r15 movq 0x10(%rbx), %rbp movq 0x140(%rbx), %rax cmpq %rbp, %r14 cmovbq %r14, %rbp movq 0xa8(%rbx), %rdi addq %r15, %rdi movq %rax, 0x8(%rsp) movq 0xe475(%rip), %rdx # 0x18f60 movq %rdx, (%rsp) movl %ebp, %esi movl %r12d, %ecx movq %r13, %r8 movl %ebp, %r9d callq 0x6370 addq %rbp, %r15 addq %rbp, %r13 subq %rbp, %r14 jne 0xaac3 movl 0x8(%rbx), %eax movq 0x70(%rsp), %rbp incq %r12 movslq %eax, %rcx cmpq %rcx, %r12 jl 0xaa8a movq 0x140(%rbx), %rsi xorl %edi, %edi callq 0x64f0 leaq 0x10(%rsp), %rdi callq 0x944c movq 0x28(%rbx), %rax movq %rax, 0x20(%rbx) leaq 0xa8(%rbx), %rdi movslq 0x8(%rbx), %rsi imulq %rax, %rsi callq 0xacde movslq 0x8(%rbx), %rax testq %rax, %rax jle 0xab89 movq 0x78(%rbx), %rcx movq 0x90(%rbx), %rdx movq 0xd8(%rbx), %rsi xorl %edi, %edi xorl %r8d, %r8d movq %rdi, (%rcx,%r8,8) movq %rdi, (%rdx,%r8,8) movq %rdi, (%rsi,%r8,8) incq %r8 cmpq %r8, %rax jne 0xab75 addq $0x78, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x8b8a(%rip), %rdi # 0x13729 leaq 0x8a07(%rip), %rsi # 0x135ad leaq 0x8aed(%rip), %rcx # 0x1369a movl $0x137, %edx # imm = 0x137 callq 0x61c0 leaq 0x8b4d(%rip), %rdi # 0x1370b leaq 0x89e8(%rip), %rsi # 0x135ad leaq 0x8ace(%rip), %rcx # 0x1369a movl $0x136, %edx # imm = 0x136 callq 0x61c0 leaq 0x8b19(%rip), %rdi # 0x136f6 leaq 0x89c9(%rip), %rsi # 0x135ad leaq 0x8aaf(%rip), %rcx # 0x1369a movl $0x135, %edx # imm = 0x135 callq 0x61c0 leaq 0x8ae5(%rip), %rdi # 0x136e1 leaq 0x89aa(%rip), %rsi # 0x135ad leaq 0x8a90(%rip), %rcx # 0x1369a movl $0x134, %edx # imm = 0x134 callq 0x61c0 leaq 0x8a68(%rip), %rdi 
# 0x13683 leaq 0x898b(%rip), %rsi # 0x135ad leaq 0x8a71(%rip), %rcx # 0x1369a movl $0xa8, %edx callq 0x61c0 leaq 0x8a7d(%rip), %rdi # 0x136b7 leaq 0x896c(%rip), %rsi # 0x135ad leaq 0x8a52(%rip), %rcx # 0x1369a movl $0xce, %edx callq 0x61c0 leaq 0x8a6d(%rip), %rdi # 0x136c6 leaq 0x894d(%rip), %rsi # 0x135ad leaq 0x8a33(%rip), %rcx # 0x1369a movl $0xcf, %edx callq 0x61c0 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac96 jmp 0xac87 jmp 0xac87 movq %rax, %rbx leaq 0x30(%rsp), %rdi callq 0x944c jmp 0xac99 movq %rax, %rbx leaq 0x10(%rsp), %rdi callq 0x944c movq %rbx, %rdi callq 0x6520 nop
/wijnand-suijlen[P]bsponmpi/src/a2a.cc
bsplib::Unbuf::wait()
// Drives every registered unbuffered transfer to completion, then resets
// all bookkeeping. Large messages are moved in chunks of at most
// m_max_msg_size bytes: each time a chunk's request completes, the next
// chunk of that entry is posted on the same request slot, until the entry
// has no bytes left.
void Unbuf :: wait( )
{
    while (true) {
        // MPI_Waitsome reports MPI_UNDEFINED in outcount when there are no
        // active requests left in m_reqs -- that is the loop's exit signal.
        int outcount = MPI_UNDEFINED;
        MPI_Waitsome( int(m_reqs.size()), m_reqs.data(),
                      &outcount, m_ready.data(), MPI_STATUSES_IGNORE );
        if (outcount == MPI_UNDEFINED ) break;

        for (int i = 0; i < outcount; ++i ) {
            // Completed-request indices address m_reqs; by construction the
            // first m_recvs.size() slots are receives, the rest are sends.
            size_t r = m_ready[i];
            if (r < m_recvs.size()) {
                Entry & u = m_recvs[r];
                // u.addr / u.size describe the portion not yet posted.
                // Carve off the next chunk and advance the entry past it.
                size_t size = std::min( m_max_msg_size, u.size );
                char * addr = const_cast<char *>( u.addr );
                char * next_addr = addr + size;
                u.addr = next_addr;
                u.size -= size;
                const int tag = u.tag;
                // Post the next chunk; if nothing remains the request slot
                // is simply left as MPI_Waitsome finished it.
                if (size > 0)
                    MPI_Irecv( addr, int(size), MPI_BYTE, u.pid, tag,
                               m_comm, &m_reqs[r]);
            }
            else {
                // Same chunking logic for the send side of the table.
                Entry & u = m_sends[r - m_recvs.size()];
                size_t size = std::min( m_max_msg_size, u.size );
                char * addr = const_cast<char *>( u.addr );
                char * next_addr = addr + size;
                u.addr = next_addr;
                u.size -= size;
                const int tag = u.tag;
                if (size > 0)
                    MPI_Isend(addr, int(size), MPI_BYTE, u.pid, tag,
                              m_comm, &m_reqs[r]);
            }
        }
    }
    // Everything finished: drop all entries, requests and scratch indices
    // so the object is ready for the next batch of transfers.
    m_sends.clear();
    m_recvs.clear();
    m_reqs.clear();
    m_ready.clear();
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x10, %rsp movq %rdi, %rbx movl $0xffff8002, %ebp # imm = 0xFFFF8002 leaq 0xc(%rsp), %r14 movl %ebp, (%r14) movq 0x48(%rdi), %rsi movq 0x50(%rdi), %rdi subq %rsi, %rdi shrq $0x3, %rdi movq 0x60(%rbx), %rcx movq %r14, %rdx xorl %r8d, %r8d callq 0x62a0 movl (%r14), %eax cmpl %ebp, %eax je 0xbd77 movq 0xd300(%rip), %r14 # 0x18f60 leaq 0xc(%rsp), %r15 testl %eax, %eax jle 0xbd49 xorl %r12d, %r12d movq 0x60(%rbx), %rax movslq (%rax,%r12,4), %rax movq 0x30(%rbx), %rdx movq 0x38(%rbx), %rsi subq %rdx, %rsi sarq $0x5, %rsi movq %rax, %rcx subq %rsi, %rcx jae 0xbce4 movq %rax, %rcx shlq $0x5, %rcx movq 0x8(%rdx,%rcx), %rdi movq 0x10(%rdx,%rcx), %r8 movq 0x10(%rbx), %rsi cmpq %rsi, %r8 cmovbq %r8, %rsi leaq (%rdi,%rsi), %r9 movq %r9, 0x8(%rdx,%rcx) subq %rsi, %r8 movq %r8, 0x10(%rdx,%rcx) testq %rsi, %rsi je 0xbd38 addq %rcx, %rdx movl (%rdx), %ecx movl 0x18(%rdx), %r8d movq 0x8(%rbx), %r9 shlq $0x3, %rax addq 0x48(%rbx), %rax movq %rax, (%rsp) movq %r14, %rdx callq 0x63d0 jmp 0xbd38 movq 0x10(%rbx), %rsi movq 0x18(%rbx), %rdx shlq $0x5, %rcx movq 0x8(%rdx,%rcx), %rdi movq 0x10(%rdx,%rcx), %r8 cmpq %rsi, %r8 cmovbq %r8, %rsi leaq (%rdi,%rsi), %r9 movq %r9, 0x8(%rdx,%rcx) subq %rsi, %r8 movq %r8, 0x10(%rdx,%rcx) testq %rsi, %rsi je 0xbd38 addq %rcx, %rdx movl (%rdx), %ecx movl 0x18(%rdx), %r8d movq 0x8(%rbx), %r9 shlq $0x3, %rax addq 0x48(%rbx), %rax movq %rax, (%rsp) movq %r14, %rdx callq 0x6100 incq %r12 movslq 0xc(%rsp), %rax cmpq %rax, %r12 jl 0xbc70 movl %ebp, 0xc(%rsp) movq 0x48(%rbx), %rsi movq 0x50(%rbx), %rdi subq %rsi, %rdi shrq $0x3, %rdi movq 0x60(%rbx), %rcx movq %r15, %rdx xorl %r8d, %r8d callq 0x62a0 movl 0xc(%rsp), %eax cmpl %ebp, %eax jne 0xbc65 movq 0x18(%rbx), %rax cmpq %rax, 0x20(%rbx) je 0xbd85 movq %rax, 0x20(%rbx) movq 0x30(%rbx), %rax cmpq %rax, 0x38(%rbx) je 0xbd93 movq %rax, 0x38(%rbx) movq 0x48(%rbx), %rax cmpq %rax, 0x50(%rbx) je 0xbda1 movq %rax, 0x50(%rbx) movq 0x60(%rbx), %rax cmpq 
%rax, 0x68(%rbx) je 0xbdaf movq %rax, 0x68(%rbx) addq $0x10, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/wijnand-suijlen[P]bsponmpi/src/unbuf.cc
bsplib::exception::~exception()
// Copy constructor. std::ostringstream is not copyable, so the new
// stream is seeded with the textual contents of the source's stream;
// m_buf starts out default-constructed (it is scratch state, not copied).
exception( const exception & e )
    : m_stream( e.m_stream.str() )
    , m_buf( )
{}
pushq %rbx movq %rdi, %rbx leaq 0xcd23(%rip), %rax # 0x18c78 movq %rax, (%rdi) movq 0x180(%rdi), %rdi leaq 0x190(%rbx), %rax cmpq %rax, %rdi je 0xbf76 movq (%rax), %rsi incq %rsi callq 0x62c0 leaq 0x8(%rbx), %rdi movq 0xd027(%rip), %rsi # 0x18fa8 callq 0x6140 leaq 0x78(%rbx), %rdi callq 0x60c0 movq %rbx, %rdi popq %rbx jmp 0x6540
/wijnand-suijlen[P]bsponmpi/src/exception.h
bsplib::Rdma::hpget(int, unsigned long, unsigned long, void*, unsigned long)
// High-performance (unbuffered) get: fetch `size` bytes starting at
// `src_offset` inside remote slot `src_slot` of process `src_pid` into
// local memory `dst`. Transfers smaller than m_min_n_hp_msg_size fall
// back to the buffered get() path, which is cheaper for small payloads.
void Rdma::hpget( int src_pid, Memslot src_slot, size_t src_offset,
        void * dst, size_t size )
{
#ifdef PROFILE
    TicToc t( TicToc::HPGET );
#endif
    if ( size < m_min_n_hp_msg_size ) {
        get( src_pid, src_slot, src_offset, dst, size );
        return;
    }

    // The local view of the source slot must not have been re-registered
    // (pushed) in this superstep, or the offset bookkeeping would be stale.
    assert( !( slot( m_pid, src_slot ).status & Memblock::PUSHED) );

    // Register `dst` as a fresh anonymous local slot for this transfer.
    Memslot dst_slot = m_local_slots.size();
    m_local_slots.push_back( dst );

    // Pre-post the unbuffered receive; the returned tag pairs it with the
    // sender's matching MPI_Isend.
    int tag = m_unbuf.recv( src_pid, dst, size );

    // NOTE(review): src_pid appears twice in this initializer (the two
    // leading pid fields of Action) -- presumably target-pid and source-pid
    // coincide for HPGET; confirm against Action's field order.
    Action action = { Action::HPGET, src_pid, src_pid, m_pid, tag,
                      src_slot, dst_slot, src_offset, size };
    m_send_actions.push_back( action );
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x78, %rsp movq %r9, %r12 movq %r8, %r13 movq %rcx, 0x8(%rsp) movq %rdx, %rbp movl %esi, %ebx movq %rdi, %r14 movq %r8, 0x10(%rsp) movl $0x6, %edi callq 0x97be movl %eax, 0x20(%rsp) leaq 0x40(%rsp), %r15 movl $0x1, %edi movq %r15, %rsi callq 0x6290 movaps (%r15), %xmm0 movups %xmm0, 0x28(%rsp) movq $0x0, 0x38(%rsp) cmpq %r12, 0x2f8(%r14) jbe 0xcbb0 movq %r14, %rdi movl %ebx, %esi movq %rbp, %rdx movq 0x8(%rsp), %rcx movq %r13, %r8 movq %r12, %r9 callq 0xc9e2 jmp 0xcc83 movslq 0x2f4(%r14), %rax movq %rbp, 0x18(%rsp) imulq %rbp, %rax leaq (%rax,%rax,2), %rax shlq $0x3, %rax addq 0x338(%r14), %rax movslq 0x2f0(%r14), %rcx leaq (%rcx,%rcx,2), %rcx testb $0x1, 0x10(%rax,%rcx,8) jne 0xcc9c movq 0x378(%r14), %rsi movq %rsi, %r15 subq 0x370(%r14), %r15 cmpq 0x380(%r14), %rsi movl %ebx, %ebp je 0xcc0e movq %r13, (%rsi) addq $0x8, 0x378(%r14) jmp 0xcc24 leaq 0x370(%r14), %rdi leaq 0x10(%rsp), %rdx callq 0xf244 movq 0x10(%rsp), %r13 leaq 0x428(%r14), %rdi movl %ebp, %esi movq %r13, %rdx movq %r12, %rcx callq 0xb968 sarq $0x3, %r15 leaq 0x40(%rsp), %rsi movl $0x2, (%rsi) movl %ebp, 0x4(%rsi) movl %ebp, 0x8(%rsi) movl 0x2f0(%r14), %ecx movl %ecx, 0xc(%rsi) movl %eax, 0x10(%rsi) movq 0x18(%rsp), %rax movq %rax, 0x18(%rsi) movq %r15, 0x20(%rsi) movq 0x8(%rsp), %rax movq %rax, 0x28(%rsi) movq %r12, 0x30(%rsi) addq $0x388, %r14 # imm = 0x388 movq %r14, %rdi callq 0xe564 leaq 0x20(%rsp), %rdi callq 0x944c addq $0x78, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0x6c7c(%rip), %rdi # 0x1391f leaq 0x6b33(%rip), %rsi # 0x137dd leaq 0x6ce1(%rip), %rcx # 0x13992 movl $0x8f, %edx callq 0x61c0 jmp 0xccc1 jmp 0xccc1 jmp 0xccc1 movq %rax, %rbx leaq 0x20(%rsp), %rdi callq 0x944c movq %rbx, %rdi callq 0x6520
/wijnand-suijlen[P]bsponmpi/src/rdma.cc
spawner::Spawner::Spawner<0ul, 1ul, 2ul, 3ul, 4ul, 5ul, 6ul, 7ul, 8ul, 9ul, 10ul, 11ul, 12ul, 13ul, 14ul, 15ul, 16ul, 17ul, 18ul, 19ul, 20ul>(std::vector<char, std::allocator<char>>&&, std::integer_sequence<unsigned long, 0ul, 1ul, 2ul, 3ul, 4ul, 5ul, 6ul, 7ul, 8ul, 9ul, 10ul, 11ul, 12ul, 13ul, 14ul, 15ul, 16ul, 17ul, 18ul, 19ul, 20ul>)
// Constructs the decoder. The pack expansion builds `fn` with one lambda
// per State value I; each lambda forwards to the decodeParam overload
// selected by StateTraits<State(I)>::type, giving a jump table indexed by
// decoder state. mRodata takes ownership of the raw .rodata image, and
// mRefPos is located inside it immediately afterwards.
// NOTE(review): this relies on fn, mRodata, mRefPos being declared in that
// order, since members are initialized in declaration order -- confirm in
// the class definition.
Spawner(std::vector<char>&& pRodata, std::integer_sequence<size_t, I...>)
    : fn({[this](){decodeParam(typename StateTraits<State(I)>::type());}...})
    , mRodata(std::move(pRodata))
    // Anchor marker: all log-point references are relative to this position.
    , mRefPos(std::string_view((char*)mRodata.data(), mRodata.size()).find("LoggerRefXD"))
{
    // Without the anchor the rodata image cannot be interpreted at all.
    if (std::string_view::npos == mRefPos)
        throw std::runtime_error("LoggerRefXD not found in rodata");
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdi, %rbx andq $0x0, 0x8(%rdi) movq %rdi, (%rdi) leaq 0x4bb(%rip), %rax # 0x2e38 movq %rax, 0x18(%rdi) leaq 0x4b6(%rip), %rax # 0x2e3e movq %rax, 0x10(%rdi) andq $0x0, 0x28(%rdi) movq %rdi, 0x20(%rdi) leaq 0x532(%rip), %rax # 0x2ece movq %rax, 0x38(%rdi) leaq 0x52d(%rip), %rax # 0x2ed4 movq %rax, 0x30(%rdi) andq $0x0, 0x48(%rdi) movq %rdi, 0x40(%rdi) leaq 0x595(%rip), %rax # 0x2f50 movq %rax, 0x58(%rdi) leaq 0x590(%rip), %rax # 0x2f56 movq %rax, 0x50(%rdi) andq $0x0, 0x68(%rdi) movq %rdi, 0x60(%rdi) leaq 0x690(%rip), %rax # 0x306a movq %rax, 0x78(%rdi) leaq 0x68b(%rip), %rax # 0x3070 movq %rax, 0x70(%rdi) andq $0x0, 0x88(%rdi) movq %rdi, 0x80(%rdi) leaq 0x91d(%rip), %rax # 0x331c movq %rax, 0x98(%rdi) leaq 0x915(%rip), %rax # 0x3322 movq %rax, 0x90(%rdi) andq $0x0, 0xa8(%rdi) movq %rdi, 0xa0(%rdi) leaq 0x9ae(%rip), %rax # 0x33d8 movq %rax, 0xb8(%rdi) leaq 0x9a6(%rip), %rax # 0x33de movq %rax, 0xb0(%rdi) andq $0x0, 0xc8(%rdi) movq %rdi, 0xc0(%rdi) leaq 0xa3f(%rip), %rax # 0x3494 movq %rax, 0xd8(%rdi) leaq 0xa37(%rip), %rax # 0x349a movq %rax, 0xd0(%rdi) andq $0x0, 0xe8(%rdi) movq %rdi, 0xe0(%rdi) leaq 0xad0(%rip), %rax # 0x3550 movq %rax, 0xf8(%rdi) leaq 0xac8(%rip), %rax # 0x3556 movq %rax, 0xf0(%rdi) andq $0x0, 0x108(%rdi) movq %rdi, 0x100(%rdi) leaq 0xb61(%rip), %rax # 0x360c movq %rax, 0x118(%rdi) leaq 0xb59(%rip), %rax # 0x3612 movq %rax, 0x110(%rdi) andq $0x0, 0x128(%rdi) movq %rdi, 0x120(%rdi) leaq 0xbf2(%rip), %rax # 0x36c8 movq %rax, 0x138(%rdi) leaq 0xbea(%rip), %rax # 0x36ce movq %rax, 0x130(%rdi) andq $0x0, 0x148(%rdi) movq %rdi, 0x140(%rdi) leaq 0xc83(%rip), %rax # 0x3784 movq %rax, 0x158(%rdi) leaq 0xc7b(%rip), %rax # 0x378a movq %rax, 0x150(%rdi) andq $0x0, 0x168(%rdi) movq %rdi, 0x160(%rdi) leaq 0xd14(%rip), %rax # 0x3840 movq %rax, 0x178(%rdi) leaq 0xd0c(%rip), %rax # 0x3846 movq %rax, 0x170(%rdi) andq $0x0, 0x188(%rdi) movq %rdi, 0x180(%rdi) leaq 0xda5(%rip), %rax # 0x38fc movq 
%rax, 0x198(%rdi) leaq 0xd9d(%rip), %rax # 0x3902 movq %rax, 0x190(%rdi) andq $0x0, 0x1a8(%rdi) movq %rdi, 0x1a0(%rdi) leaq 0xe36(%rip), %rax # 0x39b8 movq %rax, 0x1b8(%rdi) leaq 0xe2e(%rip), %rax # 0x39be movq %rax, 0x1b0(%rdi) andq $0x0, 0x1c8(%rdi) movq %rdi, 0x1c0(%rdi) leaq 0xec7(%rip), %rax # 0x3a74 movq %rax, 0x1d8(%rdi) leaq 0xebf(%rip), %rax # 0x3a7a movq %rax, 0x1d0(%rdi) andq $0x0, 0x1e8(%rdi) movq %rdi, 0x1e0(%rdi) leaq 0xf5c(%rip), %rax # 0x3b34 movq %rax, 0x1f8(%rdi) leaq 0xf54(%rip), %rax # 0x3b3a movq %rax, 0x1f0(%rdi) andq $0x0, 0x208(%rdi) movq %rdi, 0x200(%rdi) leaq 0xff1(%rip), %rax # 0x3bf4 movq %rax, 0x218(%rdi) leaq 0xfe9(%rip), %rax # 0x3bfa movq %rax, 0x210(%rdi) andq $0x0, 0x228(%rdi) movq %rdi, 0x220(%rdi) leaq 0x1082(%rip), %rax # 0x3cb0 movq %rax, 0x238(%rdi) leaq 0x107a(%rip), %rax # 0x3cb6 movq %rax, 0x230(%rdi) andq $0x0, 0x248(%rdi) movq %rdi, 0x240(%rdi) leaq 0x10d9(%rip), %rax # 0x3d32 movq %rax, 0x258(%rdi) leaq 0x10d1(%rip), %rax # 0x3d38 movq %rax, 0x250(%rdi) andq $0x0, 0x268(%rdi) movq %rdi, 0x260(%rdi) leaq 0x1232(%rip), %rax # 0x3eb6 movq %rax, 0x278(%rdi) leaq 0x122a(%rip), %rax # 0x3ebc movq %rax, 0x270(%rdi) andq $0x0, 0x288(%rdi) movq %rdi, 0x280(%rdi) leaq 0x1289(%rip), %rax # 0x3f38 movq %rax, 0x298(%rdi) leaq 0x1281(%rip), %rax # 0x3f3e movq %rax, 0x290(%rdi) movups (%rsi), %xmm0 movups %xmm0, 0x2a0(%rdi) movq 0x10(%rsi), %rax movq %rax, 0x2b0(%rdi) xorps %xmm0, %xmm0 andq $0x0, 0x10(%rsi) movups %xmm0, (%rsi) andl $0x0, 0x2b8(%rdi) andq $0x0, 0x1002c0(%rdi) movq 0x2a0(%rdi), %rax movq 0x2a8(%rdi), %rcx subq %rax, %rcx leaq 0x8(%rsp), %rdi movq %rcx, (%rdi) movq %rax, 0x8(%rdi) leaq 0x23b3(%rip), %rsi # 0x50ca xorl %edx, %edx callq 0x2de4 movq %rax, 0x1002c8(%rbx) andq $0x0, 0x1002d0(%rbx) leaq 0x1002f0(%rbx), %r14 movq %r14, %rdi callq 0x2120 leaq 0x100488(%rbx), %rax movq %rax, 0x100478(%rbx) andq $0x0, 0x100480(%rbx) movb $0x0, 0x100488(%rbx) cmpq $-0x1, 0x1002c8(%rbx) je 0x2d6f addq $0x18, %rsp popq %rbx popq 
%r12 popq %r14 popq %r15 retq pushq $0x10 popq %rdi callq 0x20e0 movq %rax, %r12 leaq 0x2355(%rip), %rsi # 0x50d6 movq %rax, %rdi callq 0x2090 movq 0x6260(%rip), %rsi # 0x8ff0 movq 0x6231(%rip), %rdx # 0x8fc8 movq %r12, %rdi callq 0x22d0 movq %rax, %r15 jmp 0x2daf movq %rax, %r15 movq %r12, %rdi callq 0x2140 leaq 0x100478(%rbx), %rdi callq 0x20d0 movq %r14, %rdi callq 0x2160 jmp 0x2dc8 movq %rax, %r15 leaq 0x2a0(%rbx), %rdi callq 0x415e movq %rbx, %rdi callq 0x2e10 movq %r15, %rdi callq 0x22e0
/therooftopprinz[P]Logless/src/spawner.cpp
spawner::Spawner::decodeParam(spawner::tag)
// Handles one type-tag byte of the log stream: emits the literal format
// text up to the next '%'...';' token into mSs, then switches mState to
// the parameter-decoding state matching the tag. An unknown tag flushes
// the accumulated line to stdout and restarts at State::Logpoint.
void decodeParam(tag pTag)
{
    auto pData = pTag.data;
    // ntok points at the next '%'...';' token inside the format string
    // starting from the current log-point cursor.
    auto ntok = findNextToken('%', ';', tokenFormat, mLogPoint);
    // Literal segment between the cursor and the token.
    size_t sglen = uintptr_t(ntok)-uintptr_t(mLogPoint);
    std::string logSeg(mLogPoint, sglen);
    // std::cout << "state: Tag: " << std::hex << std::setw(2) << std::setfill('0') << unsigned(pData) << "\n";
    // std::cout << "seg str:" << logSeg << "\n";
    mSs << logSeg;
    // Advance the cursor past the token (token text + terminating ';'),
    // but only if we are not already at the end of the format string.
    if (*ntok) mLogPoint = ntok + tokenFormat.size() + 1;
    // Map the tag byte onto the decoder state for the parameter type that
    // follows in the stream.
    if (pData == TypeTraits<unsigned char>::type_id) mState = State::ParamU8;
    else if (pData == TypeTraits<signed char>::type_id) mState = State::Param8;
    else if (pData == TypeTraits<unsigned short>::type_id) mState = State::ParamU16;
    else if (pData == TypeTraits<short>::type_id) mState = State::Param16;
    else if (pData == TypeTraits<unsigned int>::type_id) mState = State::ParamU32;
    else if (pData == TypeTraits<int>::type_id) mState = State::Param32;
    else if (pData == TypeTraits<unsigned long>::type_id) mState = State::ParamU32or34;
    else if (pData == TypeTraits<long>::type_id) mState = State::Param32or34;
    else if (pData == TypeTraits<unsigned long long>::type_id) mState = State::ParamU64;
    else if (pData == TypeTraits<long long>::type_id) mState = State::Param64;
    else if (pData == TypeTraits<float>::type_id) mState = State::ParamFloat;
    else if (pData == TypeTraits<double>::type_id) mState = State::ParamDouble;
    else if (pData == TypeTraits<void*>::type_id) mState = State::ParamVoidP;
    else if (pData == TypeTraits<BufferLog>::type_id) mState = State::ParamBufferLogSz;
    else if (pData == TypeTraits<const char*>::type_id) mState = State::ParamStrSz;
    else
    {
        // Unknown tag: the current line is complete (or unrecoverable);
        // flush it and reset the stringstream.
        std::cout << mSs.str() << "\n";
        // RTP TODO: Buggy in ARM gcc
        // mSs = std::stringstream();
        // Workaround for the move-assignment bug noted above: destroy the
        // stream in place and reconstruct it with placement new.
        mSs.~basic_stringstream();
        new (&mSs) std::stringstream();
        // mSs.str("");
        mState = State::Logpoint;
    }
    // Any partially-buffered parameter bytes are discarded on a tag.
    mReadSz = 0;
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x48, %rsp movl %esi, %ebp movq %rdi, %rbx leaq 0x100478(%rdi), %rdx movq 0x1002d0(%rdi), %rcx pushq $0x25 popq %rdi pushq $0x3b popq %rsi callq 0x31ce movq %rax, %r14 movq 0x1002d0(%rbx), %rsi leaq 0x38(%rsp), %rax movq %rax, -0x10(%rax) leaq 0x28(%rsp), %r15 movq %r15, %rdi movq %r14, %rdx callq 0x3260 leaq 0x100300(%rbx), %rdi movq %r15, %rsi callq 0x21a0 cmpb $0x0, (%r14) je 0x3116 movq 0x100480(%rbx), %rax addq %r14, %rax incq %rax movq %rax, 0x1002d0(%rbx) addb $0x5f, %bpl cmpb $0xf, %bpl jae 0x3130 movzbl %bpl, %eax leaq 0x2f81(%rip), %rcx # 0x60ac movl (%rcx,%rax,4), %ebp jmp 0x3184 leaq 0x100308(%rbx), %rsi leaq 0x8(%rsp), %rdi callq 0x2280 movq 0x5e88(%rip), %rdi # 0x8fd0 leaq 0x8(%rsp), %rsi callq 0x21a0 leaq 0x1fa4(%rip), %rsi # 0x50fd movq %rax, %rdi callq 0x21b0 leaq 0x1002f0(%rbx), %r14 leaq 0x8(%rsp), %rdi callq 0x20d0 movq %r14, %rdi callq 0x2160 xorl %ebp, %ebp movq %r14, %rdi callq 0x2120 movl %ebp, 0x2b8(%rbx) andq $0x0, 0x1002c0(%rbx) leaq 0x28(%rsp), %rdi callq 0x20d0 addq $0x48, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq jmp 0x31b8 movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x20d0 jmp 0x31bb movq %rax, %rbx leaq 0x28(%rsp), %rdi callq 0x20d0 movq %rbx, %rdi callq 0x22e0 nop
/therooftopprinz[P]Logless/src/spawner.cpp
void spawner::Spawner::decodeParam<unsigned int>(unsigned int)
// Emits one decoded parameter of type T. Does nothing until exactly
// sizeof(T) bytes have been accumulated in mReadBuff; then reinterprets
// those bytes as a T, formats it with the current token's printf-style
// format, appends the text to mSs, and returns to tag-reading state.
void decodeParam(T i)
{
    if (sizeof(T) == mReadSz) {
        // Overwrite the (dummy) argument with the buffered raw bytes.
        std::memcpy(&i, mReadBuff, sizeof(i));
        // NOTE(review): tokenFormat comes from the logged binary, so this
        // is a non-literal format string -- safe only if the producer is
        // trusted; worth confirming.
        snprintf(fmtbuf, sizeof(fmtbuf), tokenFormat.c_str(), i);
        // std::cout << "state: " << std::dec << unsigned(mState) << "format: " << tokenFormat.c_str() << " value: " << fmtbuf << "\n";
        mSs << fmtbuf;
        mState = State::Tag;
        mReadSz = 0;
    }
}
cmpq $0x4, 0x1002c0(%rdi) jne 0x36a0 pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx movl 0x2bc(%rdi), %ecx leaq 0x100498(%rdi), %r14 movq 0x100478(%rdi), %rdx movl $0x4000, %esi # imm = 0x4000 movq %r14, %rdi xorl %eax, %eax callq 0x22a0 leaq 0x100300(%rbx), %rdi movq %r14, %rsi callq 0x21b0 movl $0x3, 0x2b8(%rbx) andq $0x0, 0x1002c0(%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq nop
/therooftopprinz[P]Logless/src/spawner.cpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::setCategoryRatesWithIndex(int, double const*)
// Stores kCategoryCount category rates into the rate buffer at
// categoryRatesIndex, allocating that buffer on first use.
//
// @param categoryRatesIndex  which rate buffer to fill; must lie in
//                            [0, kEigenDecompCount)
// @param inCategoryRates     kCategoryCount doubles to copy in
// @return BEAGLE_SUCCESS, BEAGLE_ERROR_OUT_OF_RANGE on a bad index, or
//         BEAGLE_ERROR_OUT_OF_MEMORY if allocation fails
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::setCategoryRatesWithIndex(int categoryRatesIndex,
                                                                 const double* inCategoryRates) {
    if (categoryRatesIndex < 0 || categoryRatesIndex >= kEigenDecompCount)
        return BEAGLE_ERROR_OUT_OF_RANGE;

    const size_t rateBytes = sizeof(double) * kCategoryCount;

    // Rate buffers are allocated lazily, the first time an index is used.
    double*& rateBuffer = gCategoryRates[categoryRatesIndex];
    if (rateBuffer == NULL) {
        rateBuffer = (double*) malloc(rateBytes);
        if (rateBuffer == NULL)
            return BEAGLE_ERROR_OUT_OF_MEMORY;
    }

    memcpy(rateBuffer, inCategoryRates, rateBytes);
    return BEAGLE_SUCCESS;
}
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movl %esi, 0x14(%rsp) movq %rdx, 0x8(%rsp) movq 0x18(%rsp), %rax movq %rax, (%rsp) cmpl $0x0, 0x14(%rsp) jl 0x68def movq (%rsp), %rcx movl 0x14(%rsp), %eax cmpl 0x30(%rcx), %eax jl 0x68dfc movl $0xfffffffb, 0x24(%rsp) # imm = 0xFFFFFFFB jmp 0x68e7c movq (%rsp), %rax movq 0x78(%rax), %rax movslq 0x14(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) jne 0x68e51 movq (%rsp), %rax movslq 0x34(%rax), %rdi shlq $0x3, %rdi callq 0x62d90 movq %rax, %rsi movq (%rsp), %rax movq 0x78(%rax), %rcx movslq 0x14(%rsp), %rdx movq %rsi, (%rcx,%rdx,8) movq 0x78(%rax), %rax movslq 0x14(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) jne 0x68e4f movl $0xfffffffe, 0x24(%rsp) # imm = 0xFFFFFFFE jmp 0x68e7c jmp 0x68e51 movq (%rsp), %rax movq 0x78(%rax), %rcx movslq 0x14(%rsp), %rdx movq (%rcx,%rdx,8), %rdi movq 0x8(%rsp), %rsi movslq 0x34(%rax), %rdx shlq $0x3, %rdx callq 0x61580 movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nopw %cs:(%rax,%rax) nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::setTransitionMatrices(int const*, double const*, double const*, int)
// Copies `count` transition matrices from inMatrices into the internal
// gTransitionMatrices buffers named by matrixIndices.
//
// When the instantiation uses row padding (T_PAD != 0), each kStateCount
// row is copied individually and paddedValues[k] is written into the
// padding column that follows it; otherwise whole matrices are copied in
// one call per index.
//
// @return BEAGLE_SUCCESS always
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::setTransitionMatrices(const int* matrixIndices,
                                                             const double* inMatrices,
                                                             const double* paddedValues,
                                                             int count) {
    // One unpadded input matrix block spans this many doubles.
    const int entriesPerMatrix = kStateCount * kStateCount * kCategoryCount;

    for (int k = 0; k < count; k++) {
        const double* src = inMatrices + k * entriesPerMatrix;
        REALTYPE* dst = gTransitionMatrices[matrixIndices[k]];

        if (T_PAD != 0) {
            // Row-by-row copy, interleaving the padding value after each row.
            for (int cat = 0; cat < kCategoryCount; cat++) {
                for (int row = 0; row < kStateCount; row++) {
                    beagleMemCpy(dst, src, kStateCount);
                    dst[kStateCount] = paddedValues[k];
                    dst += kTransPaddedStateCount; // skip over the padding
                    src += kStateCount;
                }
            }
        } else {
            beagleMemCpy(dst, src, kMatrixSize * kCategoryCount);
        }
    }
    return BEAGLE_SUCCESS;
}
subq $0x58, %rsp movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movq %rdx, 0x40(%rsp) movq %rcx, 0x38(%rsp) movl %r8d, 0x34(%rsp) movq 0x50(%rsp), %rax movq %rax, (%rsp) movl $0x0, 0x30(%rsp) movl 0x30(%rsp), %eax cmpl 0x34(%rsp), %eax jge 0x69118 movq (%rsp), %rax movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx imull 0x24(%rax), %edx imull 0x24(%rax), %edx imull 0x34(%rax), %edx movslq %edx, %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x28(%rsp) movq 0x48(%rsp), %rcx movslq 0x30(%rsp), %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0x24(%rsp) movq 0x28(%rsp), %rcx movq %rcx, 0x18(%rsp) movq 0xd8(%rax), %rax movslq 0x24(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x10(%rsp) movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x69106 movl $0x0, 0x8(%rsp) movq (%rsp), %rcx movl 0x8(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x690f4 movq (%rsp), %rax movq 0x10(%rsp), %rdi movq 0x18(%rsp), %rsi movl 0x24(%rax), %edx callq 0x64780 movq (%rsp), %rax movq 0x38(%rsp), %rcx movslq 0x30(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x10(%rsp), %rcx movslq 0x24(%rax), %rdx vmovsd %xmm0, (%rcx,%rdx,8) movl 0x28(%rax), %edx movq 0x10(%rsp), %rcx movslq %edx, %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x10(%rsp) movl 0x24(%rax), %ecx movq 0x18(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x18(%rsp) movl 0x8(%rsp), %eax addl $0x1, %eax movl %eax, 0x8(%rsp) jmp 0x69075 jmp 0x690f6 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x6905c jmp 0x69108 movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x68fee xorl %eax, %eax addq $0x58, %rsp retq nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::convolveTransitionMatrices(int const*, int const*, int const*, int)
// Convolves pairs of transition matrices: for each u, computes the
// category-wise matrix product A*B of the matrices at firstIndices[u] and
// secondIndices[u] and stores it at resultIndices[u]. In-place convolution
// (result aliasing either input) is rejected.
//
// @return BEAGLE_SUCCESS, or BEAGLE_ERROR_OUT_OF_RANGE if any result
//         index aliases one of its inputs (processing stops there)
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::convolveTransitionMatrices(const int* firstIndices,
                                                                  const int* secondIndices,
                                                                  const int* resultIndices,
                                                                  int matrixCount) {

#ifdef BEAGLE_DEBUG_FLOW
    fprintf(stderr, "\t Entering BeagleCPUImpl::convolveTransitionMatrices \n");
#endif

    int returnCode = BEAGLE_SUCCESS;

    for (int u = 0; u < matrixCount; u++) {

        // The product is accumulated directly into C, so C must not alias
        // A or B -- reject in-place requests.
        if(firstIndices[u] == resultIndices[u] || secondIndices[u] == resultIndices[u]) {

#ifdef BEAGLE_DEBUG_FLOW
            fprintf(stderr, "In-place convolution is not allowed \n");
#endif

            returnCode = BEAGLE_ERROR_OUT_OF_RANGE;
            break;

        }//END: overwrite check

        REALTYPE* C = gTransitionMatrices[resultIndices[u]];
        REALTYPE* A = gTransitionMatrices[firstIndices[u]];
        REALTYPE* B = gTransitionMatrices[secondIndices[u]];

        // n walks C linearly over all categories, rows, columns and padding;
        // matrices are stored with kTransPaddedStateCount doubles per row.
        int n = 0;
        for (int l = 0; l < kCategoryCount; l++) {

            for (int i = 0; i < kStateCount; i++) {
                for (int j = 0; j < kStateCount; j++) {
                    // C[i][j] = sum_k A[i][k] * B[k][j] within this category.
                    REALTYPE sum = 0.0;
                    for (int k = 0; k < kStateCount; k++) {
                        sum += A[k + kTransPaddedStateCount * i] * B[j + kTransPaddedStateCount * k];
                    }
                    // printf("%.30f %.30f %d: \n", C[n], sum, l);
                    C[n] = sum;
                    n++;
                }//END: j loop

                if (T_PAD != 0) {
                    // A[n] = 1.0;
                    // B[n] = 1.0;
                    // First padding entry of the row is set to 1.0; the
                    // remaining T_PAD-1 entries (if any) are skipped untouched.
                    C[n] = 1.0;
                    n += T_PAD;
                }//END: padding check

            }//END: i loop

            // Advance A and B to the next category's (padded) matrix.
            A += kStateCount * kTransPaddedStateCount;
            B += kStateCount * kTransPaddedStateCount;

        }//END: rates loop
    }//END: u loop

#ifdef BEAGLE_DEBUG_FLOW
    fprintf(stderr, "\t Leaving BeagleCPUImpl::convolveTransitionMatrices \n");
#endif

    return returnCode;
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movl %r8d, -0x24(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x70(%rsp) movl $0x0, -0x28(%rsp) movl $0x0, -0x2c(%rsp) movl -0x2c(%rsp), %eax cmpl -0x24(%rsp), %eax jge 0x69473 movq -0x10(%rsp), %rax movslq -0x2c(%rsp), %rcx movl (%rax,%rcx,4), %eax movq -0x20(%rsp), %rcx movslq -0x2c(%rsp), %rdx cmpl (%rcx,%rdx,4), %eax je 0x69279 movq -0x18(%rsp), %rax movslq -0x2c(%rsp), %rcx movl (%rax,%rcx,4), %eax movq -0x20(%rsp), %rcx movslq -0x2c(%rsp), %rdx cmpl (%rcx,%rdx,4), %eax jne 0x69286 movl $0xfffffffb, -0x28(%rsp) # imm = 0xFFFFFFFB jmp 0x69473 movq -0x70(%rsp), %rax movq 0xd8(%rax), %rcx movq -0x20(%rsp), %rdx movslq -0x2c(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, -0x38(%rsp) movq 0xd8(%rax), %rcx movq -0x10(%rsp), %rdx movslq -0x2c(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, -0x40(%rsp) movq 0xd8(%rax), %rax movq -0x18(%rsp), %rcx movslq -0x2c(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, -0x48(%rsp) movl $0x0, -0x4c(%rsp) movl $0x0, -0x50(%rsp) movq -0x70(%rsp), %rcx movl -0x50(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x69461 movl $0x0, -0x54(%rsp) movq -0x70(%rsp), %rcx movl -0x54(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x69416 movl $0x0, -0x58(%rsp) movq -0x70(%rsp), %rcx movl -0x58(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x693e4 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x60(%rsp) movl $0x0, -0x64(%rsp) movq -0x70(%rsp), %rcx movl -0x64(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x693b4 movq -0x70(%rsp), %rdx movq -0x40(%rsp), %rax movl -0x64(%rsp), %ecx movl 0x28(%rdx), %esi imull -0x54(%rsp), %esi addl %esi, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x48(%rsp), %rax movl -0x58(%rsp), %ecx movl 0x28(%rdx), %edx imull -0x64(%rsp), %edx addl %edx, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x60(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + 
xmm2 vmovsd %xmm0, -0x60(%rsp) movl -0x64(%rsp), %eax addl $0x1, %eax movl %eax, -0x64(%rsp) jmp 0x6934d vmovsd -0x60(%rsp), %xmm0 movq -0x38(%rsp), %rax movslq -0x4c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl -0x4c(%rsp), %eax addl $0x1, %eax movl %eax, -0x4c(%rsp) movl -0x58(%rsp), %eax addl $0x1, %eax movl %eax, -0x58(%rsp) jmp 0x69329 movq -0x38(%rsp), %rax movslq -0x4c(%rsp), %rcx vmovsd 0x4fc12(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, (%rax,%rcx,8) movl -0x4c(%rsp), %eax addl $0x2, %eax movl %eax, -0x4c(%rsp) movl -0x54(%rsp), %eax addl $0x1, %eax movl %eax, -0x54(%rsp) jmp 0x6930f movq -0x70(%rsp), %rax movl 0x24(%rax), %edx imull 0x28(%rax), %edx movq -0x40(%rsp), %rcx movslq %edx, %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, -0x40(%rsp) movl 0x24(%rax), %ecx imull 0x28(%rax), %ecx movq -0x48(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x48(%rsp) movl -0x50(%rsp), %eax addl $0x1, %eax movl %eax, -0x50(%rsp) jmp 0x692f5 jmp 0x69463 movl -0x2c(%rsp), %eax addl $0x1, %eax movl %eax, -0x2c(%rsp) jmp 0x69233 movl -0x28(%rsp), %eax retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::updateTransitionMatrices(int, int const*, int const*, int const*, double const*, int)
// Computes transition probability matrices (and optionally their first and
// second derivatives) for the given edge lengths by delegating to the
// eigen-decomposition object, using the rates stored in category-rate
// buffer 0 and writing into gTransitionMatrices.
//
// @return BEAGLE_SUCCESS always
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::updateTransitionMatrices(int eigenIndex,
                                                                const int* probabilityIndices,
                                                                const int* firstDerivativeIndices,
                                                                const int* secondDerivativeIndices,
                                                                const double* edgeLengths,
                                                                int count) {
    // for (int i = 0; i < count; i++) {
    //     printf("uTM %d %d %f %d\n", eigenIndex, probabilityIndices[i], edgeLengths[i], 0);
    // }
    const double* rates = gCategoryRates[0];
    gEigenDecomposition->updateTransitionMatrices(eigenIndex,
                                                  probabilityIndices,
                                                  firstDerivativeIndices,
                                                  secondDerivativeIndices,
                                                  edgeLengths,
                                                  rates,
                                                  gTransitionMatrices,
                                                  count);
    return BEAGLE_SUCCESS;
}
pushq %rbx subq $0x50, %rsp movl 0x60(%rsp), %eax movq %rdi, 0x48(%rsp) movl %esi, 0x44(%rsp) movq %rdx, 0x38(%rsp) movq %rcx, 0x30(%rsp) movq %r8, 0x28(%rsp) movq %r9, 0x20(%rsp) movq 0x48(%rsp), %rax movq 0x70(%rax), %rdi movl 0x44(%rsp), %esi movq 0x38(%rsp), %rdx movq 0x30(%rsp), %rcx movq 0x28(%rsp), %r8 movq 0x20(%rsp), %r9 movq 0x78(%rax), %r10 movq (%r10), %rbx movq 0xd8(%rax), %r11 movl 0x60(%rsp), %r10d movq (%rdi), %rax movq %rbx, (%rsp) movq %r11, 0x8(%rsp) movl %r10d, 0x10(%rsp) callq *0x18(%rax) xorl %eax, %eax addq $0x50, %rsp popq %rbx retq nopw %cs:(%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::updatePartialsByPartition(int const*, int)
// Runs partition-aware partial-likelihood updates for the given operation
// list. Threaded instances dispatch to the asynchronous implementation;
// otherwise the work is done synchronously via upPartials with no
// cumulative rescaling buffer (BEAGLE_OP_NONE).
//
// @return the result code of whichever implementation ran
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::updatePartialsByPartition(const int* operations,
                                                                 int count) {
    if (kThreadingEnabled)
        return upPartialsByPartitionAsync(operations, count);

    const bool byPartition = true;
    return upPartials(byPartition, operations, count, BEAGLE_OP_NONE);
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movl %edx, 0x14(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) movl $0xffffffff, 0x10(%rsp) # imm = 0xFFFFFFFF testb $0x1, 0x13c(%rax) je 0x699f8 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movl 0x14(%rsp), %edx movq (%rdi), %rax callq *0x1f8(%rax) movl %eax, 0x10(%rsp) jmp 0x69a2a movq (%rsp), %rdi movb $0x1, 0xf(%rsp) movb 0xf(%rsp), %sil movq 0x18(%rsp), %rdx movl 0x14(%rsp), %ecx movq (%rdi), %rax movl $0xffffffff, %r8d # imm = 0xFFFFFFFF andb $0x1, %sil movzbl %sil, %esi callq *0x1a0(%rax) movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax addq $0x28, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::updatePrePartialsByPartition(int const*, int)
// Runs partition-aware pre-order partial updates for the given operation
// list. There is no asynchronous implementation yet, so threaded instances
// report a general error; synchronous instances delegate to upPrePartials
// with no cumulative rescaling buffer (BEAGLE_OP_NONE).
//
// @return BEAGLE_ERROR_GENERAL when threading is enabled (not implemented),
//         otherwise upPrePartials' result code
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::updatePrePartialsByPartition(const int* operations,
                                                                    int count) {
    if (kThreadingEnabled) {
        // TODO: add upPrePartialsByPartitionAsync(operations, count)
        return BEAGLE_ERROR_GENERAL;
    }

    const bool byPartition = true;
    return upPrePartials(byPartition, operations, count, BEAGLE_OP_NONE);
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movl %edx, 0x14(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) movl $0xffffffff, 0x10(%rsp) # imm = 0xFFFFFFFF testb $0x1, 0x13c(%rax) je 0x69a6e jmp 0x69aa0 movq (%rsp), %rdi movb $0x1, 0xf(%rsp) movb 0xf(%rsp), %sil movq 0x18(%rsp), %rdx movl 0x14(%rsp), %ecx movq (%rdi), %rax movl $0xffffffff, %r8d # imm = 0xFFFFFFFF andb $0x1, %sil movzbl %sil, %esi callq *0x1a8(%rax) movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::accumulateScaleFactors(int const*, int, int)
// Accumulates `count` scale buffers into a cumulative buffer, pattern by
// pattern. Two modes, selected by kFlags:
//  - BEAGLE_FLAG_SCALING_AUTO: gScaleBuffers[0] is rebuilt from scratch
//    out of the active signed-short auto-scale exponents, converted to
//    natural log via M_LN2 (the stored values are base-2 exponents);
//    cumulativeScalingIndex is ignored in this mode.
//  - otherwise: the named scale buffers are added into
//    gScaleBuffers[cumulativeScalingIndex], taking logs first unless the
//    buffers already hold log-scalers (BEAGLE_FLAG_SCALERS_LOG).
//
// @return BEAGLE_SUCCESS always
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateScaleFactors(const int* scalingIndices,
                                                              int count,
                                                              int cumulativeScalingIndex) {
    if (kFlags & BEAGLE_FLAG_SCALING_AUTO) {

        // Auto-scaling keeps the cumulative result in buffer 0; reset it.
        REALTYPE* cumulativeScaleBuffer = gScaleBuffers[0];
        for(int j=0; j<kPatternCount; j++)
            cumulativeScaleBuffer[j] = 0;

        for(int i=0; i<count; i++) {
            // Auto-scale buffers are indexed relative to the tip count.
            int sIndex = scalingIndices[i] - kTipCount;

            if (gActiveScalingFactors[sIndex]) {
                const signed short* scaleBuffer = gAutoScaleBuffers[sIndex];
                for(int j=0; j<kPatternCount; j++) {
                    // Exponents are powers of two; M_LN2 converts to ln.
                    cumulativeScaleBuffer[j] += M_LN2 * scaleBuffer[j];
                }
            }
        }

    } else {

        REALTYPE* cumulativeScaleBuffer = gScaleBuffers[cumulativeScalingIndex];
        for(int i=0; i<count; i++) {
            const REALTYPE* scaleBuffer = gScaleBuffers[scalingIndices[i]];
            for(int j=0; j<kPatternCount; j++) {
                if (kFlags & BEAGLE_FLAG_SCALERS_LOG)
                    cumulativeScaleBuffer[j] += scaleBuffer[j];
                else
                    cumulativeScaleBuffer[j] += log(scaleBuffer[j]);
            }
        }

        if (DEBUGGING_OUTPUT) {
            fprintf(stderr,"Accumulating %d scale buffers into #%d\n",count,cumulativeScalingIndex);
            for(int j=0; j<kPatternCount; j++) {
                fprintf(stderr,"cumulativeScaleBuffer[%d] = %2.5e\n",j,cumulativeScaleBuffer[j]);
            }
        }
    }

    return BEAGLE_SUCCESS;
}
subq $0x68, %rsp movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movl %edx, 0x54(%rsp) movl %ecx, 0x50(%rsp) movq 0x60(%rsp), %rax movq %rax, (%rsp) movq 0x58(%rax), %rax andq $0x80, %rax cmpq $0x0, %rax je 0x69c12 movq (%rsp), %rax movq 0xc0(%rax), %rax movq (%rax), %rax movq %rax, 0x48(%rsp) movl $0x0, 0x44(%rsp) movq (%rsp), %rcx movl 0x44(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x69b4b movq 0x48(%rsp), %rax movslq 0x44(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0x69b1e movl $0x0, 0x40(%rsp) movl 0x40(%rsp), %eax cmpl 0x54(%rsp), %eax jge 0x69c0d movq (%rsp), %rax movq 0x58(%rsp), %rcx movslq 0x40(%rsp), %rdx movl (%rcx,%rdx,4), %ecx subl 0x10(%rax), %ecx movl %ecx, 0x3c(%rsp) movq 0xd0(%rax), %rax movslq 0x3c(%rsp), %rcx cmpl $0x0, (%rax,%rcx,4) je 0x69bfb movq (%rsp), %rax movq 0xc8(%rax), %rax movslq 0x3c(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x30(%rsp) movl $0x0, 0x2c(%rsp) movq (%rsp), %rcx movl 0x2c(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x69bf9 movq 0x30(%rsp), %rax movslq 0x2c(%rsp), %rcx movswl (%rax,%rcx,2), %eax vcvtsi2sd %eax, %xmm0, %xmm1 movq 0x48(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm2 vmovsd 0x4f42e(%rip), %xmm0 # 0xb9010 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c(%rsp) jmp 0x69bac jmp 0x69bfb jmp 0x69bfd movl 0x40(%rsp), %eax addl $0x1, %eax movl %eax, 0x40(%rsp) jmp 0x69b53 jmp 0x69cfc movq (%rsp), %rax movq 0xc0(%rax), %rax movslq 0x50(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) movl $0x0, 0x1c(%rsp) movl 0x1c(%rsp), %eax cmpl 0x54(%rsp), %eax jge 0x69cfa movq (%rsp), %rax movq 0xc0(%rax), %rax movq 0x58(%rsp), %rcx movslq 0x1c(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x10(%rsp) movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x69ce8 movq (%rsp), 
%rax movq 0x58(%rax), %rax andq $0x400, %rax # imm = 0x400 cmpq $0x0, %rax je 0x69cb1 movq 0x10(%rsp), %rax movslq 0xc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x20(%rsp), %rax movslq 0xc(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x69cd9 movq 0x10(%rsp), %rax movslq 0xc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 callq 0x61460 movq 0x20(%rsp), %rax movslq 0xc(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x69cdb movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x69c6b jmp 0x69cea movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x69c33 jmp 0x69cfc xorl %eax, %eax addq $0x68, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::removeScaleFactorsByPartition(int const*, int, int, int)
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::removeScaleFactorsByPartition(const int* scalingIndices,
                                                                     int count,
                                                                     int cumulativeScalingIndex,
                                                                     int partitionIndex) {
    // Pattern range covered by the requested partition.
    const int firstPattern = gPatternPartitionsStartPatterns[partitionIndex];
    const int lastPattern  = gPatternPartitionsStartPatterns[partitionIndex + 1];

    REALTYPE* cumulative = gScaleBuffers[cumulativeScalingIndex];

    // kFlags is loop-invariant; decide once whether buffers already hold log scalers.
    const bool scalersAreLogged = (kFlags & BEAGLE_FLAG_SCALERS_LOG) != 0;

    // Subtract each listed scale buffer's (log-space) contribution from the
    // cumulative buffer, restricted to this partition's patterns.
    for (int buf = 0; buf < count; buf++) {
        const REALTYPE* factors = gScaleBuffers[scalingIndices[buf]];
        for (int pat = firstPattern; pat < lastPattern; pat++) {
            if (scalersAreLogged)
                cumulative[pat] -= factors[pat];
            else
                cumulative[pat] -= log(factors[pat]);
        }
    }
    return BEAGLE_SUCCESS;
}
subq $0x58, %rsp movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movl %edx, 0x44(%rsp) movl %ecx, 0x40(%rsp) movl %r8d, 0x3c(%rsp) movq 0x50(%rsp), %rax movq %rax, 0x8(%rsp) movq 0x90(%rax), %rcx movslq 0x3c(%rsp), %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0x38(%rsp) movq 0x90(%rax), %rcx movl 0x3c(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0x34(%rsp) movq 0xc0(%rax), %rax movslq 0x40(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %eax cmpl 0x44(%rsp), %eax jge 0x6a0f6 movq 0x8(%rsp), %rax movq 0xc0(%rax), %rax movq 0x48(%rsp), %rcx movslq 0x24(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x18(%rsp) movl 0x38(%rsp), %eax movl %eax, 0x14(%rsp) movl 0x14(%rsp), %eax cmpl 0x34(%rsp), %eax jge 0x6a0e4 movq 0x8(%rsp), %rax movq 0x58(%rax), %rax andq $0x400, %rax # imm = 0x400 cmpq $0x0, %rax je 0x6a0a2 movq 0x18(%rsp), %rax movslq 0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x28(%rsp), %rax movslq 0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vsubsd %xmm1, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x6a0d2 movq 0x18(%rsp), %rax movslq 0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 callq 0x61460 vmovaps %xmm0, %xmm1 movq 0x28(%rsp), %rax movslq 0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vsubsd %xmm1, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x6a0d4 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x6a056 jmp 0x6a0e6 movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x6a01d xorl %eax, %eax addq $0x58, %rsp retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calculateRootLogLikelihoods(int const*, int const*, int const*, int const*, int, double*)
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calculateRootLogLikelihoods(const int *bufferIndices,
                                                                   const int *categoryWeightsIndices,
                                                                   const int *stateFrequenciesIndices,
                                                                   const int *cumulativeScaleIndices,
                                                                   int count,
                                                                   double *outSumLogLikelihood) {
    // Multiple root buffers: delegate wholesale and let the multi-buffer
    // implementation handle the bookkeeping.
    if (count != 1) {
        return calcRootLogLikelihoodsMulti(bufferIndices, categoryWeightsIndices,
                                           stateFrequenciesIndices, cumulativeScaleIndices,
                                           count, outSumLogLikelihood);
    }

    // count == 1 is special-cased so the per-pattern loop stays free of
    // convoluted end-of-loop logic.
    int cumulativeScalingFactorIndex;
    if (kFlags & BEAGLE_FLAG_SCALING_AUTO)
        cumulativeScalingFactorIndex = 0;
    else if (kFlags & BEAGLE_FLAG_SCALING_ALWAYS)
        cumulativeScalingFactorIndex = bufferIndices[0] - kTipCount;
    else
        cumulativeScalingFactorIndex = cumulativeScaleIndices[0];

    if (kAutoRootPartitioningEnabled) {
        // Kick off per-partition evaluation, then reduce partition sums.
        calcRootLogLikelihoodsByAutoPartitionAsync(bufferIndices,
                                                   categoryWeightsIndices,
                                                   stateFrequenciesIndices,
                                                   cumulativeScaleIndices,
                                                   gAutoPartitionIndices,
                                                   gAutoPartitionOutSumLogLikelihoods);
        *outSumLogLikelihood = 0.0;
        for (int i = 0; i < kPartitionCount; i++)
            *outSumLogLikelihood += gAutoPartitionOutSumLogLikelihoods[i];

        // Self-inequality is the portable NaN test.
        if (*outSumLogLikelihood != *outSumLogLikelihood)
            return BEAGLE_ERROR_FLOATING_POINT;
        return BEAGLE_SUCCESS;
    }

    // Non-partitioned path: a negative category-weights index selects the
    // per-category variant.
    if (categoryWeightsIndices[0] >= 0) {
        return calcRootLogLikelihoods(bufferIndices[0],
                                      categoryWeightsIndices[0],
                                      stateFrequenciesIndices[0],
                                      cumulativeScalingFactorIndex,
                                      outSumLogLikelihood);
    }
    return calcRootLogLikelihoodsPerCategory(bufferIndices[0],
                                             stateFrequenciesIndices[0],
                                             cumulativeScalingFactorIndex,
                                             outSumLogLikelihood);
}
subq $0x58, %rsp movq 0x60(%rsp), %rax movq %rdi, 0x48(%rsp) movq %rsi, 0x40(%rsp) movq %rdx, 0x38(%rsp) movq %rcx, 0x30(%rsp) movq %r8, 0x28(%rsp) movl %r9d, 0x24(%rsp) movq 0x48(%rsp), %rax movq %rax, 0x10(%rsp) cmpl $0x1, 0x24(%rsp) jne 0x6a475 movq 0x10(%rsp), %rax movq 0x58(%rax), %rax andq $0x80, %rax cmpq $0x0, %rax je 0x6a30b movl $0x0, 0x20(%rsp) jmp 0x6a342 movq 0x10(%rsp), %rax movq 0x58(%rax), %rax andq $0x100, %rax # imm = 0x100 cmpq $0x0, %rax je 0x6a335 movq 0x10(%rsp), %rcx movq 0x40(%rsp), %rax movl (%rax), %eax subl 0x10(%rcx), %eax movl %eax, 0x20(%rsp) jmp 0x6a340 movq 0x28(%rsp), %rax movl (%rax), %eax movl %eax, 0x20(%rsp) jmp 0x6a342 movq 0x10(%rsp), %rax testb $0x1, 0x13e(%rax) je 0x6a40d movq 0x10(%rsp), %rdi movq 0x40(%rsp), %rsi movq 0x38(%rsp), %rdx movq 0x30(%rsp), %rcx movq 0x28(%rsp), %r8 movq 0x160(%rdi), %r9 movq 0x168(%rdi), %r10 movq (%rdi), %rax movq %r10, (%rsp) callq *0x248(%rax) movq 0x60(%rsp), %rax vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl $0x0, 0x1c(%rsp) movq 0x10(%rsp), %rcx movl 0x1c(%rsp), %eax cmpl 0x48(%rcx), %eax jge 0x6a3db movq 0x10(%rsp), %rax movq 0x168(%rax), %rax movslq 0x1c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x60(%rsp), %rax vaddsd (%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x6a39d movq 0x60(%rsp), %rax vmovsd (%rax), %xmm0 movq 0x60(%rsp), %rax vucomisd (%rax), %xmm0 jne 0x6a3f3 jp 0x6a3f3 jmp 0x6a400 movl $0xfffffff8, 0x54(%rsp) # imm = 0xFFFFFFF8 jmp 0x6a4a9 movl $0x0, 0x54(%rsp) jmp 0x6a4a9 movq 0x38(%rsp), %rax cmpl $0x0, (%rax) jl 0x6a44a movq 0x10(%rsp), %rdi movq 0x40(%rsp), %rax movl (%rax), %esi movq 0x38(%rsp), %rax movl (%rax), %edx movq 0x30(%rsp), %rax movl (%rax), %ecx movl 0x20(%rsp), %r8d movq 0x60(%rsp), %r9 movq (%rdi), %rax callq *0x230(%rax) movl %eax, 0x54(%rsp) jmp 0x6a4a9 movq 0x10(%rsp), %rdi movq 0x40(%rsp), %rax movl (%rax), %esi movq 0x30(%rsp), %rax movl (%rax), %edx movl 0x20(%rsp), %ecx movq 
0x60(%rsp), %r8 movq (%rdi), %rax callq *0x238(%rax) movl %eax, 0x54(%rsp) jmp 0x6a4a9 movq 0x10(%rsp), %rdi movq 0x40(%rsp), %rsi movq 0x38(%rsp), %rdx movq 0x30(%rsp), %rcx movq 0x28(%rsp), %r8 movl 0x24(%rsp), %r9d movq 0x60(%rsp), %r10 movq (%rdi), %rax movq %r10, (%rsp) callq *0x258(%rax) movl %eax, 0x54(%rsp) movl 0x54(%rsp), %eax addq $0x58, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calculateEdgeLogLikelihoods(int const*, int const*, int const*, int const*, int const*, int const*, int const*, int const*, int, double*, double*, double*)
BEAGLE_CPU_TEMPLATE
// Computes edge (branch) log-likelihoods, optionally with first/second
// derivatives, for one parent/child buffer pair (count == 1) or several
// (count > 1, no-derivative case only).
// NOTE(review): the count==1 path below uses a brace-less
// `if … if … else … else if … else` chain; the first `else` binds to the
// INNER `if (kAutoRootPartitioningEnabled)`, the `else if`/final `else`
// bind to the OUTER derivative test. Fragile but correct as written.
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calculateEdgeLogLikelihoods(const int* parentBufferIndices,
                                                                   const int* childBufferIndices,
                                                                   const int* probabilityIndices,
                                                                   const int* firstDerivativeIndices,
                                                                   const int* secondDerivativeIndices,
                                                                   const int* categoryWeightsIndices,
                                                                   const int* stateFrequenciesIndices,
                                                                   const int* cumulativeScaleIndices,
                                                                   int count,
                                                                   double* outSumLogLikelihood,
                                                                   double* outSumFirstDerivative,
                                                                   double* outSumSecondDerivative) {
    // TODO: implement for count > 1
    if (count == 1) {
        // Choose where the cumulative scale factors live, depending on the
        // scaling mode the instance was created with.
        int cumulativeScalingFactorIndex;
        if (kFlags & BEAGLE_FLAG_SCALING_AUTO) {
            cumulativeScalingFactorIndex = 0;
        } else if (kFlags & BEAGLE_FLAG_SCALING_ALWAYS) {
            // Always-scale mode: rebuild the cumulative buffer from the two
            // child scale buffers (tips have no scale buffer, hence the
            // >= 0 checks after subtracting kTipCount).
            cumulativeScalingFactorIndex = kInternalPartialsBufferCount;
            int child1ScalingIndex = parentBufferIndices[0] - kTipCount;
            int child2ScalingIndex = childBufferIndices[0] - kTipCount;
            resetScaleFactors(cumulativeScalingFactorIndex);
            if (child1ScalingIndex >= 0 && child2ScalingIndex >= 0) {
                int scalingIndices[2] = {child1ScalingIndex, child2ScalingIndex};
                accumulateScaleFactors(scalingIndices, 2, cumulativeScalingFactorIndex);
            } else if (child1ScalingIndex >= 0) {
                int scalingIndices[1] = {child1ScalingIndex};
                accumulateScaleFactors(scalingIndices, 1, cumulativeScalingFactorIndex);
            } else if (child2ScalingIndex >= 0) {
                int scalingIndices[1] = {child2ScalingIndex};
                accumulateScaleFactors(scalingIndices, 1, cumulativeScalingFactorIndex);
            }
        } else {
            cumulativeScalingFactorIndex = cumulativeScaleIndices[0];
        }
        // No derivatives requested:
        if (firstDerivativeIndices == NULL && secondDerivativeIndices == NULL)
            if (kAutoRootPartitioningEnabled) {
                // Per-partition async evaluation, then reduce the sums.
                calcEdgeLogLikelihoodsByAutoPartitionAsync(parentBufferIndices,
                                                           childBufferIndices,
                                                           probabilityIndices,
                                                           categoryWeightsIndices,
                                                           stateFrequenciesIndices,
                                                           cumulativeScaleIndices,
                                                           gAutoPartitionIndices,
                                                           gAutoPartitionOutSumLogLikelihoods);
                *outSumLogLikelihood = 0.0;
                for (int i = 0; i < kPartitionCount; i++) {
                    *outSumLogLikelihood += gAutoPartitionOutSumLogLikelihoods[i];
                }
                // NaN check via self-inequality.
                if (*outSumLogLikelihood != *outSumLogLikelihood) {
                    return BEAGLE_ERROR_FLOATING_POINT;
                } else {
                    return BEAGLE_SUCCESS;
                }
            } else {
                return calcEdgeLogLikelihoods(parentBufferIndices[0],
                                              childBufferIndices[0],
                                              probabilityIndices[0],
                                              categoryWeightsIndices[0],
                                              stateFrequenciesIndices[0],
                                              cumulativeScalingFactorIndex,
                                              outSumLogLikelihood);
            }
        // First derivative only:
        else if (secondDerivativeIndices == NULL)
            return calcEdgeLogLikelihoodsFirstDeriv(parentBufferIndices[0],
                                                    childBufferIndices[0],
                                                    probabilityIndices[0],
                                                    firstDerivativeIndices[0],
                                                    categoryWeightsIndices[0],
                                                    stateFrequenciesIndices[0],
                                                    cumulativeScalingFactorIndex,
                                                    outSumLogLikelihood,
                                                    outSumFirstDerivative);
        // Both derivatives:
        else
            return calcEdgeLogLikelihoodsSecondDeriv(parentBufferIndices[0],
                                                     childBufferIndices[0],
                                                     probabilityIndices[0],
                                                     firstDerivativeIndices[0],
                                                     secondDerivativeIndices[0],
                                                     categoryWeightsIndices[0],
                                                     stateFrequenciesIndices[0],
                                                     cumulativeScalingFactorIndex,
                                                     outSumLogLikelihood,
                                                     outSumFirstDerivative,
                                                     outSumSecondDerivative);
    } else {
        // count > 1: only the plain (no-derivative, no auto/always scaling)
        // case is implemented; everything else warns and falls through to
        // BEAGLE_SUCCESS below.
        if ((kFlags & BEAGLE_FLAG_SCALING_AUTO) || (kFlags & BEAGLE_FLAG_SCALING_ALWAYS)) {
            fprintf(stderr,"BeagleCPUImpl::calculateEdgeLogLikelihoods not yet implemented for count > 1 and auto/always scaling\n");
        }
        if (firstDerivativeIndices == NULL && secondDerivativeIndices == NULL) {
            return calcEdgeLogLikelihoodsMulti(parentBufferIndices,
                                               childBufferIndices,
                                               probabilityIndices,
                                               categoryWeightsIndices,
                                               stateFrequenciesIndices,
                                               cumulativeScaleIndices,
                                               count,
                                               outSumLogLikelihood);
        } else {
            fprintf(stderr,"BeagleCPUImpl::calculateEdgeLogLikelihoods not yet implemented for count > 1 and derivatives\n");
        }
    }
    return BEAGLE_SUCCESS;
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x98, %rsp movq 0xf0(%rsp), %rax movq 0xe8(%rsp), %rax movq 0xe0(%rsp), %rax movl 0xd8(%rsp), %eax movq 0xd0(%rsp), %rax movq 0xc8(%rsp), %rax movq 0xc0(%rsp), %rax movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq %rdx, 0x78(%rsp) movq %rcx, 0x70(%rsp) movq %r8, 0x68(%rsp) movq %r9, 0x60(%rsp) movq 0x88(%rsp), %rax movq %rax, 0x38(%rsp) cmpl $0x1, 0xd8(%rsp) jne 0x6abcf movq 0x38(%rsp), %rax movq 0x58(%rax), %rax andq $0x80, %rax cmpq $0x0, %rax je 0x6a855 movl $0x0, 0x5c(%rsp) jmp 0x6a950 movq 0x38(%rsp), %rax movq 0x58(%rax), %rax andq $0x100, %rax # imm = 0x100 cmpq $0x0, %rax je 0x6a940 movq 0x38(%rsp), %rdi movl 0x44(%rdi), %eax movl %eax, 0x5c(%rsp) movq 0x80(%rsp), %rax movl (%rax), %eax subl 0x10(%rdi), %eax movl %eax, 0x58(%rsp) movq 0x78(%rsp), %rax movl (%rax), %eax subl 0x10(%rdi), %eax movl %eax, 0x54(%rsp) movl 0x5c(%rsp), %esi movq (%rdi), %rax callq *0x120(%rax) cmpl $0x0, 0x58(%rsp) jl 0x6a8e2 cmpl $0x0, 0x54(%rsp) jl 0x6a8e2 movq 0x38(%rsp), %rdi movl 0x58(%rsp), %eax movl %eax, 0x4c(%rsp) movl 0x54(%rsp), %eax movl %eax, 0x50(%rsp) leaq 0x4c(%rsp), %rsi movl 0x5c(%rsp), %ecx movq (%rdi), %rax movl $0x2, %edx callq *0x100(%rax) jmp 0x6a93e cmpl $0x0, 0x58(%rsp) jl 0x6a90f movq 0x38(%rsp), %rdi movl 0x58(%rsp), %eax movl %eax, 0x48(%rsp) leaq 0x48(%rsp), %rsi movl 0x5c(%rsp), %ecx movq (%rdi), %rax movl $0x1, %edx callq *0x100(%rax) jmp 0x6a93c cmpl $0x0, 0x54(%rsp) jl 0x6a93a movq 0x38(%rsp), %rdi movl 0x54(%rsp), %eax movl %eax, 0x44(%rsp) leaq 0x44(%rsp), %rsi movl 0x5c(%rsp), %ecx movq (%rdi), %rax movl $0x1, %edx callq *0x100(%rax) jmp 0x6a93c jmp 0x6a93e jmp 0x6a94e movq 0xd0(%rsp), %rax movl (%rax), %eax movl %eax, 0x5c(%rsp) jmp 0x6a950 cmpq $0x0, 0x68(%rsp) jne 0x6aac3 cmpq $0x0, 0x60(%rsp) jne 0x6aac3 movq 0x38(%rsp), %rax testb $0x1, 0x13e(%rax) je 0x6aa65 movq 0x38(%rsp), %rdi movq 0x80(%rsp), %rsi movq 0x78(%rsp), %rdx movq 0x70(%rsp), %rcx movq 0xc0(%rsp), %r8 movq 0xc8(%rsp), %r9 movq 
0xd0(%rsp), %rbx movq 0x160(%rdi), %r11 movq 0x168(%rdi), %r10 movq (%rdi), %rax movq %rbx, (%rsp) movq %r11, 0x8(%rsp) movq %r10, 0x10(%rsp) callq *0x270(%rax) movq 0xe0(%rsp), %rax vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl $0x0, 0x40(%rsp) movq 0x38(%rsp), %rcx movl 0x40(%rsp), %eax cmpl 0x48(%rcx), %eax jge 0x6aa27 movq 0x38(%rsp), %rax movq 0x168(%rax), %rax movslq 0x40(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0xe0(%rsp), %rax vaddsd (%rax), %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl 0x40(%rsp), %eax addl $0x1, %eax movl %eax, 0x40(%rsp) jmp 0x6a9e6 movq 0xe0(%rsp), %rax vmovsd (%rax), %xmm0 movq 0xe0(%rsp), %rax vucomisd (%rax), %xmm0 jne 0x6aa45 jp 0x6aa45 jmp 0x6aa55 movl $0xfffffff8, 0x94(%rsp) # imm = 0xFFFFFFF8 jmp 0x6aca5 movl $0x0, 0x94(%rsp) jmp 0x6aca5 movq 0x38(%rsp), %rdi movq 0x80(%rsp), %rax movl (%rax), %esi movq 0x78(%rsp), %rax movl (%rax), %edx movq 0x70(%rsp), %rax movl (%rax), %ecx movq 0xc0(%rsp), %rax movl (%rax), %r8d movq 0xc8(%rsp), %rax movl (%rax), %r9d movl 0x5c(%rsp), %r11d movq 0xe0(%rsp), %r10 movq (%rdi), %rax movl %r11d, (%rsp) movq %r10, 0x8(%rsp) callq *0x260(%rax) movl %eax, 0x94(%rsp) jmp 0x6aca5 cmpq $0x0, 0x60(%rsp) jne 0x6ab3f movq 0x38(%rsp), %rdi movq 0x80(%rsp), %rax movl (%rax), %esi movq 0x78(%rsp), %rax movl (%rax), %edx movq 0x70(%rsp), %rax movl (%rax), %ecx movq 0x68(%rsp), %rax movl (%rax), %r8d movq 0xc0(%rsp), %rax movl (%rax), %r9d movq 0xc8(%rsp), %rax movl (%rax), %ebp movl 0x5c(%rsp), %ebx movq 0xe0(%rsp), %r11 movq 0xe8(%rsp), %r10 movq (%rdi), %rax movl %ebp, (%rsp) movl %ebx, 0x8(%rsp) movq %r11, 0x10(%rsp) movq %r10, 0x18(%rsp) callq *0x290(%rax) movl %eax, 0x94(%rsp) jmp 0x6aca5 movq 0x38(%rsp), %rdi movq 0x80(%rsp), %rax movl (%rax), %esi movq 0x78(%rsp), %rax movl (%rax), %edx movq 0x70(%rsp), %rax movl (%rax), %ecx movq 0x68(%rsp), %rax movl (%rax), %r8d movq 0x60(%rsp), %rax movl (%rax), %r9d movq 0xc0(%rsp), %rax movl (%rax), %r15d movq 0xc8(%rsp), %rax movl (%rax), %r14d movl 
0x5c(%rsp), %ebp movq 0xe0(%rsp), %rbx movq 0xe8(%rsp), %r11 movq 0xf0(%rsp), %r10 movq (%rdi), %rax movl %r15d, (%rsp) movl %r14d, 0x8(%rsp) movl %ebp, 0x10(%rsp) movq %rbx, 0x18(%rsp) movq %r11, 0x20(%rsp) movq %r10, 0x28(%rsp) callq *0x298(%rax) movl %eax, 0x94(%rsp) jmp 0x6aca5 movq 0x38(%rsp), %rax movq 0x58(%rax), %rax andq $0x80, %rax cmpq $0x0, %rax jne 0x6abf9 movq 0x38(%rsp), %rax movq 0x58(%rax), %rax andq $0x100, %rax # imm = 0x100 cmpq $0x0, %rax je 0x6ac11 movq 0x632e8(%rip), %rax # 0xcdee8 movq (%rax), %rdi leaq 0x4e59d(%rip), %rsi # 0xb91a7 movb $0x0, %al callq 0x63430 cmpq $0x0, 0x68(%rsp) jne 0x6ac80 cmpq $0x0, 0x60(%rsp) jne 0x6ac80 movq 0x38(%rsp), %rdi movq 0x80(%rsp), %rsi movq 0x78(%rsp), %rdx movq 0x70(%rsp), %rcx movq 0xc0(%rsp), %r8 movq 0xc8(%rsp), %r9 movq 0xd0(%rsp), %rbx movl 0xd8(%rsp), %r11d movq 0xe0(%rsp), %r10 movq (%rdi), %rax movq %rbx, (%rsp) movl %r11d, 0x8(%rsp) movq %r10, 0x10(%rsp) callq *0x288(%rax) movl %eax, 0x94(%rsp) jmp 0x6aca5 movq 0x63261(%rip), %rax # 0xcdee8 movq (%rax), %rdi leaq 0x4e57c(%rip), %rsi # 0xb920d movb $0x0, %al callq 0x63430 jmp 0x6ac9a movl $0x0, 0x94(%rsp) movl 0x94(%rsp), %eax addq $0x98, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::getDerivatives(double*, double*)
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::getDerivatives(double* outSumFirstDerivative,
                                                      double* outSumSecondDerivative) {
    // Pattern-weighted sum of the first-derivative scratch buffer.
    *outSumFirstDerivative = 0.0;
    for (int pattern = 0; pattern < kPatternCount; pattern++) {
        *outSumFirstDerivative += outFirstDerivativesTmp[pattern] * gPatternWeights[pattern];
    }

    // Second derivative is optional; callers pass NULL to skip it.
    if (outSumSecondDerivative != NULL) {
        *outSumSecondDerivative = 0.0;
        for (int pattern = 0; pattern < kPatternCount; pattern++) {
            *outSumSecondDerivative += outSecondDerivativesTmp[pattern] * gPatternWeights[pattern];
        }
    }

    return BEAGLE_SUCCESS;
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x28(%rsp) movq -0x10(%rsp), %rax vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl $0x0, -0x1c(%rsp) movq -0x28(%rsp), %rcx movl -0x1c(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x6b202 movq -0x28(%rsp), %rax movq 0x118(%rax), %rcx movslq -0x1c(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq -0x1c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x10(%rsp), %rax vmovsd (%rax), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax) movl -0x1c(%rsp), %eax addl $0x1, %eax movl %eax, -0x1c(%rsp) jmp 0x6b1ae cmpq $0x0, -0x18(%rsp) je 0x6b275 movq -0x18(%rsp), %rax vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl $0x0, -0x20(%rsp) movq -0x28(%rsp), %rcx movl -0x20(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x6b273 movq -0x28(%rsp), %rax movq 0x120(%rax), %rcx movslq -0x20(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq -0x20(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x18(%rsp), %rax vmovsd (%rax), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax) movl -0x20(%rsp), %eax addl $0x1, %eax movl %eax, -0x20(%rsp) jmp 0x6b21f jmp 0x6b275 xorl %eax, %eax retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::getSiteLogLikelihoods(double*)
BEAGLE_CPU_TEMPLATE
// Copies the per-site log-likelihoods out to the caller, undoing the internal
// pattern reordering when one was applied.
//
// @param outLogLikelihoods  caller-owned array of kPatternCount doubles.
// @return BEAGLE_SUCCESS, or BEAGLE_ERROR_OUT_OF_MEMORY if the temporary
//         reorder buffer cannot be allocated.
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::getSiteLogLikelihoods(double* outLogLikelihoods) {
    if (kPatternsReordered) {
        // Patterns were permuted internally; rebuild original order before copying out.
        REALTYPE* outLogLikelihoodsOriginalOrder =
            (REALTYPE*) malloc(sizeof(REALTYPE) * kPatternCount);
        // Fix: the original dereferenced the malloc result unchecked, which
        // would crash on allocation failure.
        if (outLogLikelihoodsOriginalOrder == NULL)
            return BEAGLE_ERROR_OUT_OF_MEMORY;

        // gPatternsNewOrder[i] gives the internal slot holding original pattern i.
        for (int i = 0; i < kPatternCount; i++) {
            outLogLikelihoodsOriginalOrder[i] = outLogLikelihoodsTmp[gPatternsNewOrder[i]];
        }
        // beagleMemCpy also converts REALTYPE -> double when they differ.
        beagleMemCpy(outLogLikelihoods, outLogLikelihoodsOriginalOrder, kPatternCount);
        free(outLogLikelihoodsOriginalOrder);
    } else {
        beagleMemCpy(outLogLikelihoods, outLogLikelihoodsTmp, kPatternCount);
    }
    return BEAGLE_SUCCESS;
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) testb $0x1, 0x51(%rax) je 0x6b32a movq (%rsp), %rax movslq 0x14(%rax), %rdi shlq $0x3, %rdi callq 0x62d90 movq %rax, 0x10(%rsp) movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x6b308 movq (%rsp), %rcx movq 0x110(%rcx), %rax movq 0x98(%rcx), %rcx movslq 0xc(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x10(%rsp), %rax movslq 0xc(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x6b2bf movq (%rsp), %rax movq 0x18(%rsp), %rdi movq 0x10(%rsp), %rsi movl 0x14(%rax), %edx callq 0x641a0 movq 0x10(%rsp), %rdi callq 0x62700 jmp 0x6b342 movq (%rsp), %rax movq 0x18(%rsp), %rdi movq 0x110(%rax), %rsi movl 0x14(%rax), %edx callq 0x641a0 xorl %eax, %eax addq $0x28, %rsp retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::calcPartialsPartials(double*, double const*, double const*, double const*, double const*, int, int)
BEAGLE_CPU_4_SSE_TEMPLATE
// SSE2 kernel: destP[pattern] = (Mq * Pq) .* (Mr * Pr) for every pattern in
// [startPattern, endPattern) and every rate category, for the 4-state
// (nucleotide) model. Two doubles are processed per vector, so each pattern's
// four states occupy two V_Real lanes (states 0-1 and 2-3).
// NOTE(review): "patternDefficit" [sic] is the count of patterns after
// endPattern (plus padding handled separately) that each category's buffers
// still span; it advances the pointers to the start of the next category.
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::calcPartialsPartials(double* destP,
                                                                           const double* partials_q,
                                                                           const double* matrices_q,
                                                                           const double* partials_r,
                                                                           const double* matrices_r,
                                                                           int startPattern,
                                                                           int endPattern) {

    // Patterns (incl. padding) remaining after this call's range, per category.
    int patternDefficit = kPatternCount + kExtraPatterns - endPattern;

    int v = 0;   // scalar index into the partials buffers (4 doubles per pattern)
    int w = 0;   // scalar index into the transition-matrix buffers
    V_Real destq_01, destq_23, destr_01, destr_23;
    VecUnion vu_mq[OFFSET][2], vu_mr[OFFSET][2];
    V_Real *destPvec = (V_Real *)destP;

    for (int l = 0; l < kCategoryCount; l++) {
        // Skip patterns before startPattern (2 vectors / 4 doubles per pattern).
        destPvec += startPattern*2;
        v += startPattern*4;

        /* Load transition-probability matrices into vectors */
        SSE_PREFETCH_MATRICES(matrices_q + w, matrices_r + w, vu_mq, vu_mr);

        for (int k = startPattern; k < endPattern; k++) {

# if 1 && !defined(_WIN32)
            // Software prefetch a few patterns ahead; __builtin_prefetch is
            // GCC/Clang-only, hence the _WIN32 guard.
            __builtin_prefetch (&partials_q[v+64]);
            __builtin_prefetch (&partials_r[v+64]);
//            __builtin_prefetch (destPvec+32,1,0);
# endif

            // Broadcast each of the four states of pattern k into vectors.
            V_Real vpq_0, vpq_1, vpq_2, vpq_3;
            SSE_PREFETCH_PARTIALS(vpq_,partials_q,v);

            V_Real vpr_0, vpr_1, vpr_2, vpr_3;
            SSE_PREFETCH_PARTIALS(vpr_,partials_r,v);

# if 1
            /* This would probably be faster on PPC/Altivec, which has a fused multiply-add
               vector instruction */
            // destq = Mq * Pq, accumulated column by column (states 0-1, then 2-3).
            destq_01 = VEC_MULT(vpq_0, vu_mq[0][0].vx);
            destq_01 = VEC_MADD(vpq_1, vu_mq[1][0].vx, destq_01);
            destq_01 = VEC_MADD(vpq_2, vu_mq[2][0].vx, destq_01);
            destq_01 = VEC_MADD(vpq_3, vu_mq[3][0].vx, destq_01);
            destq_23 = VEC_MULT(vpq_0, vu_mq[0][1].vx);
            destq_23 = VEC_MADD(vpq_1, vu_mq[1][1].vx, destq_23);
            destq_23 = VEC_MADD(vpq_2, vu_mq[2][1].vx, destq_23);
            destq_23 = VEC_MADD(vpq_3, vu_mq[3][1].vx, destq_23);

            // destr = Mr * Pr, same layout.
            destr_01 = VEC_MULT(vpr_0, vu_mr[0][0].vx);
            destr_01 = VEC_MADD(vpr_1, vu_mr[1][0].vx, destr_01);
            destr_01 = VEC_MADD(vpr_2, vu_mr[2][0].vx, destr_01);
            destr_01 = VEC_MADD(vpr_3, vu_mr[3][0].vx, destr_01);
            destr_23 = VEC_MULT(vpr_0, vu_mr[0][1].vx);
            destr_23 = VEC_MADD(vpr_1, vu_mr[1][1].vx, destr_23);
            destr_23 = VEC_MADD(vpr_2, vu_mr[2][1].vx, destr_23);
            destr_23 = VEC_MADD(vpr_3, vu_mr[3][1].vx, destr_23);
# else
            /* SSE doesn't have a fused multiply-add, so a slight speed gain should be achieved
               by decoupling these operations to avoid dependency stalls */
            V_Real a, b, c, d;

            a = VEC_MULT(vpq_0, vu_mq[0][0].vx);
            b = VEC_MULT(vpq_2, vu_mq[2][0].vx);
            c = VEC_MULT(vpq_0, vu_mq[0][1].vx);
            d = VEC_MULT(vpq_2, vu_mq[2][1].vx);
            a = VEC_MADD(vpq_1, vu_mq[1][0].vx, a);
            b = VEC_MADD(vpq_3, vu_mq[3][0].vx, b);
            c = VEC_MADD(vpq_1, vu_mq[1][1].vx, c);
            d = VEC_MADD(vpq_3, vu_mq[3][1].vx, d);
            destq_01 = VEC_ADD(a, b);
            destq_23 = VEC_ADD(c, d);

            a = VEC_MULT(vpr_0, vu_mr[0][0].vx);
            b = VEC_MULT(vpr_2, vu_mr[2][0].vx);
            c = VEC_MULT(vpr_0, vu_mr[0][1].vx);
            d = VEC_MULT(vpr_2, vu_mr[2][1].vx);
            a = VEC_MADD(vpr_1, vu_mr[1][0].vx, a);
            b = VEC_MADD(vpr_3, vu_mr[3][0].vx, b);
            c = VEC_MADD(vpr_1, vu_mr[1][1].vx, c);
            d = VEC_MADD(vpr_3, vu_mr[3][1].vx, d);
            destr_01 = VEC_ADD(a, b);
            destr_23 = VEC_ADD(c, d);
# endif

# if 1//
            // Element-wise product of the two propagated partials.
            destPvec[0] = VEC_MULT(destq_01, destr_01);
            destPvec[1] = VEC_MULT(destq_23, destr_23);
            destPvec += 2;
# else
            /* VEC_STORE did demonstrate a measurable performance gain as
               it copies all (2/4) values to memory simultaneously;
               I can no longer reproduce the performance gain (?) */
            VEC_STORE(destP + v + 0,VEC_MULT(destq_01, destr_01));
            VEC_STORE(destP + v + 2,VEC_MULT(destq_23, destr_23));
# endif

            v += 4;
        }
        w += OFFSET*4;

        // Advance past this category's padding patterns.
        if (kExtraPatterns) {
            destPvec += kExtraPatterns * 2;
            v += kExtraPatterns * 4;
        }

        // Advance past the patterns after endPattern to reach the next category.
        destPvec += patternDefficit * 2;
        v += patternDefficit * 4;
    }
}
subq $0x668, %rsp # imm = 0x668 movl 0x678(%rsp), %eax movl 0x670(%rsp), %eax movq %rdi, 0x278(%rsp) movq %rsi, 0x270(%rsp) movq %rdx, 0x268(%rsp) movq %rcx, 0x260(%rsp) movq %r8, 0x258(%rsp) movq %r9, 0x250(%rsp) movq 0x278(%rsp), %rcx movq %rcx, -0x78(%rsp) movl 0x14(%rcx), %eax addl 0x1c(%rcx), %eax subl 0x678(%rsp), %eax movl %eax, 0x24c(%rsp) movl $0x0, 0x248(%rsp) movl $0x0, 0x244(%rsp) movq 0x270(%rsp), %rax movq %rax, 0x78(%rsp) movl $0x0, 0x74(%rsp) movq -0x78(%rsp), %rcx movl 0x74(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x705ca movl 0x670(%rsp), %ecx shll %ecx movq 0x78(%rsp), %rax movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x78(%rsp) movl 0x670(%rsp), %eax shll $0x2, %eax addl 0x248(%rsp), %eax movl %eax, 0x248(%rsp) movq 0x260(%rsp), %rax movslq 0x244(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x68(%rsp) movq 0x250(%rsp), %rax movslq 0x244(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x60(%rsp) movl $0x0, 0x5c(%rsp) cmpl $0x6, 0x5c(%rsp) jge 0x6fd49 movq 0x68(%rsp), %rax vmovsd (%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x140(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, (%rax) movq 0x68(%rsp), %rax vmovsd 0x30(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x140(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x8(%rax) movq 0x60(%rsp), %rax vmovsd (%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x80(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, (%rax) movq 0x60(%rsp), %rax vmovsd 0x30(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x80(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x8(%rax) movq 0x68(%rsp), %rax vmovsd 0x60(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x140(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x10(%rax) movq 0x68(%rsp), %rax vmovsd 0x90(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x140(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x18(%rax) movq 0x60(%rsp), %rax vmovsd 0x60(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x80(%rsp), %rax shlq 
$0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x10(%rax) movq 0x60(%rsp), %rax vmovsd 0x90(%rax), %xmm0 movslq 0x5c(%rsp), %rcx leaq 0x80(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x18(%rax) movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) movq 0x68(%rsp), %rax addq $0x8, %rax movq %rax, 0x68(%rsp) movq 0x60(%rsp), %rax addq $0x8, %rax movq %rax, 0x60(%rsp) jmp 0x6fbf8 movl 0x670(%rsp), %eax movl %eax, 0x58(%rsp) movl 0x58(%rsp), %eax cmpl 0x678(%rsp), %eax jge 0x70537 movq 0x268(%rsp), %rax movslq 0x248(%rsp), %rcx prefetcht0 0x200(%rax,%rcx,8) movq 0x258(%rsp), %rax movslq 0x248(%rsp), %rcx prefetcht0 0x200(%rax,%rcx,8) movq 0x268(%rsp), %rax movslq 0x248(%rsp), %rcx leaq (%rax,%rcx,8), %rax movq %rax, 0x660(%rsp) movq 0x660(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, (%rsp) movq 0x268(%rsp), %rax movslq 0x248(%rsp), %rcx leaq 0x10(%rax,%rcx,8), %rax movq %rax, 0x658(%rsp) movq 0x658(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x10(%rsp) vmovddup (%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x40(%rsp) vmovddup 0x8(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x30(%rsp) vmovddup -0x10(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x20(%rsp) vmovddup -0x8(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x10(%rsp) movq 0x258(%rsp), %rax movslq 0x248(%rsp), %rcx leaq (%rax,%rcx,8), %rax movq %rax, 0x650(%rsp) movq 0x650(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) movq 0x258(%rsp), %rax movslq 0x248(%rsp), %rcx leaq 0x10(%rax,%rcx,8), %rax movq %rax, 0x648(%rsp) movq 0x648(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x70(%rsp) vmovddup -0x60(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, -0x20(%rsp) vmovddup -0x58(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, -0x30(%rsp) vmovddup -0x70(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, -0x40(%rsp) vmovddup -0x68(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, -0x50(%rsp) vmovapd 0x40(%rsp), %xmm1 vmovapd 0x140(%rsp), %xmm0 vmovapd %xmm1, 0x4b0(%rsp) vmovapd %xmm0, 
0x4a0(%rsp) vmovapd 0x4b0(%rsp), %xmm0 vmulpd 0x4a0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x230(%rsp) vmovapd 0x30(%rsp), %xmm1 vmovapd 0x160(%rsp), %xmm0 vmovapd %xmm1, 0x490(%rsp) vmovapd %xmm0, 0x480(%rsp) vmovapd 0x490(%rsp), %xmm0 vmulpd 0x480(%rsp), %xmm0, %xmm1 vmovapd 0x230(%rsp), %xmm0 vmovapd %xmm1, 0x630(%rsp) vmovapd %xmm0, 0x620(%rsp) vmovapd 0x630(%rsp), %xmm0 vaddpd 0x620(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x230(%rsp) vmovapd 0x20(%rsp), %xmm1 vmovapd 0x180(%rsp), %xmm0 vmovapd %xmm1, 0x470(%rsp) vmovapd %xmm0, 0x460(%rsp) vmovapd 0x470(%rsp), %xmm0 vmulpd 0x460(%rsp), %xmm0, %xmm1 vmovapd 0x230(%rsp), %xmm0 vmovapd %xmm1, 0x610(%rsp) vmovapd %xmm0, 0x600(%rsp) vmovapd 0x610(%rsp), %xmm0 vaddpd 0x600(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x230(%rsp) vmovapd 0x10(%rsp), %xmm1 vmovapd 0x1a0(%rsp), %xmm0 vmovapd %xmm1, 0x450(%rsp) vmovapd %xmm0, 0x440(%rsp) vmovapd 0x450(%rsp), %xmm0 vmulpd 0x440(%rsp), %xmm0, %xmm1 vmovapd 0x230(%rsp), %xmm0 vmovapd %xmm1, 0x5f0(%rsp) vmovapd %xmm0, 0x5e0(%rsp) vmovapd 0x5f0(%rsp), %xmm0 vaddpd 0x5e0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x230(%rsp) vmovapd 0x40(%rsp), %xmm1 vmovapd 0x150(%rsp), %xmm0 vmovapd %xmm1, 0x430(%rsp) vmovapd %xmm0, 0x420(%rsp) vmovapd 0x430(%rsp), %xmm0 vmulpd 0x420(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x220(%rsp) vmovapd 0x30(%rsp), %xmm1 vmovapd 0x170(%rsp), %xmm0 vmovapd %xmm1, 0x410(%rsp) vmovapd %xmm0, 0x400(%rsp) vmovapd 0x410(%rsp), %xmm0 vmulpd 0x400(%rsp), %xmm0, %xmm1 vmovapd 0x220(%rsp), %xmm0 vmovapd %xmm1, 0x5d0(%rsp) vmovapd %xmm0, 0x5c0(%rsp) vmovapd 0x5d0(%rsp), %xmm0 vaddpd 0x5c0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x220(%rsp) vmovapd 0x20(%rsp), %xmm1 vmovapd 0x190(%rsp), %xmm0 vmovapd %xmm1, 0x3f0(%rsp) vmovapd %xmm0, 0x3e0(%rsp) vmovapd 0x3f0(%rsp), %xmm0 vmulpd 0x3e0(%rsp), %xmm0, %xmm1 vmovapd 0x220(%rsp), %xmm0 vmovapd %xmm1, 0x5b0(%rsp) vmovapd %xmm0, 0x5a0(%rsp) vmovapd 0x5b0(%rsp), %xmm0 vaddpd 0x5a0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x220(%rsp) vmovapd 0x10(%rsp), %xmm1 
vmovapd 0x1b0(%rsp), %xmm0 vmovapd %xmm1, 0x3d0(%rsp) vmovapd %xmm0, 0x3c0(%rsp) vmovapd 0x3d0(%rsp), %xmm0 vmulpd 0x3c0(%rsp), %xmm0, %xmm1 vmovapd 0x220(%rsp), %xmm0 vmovapd %xmm1, 0x590(%rsp) vmovapd %xmm0, 0x580(%rsp) vmovapd 0x590(%rsp), %xmm0 vaddpd 0x580(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x220(%rsp) vmovapd -0x20(%rsp), %xmm1 vmovapd 0x80(%rsp), %xmm0 vmovapd %xmm1, 0x3b0(%rsp) vmovapd %xmm0, 0x3a0(%rsp) vmovapd 0x3b0(%rsp), %xmm0 vmulpd 0x3a0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x210(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd 0xa0(%rsp), %xmm0 vmovapd %xmm1, 0x390(%rsp) vmovapd %xmm0, 0x380(%rsp) vmovapd 0x390(%rsp), %xmm0 vmulpd 0x380(%rsp), %xmm0, %xmm1 vmovapd 0x210(%rsp), %xmm0 vmovapd %xmm1, 0x570(%rsp) vmovapd %xmm0, 0x560(%rsp) vmovapd 0x570(%rsp), %xmm0 vaddpd 0x560(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x210(%rsp) vmovapd -0x40(%rsp), %xmm1 vmovapd 0xc0(%rsp), %xmm0 vmovapd %xmm1, 0x370(%rsp) vmovapd %xmm0, 0x360(%rsp) vmovapd 0x370(%rsp), %xmm0 vmulpd 0x360(%rsp), %xmm0, %xmm1 vmovapd 0x210(%rsp), %xmm0 vmovapd %xmm1, 0x550(%rsp) vmovapd %xmm0, 0x540(%rsp) vmovapd 0x550(%rsp), %xmm0 vaddpd 0x540(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x210(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd 0xe0(%rsp), %xmm0 vmovapd %xmm1, 0x350(%rsp) vmovapd %xmm0, 0x340(%rsp) vmovapd 0x350(%rsp), %xmm0 vmulpd 0x340(%rsp), %xmm0, %xmm1 vmovapd 0x210(%rsp), %xmm0 vmovapd %xmm1, 0x530(%rsp) vmovapd %xmm0, 0x520(%rsp) vmovapd 0x530(%rsp), %xmm0 vaddpd 0x520(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x210(%rsp) vmovapd -0x20(%rsp), %xmm1 vmovapd 0x90(%rsp), %xmm0 vmovapd %xmm1, 0x330(%rsp) vmovapd %xmm0, 0x320(%rsp) vmovapd 0x330(%rsp), %xmm0 vmulpd 0x320(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x200(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd 0xb0(%rsp), %xmm0 vmovapd %xmm1, 0x310(%rsp) vmovapd %xmm0, 0x300(%rsp) vmovapd 0x310(%rsp), %xmm0 vmulpd 0x300(%rsp), %xmm0, %xmm1 vmovapd 0x200(%rsp), %xmm0 vmovapd %xmm1, 0x510(%rsp) vmovapd %xmm0, 0x500(%rsp) vmovapd 0x510(%rsp), %xmm0 vaddpd 0x500(%rsp), 
%xmm0, %xmm0 vmovapd %xmm0, 0x200(%rsp) vmovapd -0x40(%rsp), %xmm1 vmovapd 0xd0(%rsp), %xmm0 vmovapd %xmm1, 0x2f0(%rsp) vmovapd %xmm0, 0x2e0(%rsp) vmovapd 0x2f0(%rsp), %xmm0 vmulpd 0x2e0(%rsp), %xmm0, %xmm1 vmovapd 0x200(%rsp), %xmm0 vmovapd %xmm1, 0x4f0(%rsp) vmovapd %xmm0, 0x4e0(%rsp) vmovapd 0x4f0(%rsp), %xmm0 vaddpd 0x4e0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x200(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd 0xf0(%rsp), %xmm0 vmovapd %xmm1, 0x2d0(%rsp) vmovapd %xmm0, 0x2c0(%rsp) vmovapd 0x2d0(%rsp), %xmm0 vmulpd 0x2c0(%rsp), %xmm0, %xmm1 vmovapd 0x200(%rsp), %xmm0 vmovapd %xmm1, 0x4d0(%rsp) vmovapd %xmm0, 0x4c0(%rsp) vmovapd 0x4d0(%rsp), %xmm0 vaddpd 0x4c0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x200(%rsp) vmovapd 0x230(%rsp), %xmm1 vmovapd 0x210(%rsp), %xmm0 vmovapd %xmm1, 0x2b0(%rsp) vmovapd %xmm0, 0x2a0(%rsp) vmovapd 0x2b0(%rsp), %xmm0 vmulpd 0x2a0(%rsp), %xmm0, %xmm0 movq 0x78(%rsp), %rax vmovapd %xmm0, (%rax) vmovapd 0x220(%rsp), %xmm1 vmovapd 0x200(%rsp), %xmm0 vmovapd %xmm1, 0x290(%rsp) vmovapd %xmm0, 0x280(%rsp) vmovapd 0x290(%rsp), %xmm0 vmulpd 0x280(%rsp), %xmm0, %xmm0 movq 0x78(%rsp), %rax vmovapd %xmm0, 0x10(%rax) movq 0x78(%rsp), %rax addq $0x20, %rax movq %rax, 0x78(%rsp) movl 0x248(%rsp), %eax addl $0x4, %eax movl %eax, 0x248(%rsp) movl 0x58(%rsp), %eax addl $0x1, %eax movl %eax, 0x58(%rsp) jmp 0x6fd54 movq -0x78(%rsp), %rax movl 0x244(%rsp), %ecx addl $0x18, %ecx movl %ecx, 0x244(%rsp) cmpl $0x0, 0x1c(%rax) je 0x70585 movq -0x78(%rsp), %rax movl 0x1c(%rax), %edx shll %edx movq 0x78(%rsp), %rcx movslq %edx, %rdx shlq $0x4, %rdx addq %rdx, %rcx movq %rcx, 0x78(%rsp) movl 0x1c(%rax), %eax shll $0x2, %eax addl 0x248(%rsp), %eax movl %eax, 0x248(%rsp) movl 0x24c(%rsp), %ecx shll %ecx movq 0x78(%rsp), %rax movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x78(%rsp) movl 0x24c(%rsp), %eax shll $0x2, %eax addl 0x248(%rsp), %eax movl %eax, 0x248(%rsp) movl 0x74(%rsp), %eax addl $0x1, %eax movl %eax, 0x74(%rsp) jmp 0x6fb71 addq $0x668, %rsp # imm = 
0x668 retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
beagle::cpu::BeagleCPU4StateImpl<double, 2, 0>::calcRootLogLikelihoods(int, int, int, int, double*)
// Computes the root log-likelihood for the 4-state (nucleotide) implementation.
// Sums the root partials over rate categories, weighted by the category
// weights, into integrationTmp, then delegates the integration over state
// frequencies, rescaling, and the final log/sum to integrateOutStatesAndScale().
//
// @param bufferIndex            index into gPartials for the root partials
// @param categoryWeightsIndex   index into gCategoryWeights
// @param stateFrequenciesIndex  forwarded to integrateOutStatesAndScale()
// @param scalingFactorsIndex    forwarded to integrateOutStatesAndScale()
// @param outSumLogLikelihood    output: the summed log-likelihood
// @return return code from integrateOutStatesAndScale()
BEAGLE_CPU_TEMPLATE
int BeagleCPU4StateImpl<BEAGLE_CPU_GENERIC>::calcRootLogLikelihoods(const int bufferIndex,
                                                                    const int categoryWeightsIndex,
                                                                    const int stateFrequenciesIndex,
                                                                    const int scalingFactorsIndex,
                                                                    double* outSumLogLikelihood) {

    const REALTYPE* rootPartials = gPartials[bufferIndex];
    assert(rootPartials);
    const REALTYPE* wt = gCategoryWeights[categoryWeightsIndex];

    int u = 0;  // write index into integrationTmp (4 states per pattern)
    int v = 0;  // read index into rootPartials (keeps advancing across categories)

    // First category (l == 0) initializes integrationTmp with the weighted
    // partials, so no prior memset of the buffer is needed.
    const REALTYPE wt0 = wt[0];
    for (int k = 0; k < kPatternCount; k++) {
        integrationTmp[v    ] = rootPartials[v    ] * wt0;
        integrationTmp[v + 1] = rootPartials[v + 1] * wt0;
        integrationTmp[v + 2] = rootPartials[v + 2] * wt0;
        integrationTmp[v + 3] = rootPartials[v + 3] * wt0;
        v += 4;
    }

    // Remaining categories accumulate their weighted partials on top.
    for (int l = 1; l < kCategoryCount; l++) {
        u = 0;  // restart at the first pattern slot for each category
        const REALTYPE wtl = wt[l];
        for (int k = 0; k < kPatternCount; k++) {
            integrationTmp[u    ] += rootPartials[v    ] * wtl;
            integrationTmp[u + 1] += rootPartials[v + 1] * wtl;
            integrationTmp[u + 2] += rootPartials[v + 2] * wtl;
            integrationTmp[u + 3] += rootPartials[v + 3] * wtl;
            u += 4;
            v += 4;
        }
        v += 4 * kExtraPatterns;  // skip the padding patterns between categories
    }

    return integrateOutStatesAndScale(integrationTmp, stateFrequenciesIndex, scalingFactorsIndex, outSumLogLikelihood);
}
subq $0x68, %rsp movq %rdi, 0x60(%rsp) movl %esi, 0x5c(%rsp) movl %edx, 0x58(%rsp) movl %ecx, 0x54(%rsp) movl %r8d, 0x50(%rsp) movq %r9, 0x48(%rsp) movq 0x60(%rsp), %rax movq %rax, 0x8(%rsp) movq 0xb0(%rax), %rcx movslq 0x5c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, 0x40(%rsp) movq 0xa0(%rax), %rax movslq 0x58(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x38(%rsp) movl $0x0, 0x34(%rsp) movl $0x0, 0x30(%rsp) movq 0x38(%rsp), %rax vmovsd (%rax), %xmm0 vmovsd %xmm0, 0x28(%rsp) movl $0x0, 0x24(%rsp) movq 0x8(%rsp), %rcx movl 0x24(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x719f2 movq 0x8(%rsp), %rax movq 0x40(%rsp), %rcx movslq 0x30(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmulsd 0x28(%rsp), %xmm0, %xmm0 movq 0xe0(%rax), %rcx movslq 0x30(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmulsd 0x28(%rsp), %xmm0, %xmm0 movq 0xe0(%rax), %rcx movl 0x30(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmulsd 0x28(%rsp), %xmm0, %xmm0 movq 0xe0(%rax), %rcx movl 0x30(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmulsd 0x28(%rsp), %xmm0, %xmm0 movq 0xe0(%rax), %rax movl 0x30(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x30(%rsp), %eax addl $0x4, %eax movl %eax, 0x30(%rsp) movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x7190a movl $0x1, 0x20(%rsp) movq 0x8(%rsp), %rcx movl 0x20(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x71b6f movl $0x0, 0x34(%rsp) movq 0x38(%rsp), %rax movslq 0x20(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x18(%rsp) movl $0x0, 0x14(%rsp) movq 0x8(%rsp), %rcx movl 0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x71b4c movq 0x8(%rsp), %rax movq 
0x40(%rsp), %rcx movslq 0x30(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 vmovsd 0x18(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 vmovsd 0x18(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0x34(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 vmovsd 0x18(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0x34(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x40(%rsp), %rcx movl 0x30(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 vmovsd 0x18(%rsp), %xmm0 movq 0xe0(%rax), %rax movl 0x34(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x34(%rsp), %eax addl $0x4, %eax movl %eax, 0x34(%rsp) movl 0x30(%rsp), %eax addl $0x4, %eax movl %eax, 0x30(%rsp) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x71a31 movq 0x8(%rsp), %rax movl 0x1c(%rax), %eax shll $0x2, %eax addl 0x30(%rsp), %eax movl %eax, 0x30(%rsp) movl 0x20(%rsp), %eax addl $0x1, %eax movl %eax, 0x20(%rsp) jmp 0x719fa movq 0x8(%rsp), %rdi movq 0xe0(%rdi), %rsi movl 0x54(%rsp), %edx movl 0x50(%rsp), %ecx movq 0x48(%rsp), %r8 callq 0x5f120 addq $0x68, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcRootLogLikelihoodsPerCategory(int, int, int, double*)
// Computes root log-likelihoods separately for every (category, pattern)
// pair — i.e. WITHOUT summing over rate categories. Results are written
// category-major into outLogLikelihoodPerCategory (all patterns of category 0
// first, then category 1, ...).
//
// @param bufferIndex                  index into gPartials for the root partials
// @param stateFrequenciesIndex        index into gStateFrequencies
// @param scalingFactorsIndex          index into gScaleBuffers; negative skips rescaling
// @param outLogLikelihoodPerCategory  output array of kCategoryCount * kPatternCount values
// @return BEAGLE_SUCCESS (this routine has no failure path)
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcRootLogLikelihoodsPerCategory( const int bufferIndex,
                                                                          const int stateFrequenciesIndex,
                                                                          const int scalingFactorsIndex,
                                                                          double* outLogLikelihoodPerCategory) {

    int returnCode = BEAGLE_SUCCESS;

    const REALTYPE* rootPartials = gPartials[bufferIndex];
    const REALTYPE* freqs = gStateFrequencies[stateFrequenciesIndex];

    int u = 0;  // write index into outLogLikelihoodPerCategory
    int v = 0;  // read index into rootPartials
    for (int l = 0; l < kCategoryCount; l++) {
        for (int k = 0; k < kPatternCount; k++) {
            // Dot product of this pattern's partials with the state frequencies.
            REALTYPE sum = 0.0;
            for (int i = 0; i < kStateCount; i++) {
                sum += rootPartials[v] * freqs[i];
                v++;
            }
            outLogLikelihoodPerCategory[u] = log(sum);
            u++;
            v += P_PAD;  // skip per-pattern partials padding
        }
    }

    if (scalingFactorsIndex >= 0) {
        // Add the cumulative scale factor of each pattern to every category's
        // entry. Factors are indexed per pattern and shared across categories;
        // presumably they are stored in log space — confirm against the
        // scaling code elsewhere in this class.
        const REALTYPE* cumulativeScaleFactors = gScaleBuffers[scalingFactorsIndex];
        int u = 0;  // intentionally shadows the outer u; restarts the walk over the output
        for (int l = 0; l < kCategoryCount; l++) {
            for (int i = 0; i < kPatternCount; i++) {
                outLogLikelihoodPerCategory[u] += cumulativeScaleFactors[i];
                u++;
            }
        }
    }
    return returnCode;
}
subq $0x78, %rsp movq %rdi, 0x70(%rsp) movl %esi, 0x6c(%rsp) movl %edx, 0x68(%rsp) movl %ecx, 0x64(%rsp) movq %r8, 0x58(%rsp) movq 0x70(%rsp), %rax movq %rax, (%rsp) movl $0x0, 0x54(%rsp) movq 0xb0(%rax), %rcx movslq 0x6c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, 0x48(%rsp) movq 0xa8(%rax), %rax movslq 0x68(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x40(%rsp) movl $0x0, 0x3c(%rsp) movl $0x0, 0x38(%rsp) movl $0x0, 0x34(%rsp) movq (%rsp), %rcx movl 0x34(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x71cef movl $0x0, 0x30(%rsp) movq (%rsp), %rcx movl 0x30(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x71cdd vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x28(%rsp) movl $0x0, 0x24(%rsp) movq (%rsp), %rcx movl 0x24(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x71c9d movq 0x48(%rsp), %rax movslq 0x38(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x40(%rsp), %rax movslq 0x24(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x28(%rsp) movl 0x38(%rsp), %eax addl $0x1, %eax movl %eax, 0x38(%rsp) movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x71c49 vmovsd 0x28(%rsp), %xmm0 callq 0x61460 movq 0x58(%rsp), %rax movslq 0x3c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x3c(%rsp), %eax addl $0x1, %eax movl %eax, 0x3c(%rsp) movl 0x38(%rsp), %eax addl $0x0, %eax movl %eax, 0x38(%rsp) movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x71c26 jmp 0x71cdf movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) jmp 0x71c0d cmpl $0x0, 0x64(%rsp) jl 0x71d91 movq (%rsp), %rax movq 0xc0(%rax), %rax movslq 0x64(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x18(%rsp) movl $0x0, 0x14(%rsp) movl $0x0, 0x10(%rsp) movq (%rsp), %rcx movl 0x10(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x71d8f movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x71d80 movq 0x18(%rsp), %rax movslq 0xc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x58(%rsp), %rax movslq 
0x14(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x71d38 jmp 0x71d82 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0x71d23 jmp 0x71d91 movl 0x54(%rsp), %eax addq $0x78, %rsp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::calcEdgeLogLikelihoodsByPartition(int const*, int const*, int const*, int const*, int const*, int const*, int const*, int, double*)
// SSE (2-doubles-per-vector) computation of edge (branch) log-likelihoods,
// evaluated independently per pattern partition. For each partition p:
//   1. zero this partition's slice of the integrationTmp scratch buffer;
//   2. accumulate, over all rate categories, the category-weighted product of
//      the parent partials with the transition-matrix-propagated child
//      contribution (the two vector lanes appear to hold states 0-1 and 2-3);
//   3. integrate each pattern's 4 values against the state frequencies and
//      take log();
//   4. optionally add cumulative scale factors;
//   5. reduce the per-pattern values, weighted by gPatternWeights, into
//      outSumLogLikelihoodByPartition[p].
// The child contribution has two forms: a tip child with compact states
// (gTipStates non-null) uses the matrix entries for the observed state
// directly; otherwise a full 4x4 matrix-vector product with the child
// partials is formed.
//
// @param parentBufferIndices / childBufferIndices  per-partition partials buffers
// @param probabilityIndices       per-partition transition-matrix indices
// @param categoryWeightsIndices   per-partition category-weight indices
// @param stateFrequenciesIndices  per-partition state-frequency indices
// @param cumulativeScaleIndices   per-partition scale-buffer index, or BEAGLE_OP_NONE
// @param partitionIndices         indices into gPatternPartitionsStartPatterns
// @param partitionCount           number of partitions to process
// @param outSumLogLikelihoodByPartition  output, one summed log-likelihood per partition
BEAGLE_CPU_4_SSE_TEMPLATE
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::calcEdgeLogLikelihoodsByPartition(
                                          const int* parentBufferIndices,
                                          const int* childBufferIndices,
                                          const int* probabilityIndices,
                                          const int* categoryWeightsIndices,
                                          const int* stateFrequenciesIndices,
                                          const int* cumulativeScaleIndices,
                                          const int* partitionIndices,
                                          int partitionCount,
                                          double* outSumLogLikelihoodByPartition) {

    double* cl_p = integrationTmp;  // category-summed products, 4 doubles per pattern

    for (int p = 0; p < partitionCount; p++) {
        int pIndex = partitionIndices[p];
        int startPattern = gPatternPartitionsStartPatterns[pIndex];
        int endPattern = gPatternPartitionsStartPatterns[pIndex + 1];

        // Clear only this partition's slice of the accumulation buffer.
        memset(&cl_p[startPattern*kStateCount], 0, ((endPattern - startPattern) * kStateCount)*sizeof(double));

        const int parIndex = parentBufferIndices[p];
        const int childIndex = childBufferIndices[p];
        const int probIndex = probabilityIndices[p];
        const int categoryWeightsIndex = categoryWeightsIndices[p];
        const int stateFrequenciesIndex = stateFrequenciesIndices[p];
        const int scalingFactorsIndex = cumulativeScaleIndices[p];

        assert(parIndex >= kTipCount);  // the parent must be an internal node

        const double* cl_r = gPartials[parIndex];
        const double* transMatrix = gTransitionMatrices[probIndex];
        const double* wt = gCategoryWeights[categoryWeightsIndex];
        const double* freqs = gStateFrequencies[stateFrequenciesIndex];

        if (childIndex < kTipCount && gTipStates[childIndex]) { // Integrate against a state at the child
            const int* statesChild = gTipStates[childIndex];
            int w = 0;  // scalar offset of the current category's transition matrix
            V_Real *vcl_r = (V_Real *) (cl_r + startPattern * 4);  // parent partials, vector view

            for(int l = 0; l < kCategoryCount; l++) {
                // Load this category's matrix into vu_m (layout defined by
                // SSE_PREFETCH_MATRIX; first index selects the child state).
                VecUnion vu_m[OFFSET][2];
                SSE_PREFETCH_MATRIX(transMatrix + w, vu_m)

                V_Real *vcl_p = (V_Real *) (cl_p + startPattern * 4);
                for(int k = startPattern; k < endPattern; k++) {
                    const int stateChild = statesChild[k];
                    V_Real vwt = VEC_SPLAT(wt[l]);
                    // Weighted parent partials times the matrix entries for
                    // the observed child state, accumulated into cl_p.
                    V_Real wtdPartials = VEC_MULT(*vcl_r++, vwt);
                    *vcl_p = VEC_MADD(vu_m[stateChild][0].vx, wtdPartials, *vcl_p);
                    vcl_p++;
                    wtdPartials = VEC_MULT(*vcl_r++, vwt);
                    *vcl_p = VEC_MADD(vu_m[stateChild][1].vx, wtdPartials, *vcl_p);
                    vcl_p++;
                }
                w += OFFSET*4;  // advance to the next category's matrix
                vcl_r += 2 * kExtraPatterns;  // skip padding patterns
                // Skip the patterns outside this partition before the next category.
                vcl_r += ((kPatternCount - endPattern) + startPattern) * 2;
            }
        } else { // Integrate against a partial at the child
            const double* cl_q = gPartials[childIndex];
            V_Real * vcl_r = (V_Real *) (cl_r + startPattern * 4);
            int v = startPattern * 4;  // scalar index into the child partials
            int w = 0;

            for(int l = 0; l < kCategoryCount; l++) {
                V_Real * vcl_p = (V_Real *) (cl_p + startPattern * 4);
                VecUnion vu_m[OFFSET][2];
                SSE_PREFETCH_MATRIX(transMatrix + w, vu_m)

                for(int k = startPattern; k < endPattern; k++) {
                    V_Real vclp_01, vclp_23;
                    V_Real vwt = VEC_SPLAT(wt[l]);

                    // Broadcast the child's four partials into vcl_q0..vcl_q3.
                    V_Real vcl_q0, vcl_q1, vcl_q2, vcl_q3;
                    SSE_PREFETCH_PARTIALS(vcl_q,cl_q,v);

                    // 4x4 matrix-vector product, two output lanes at a time.
                    vclp_01 = VEC_MULT(vcl_q0, vu_m[0][0].vx);
                    vclp_01 = VEC_MADD(vcl_q1, vu_m[1][0].vx, vclp_01);
                    vclp_01 = VEC_MADD(vcl_q2, vu_m[2][0].vx, vclp_01);
                    vclp_01 = VEC_MADD(vcl_q3, vu_m[3][0].vx, vclp_01);
                    vclp_23 = VEC_MULT(vcl_q0, vu_m[0][1].vx);
                    vclp_23 = VEC_MADD(vcl_q1, vu_m[1][1].vx, vclp_23);
                    vclp_23 = VEC_MADD(vcl_q2, vu_m[2][1].vx, vclp_23);
                    vclp_23 = VEC_MADD(vcl_q3, vu_m[3][1].vx, vclp_23);

                    // Apply the category weight, then accumulate the product
                    // with the parent partials into cl_p.
                    vclp_01 = VEC_MULT(vclp_01, vwt);
                    vclp_23 = VEC_MULT(vclp_23, vwt);
                    *vcl_p = VEC_MADD(vclp_01, *vcl_r++, *vcl_p);
                    vcl_p++;
                    *vcl_p = VEC_MADD(vclp_23, *vcl_r++, *vcl_p);
                    vcl_p++;

                    v += 4;
                }
                w += 4*OFFSET;
                if (kExtraPatterns) {
                    vcl_r += 2 * kExtraPatterns;  // skip padding patterns
                    v += 4 * kExtraPatterns;
                }
                // Skip the patterns outside this partition before the next category.
                vcl_r += ((kPatternCount - endPattern) + startPattern) * 2;
                v += ((kPatternCount - endPattern) + startPattern) * 4;
            }
        }

        // Integrate over states against the frequencies; take log per pattern.
        int u = startPattern * 4;
        for(int k = startPattern; k < endPattern; k++) {
            double sumOverI = 0.0;
            for(int i = 0; i < kStateCount; i++) {
                sumOverI += freqs[i] * cl_p[u];
                u++;
            }
            outLogLikelihoodsTmp[k] = log(sumOverI);
        }

        if (scalingFactorsIndex != BEAGLE_OP_NONE) {
            // Add back the per-pattern cumulative scale factors.
            const double* scalingFactors = gScaleBuffers[scalingFactorsIndex];
            for(int k=startPattern; k < endPattern; k++)
                outLogLikelihoodsTmp[k] += scalingFactors[k];
        }

        // Pattern-weighted reduction for this partition.
        outSumLogLikelihoodByPartition[p] = 0.0;
        for (int i = startPattern; i < endPattern; i++) {
            outSumLogLikelihoodByPartition[p] += outLogLikelihoodsTmp[i] * gPatternWeights[i];
        }
    }
}
subq $0x6f8, %rsp # imm = 0x6F8 movq 0x718(%rsp), %rax movl 0x710(%rsp), %eax movq 0x708(%rsp), %rax movq 0x700(%rsp), %rax movq %rdi, 0x358(%rsp) movq %rsi, 0x350(%rsp) movq %rdx, 0x348(%rsp) movq %rcx, 0x340(%rsp) movq %r8, 0x338(%rsp) movq %r9, 0x330(%rsp) movq 0x358(%rsp), %rax movq %rax, (%rsp) movq 0xe0(%rax), %rax movq %rax, 0x328(%rsp) movl $0x0, 0x324(%rsp) movl 0x324(%rsp), %eax cmpl 0x710(%rsp), %eax jge 0x75b60 movq (%rsp), %rcx movq 0x708(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x320(%rsp) movq 0x90(%rcx), %rax movslq 0x320(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x31c(%rsp) movq 0x90(%rcx), %rax movl 0x320(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x318(%rsp) movq 0x328(%rsp), %rdi movl 0x31c(%rsp), %eax imull 0x24(%rcx), %eax cltq shlq $0x3, %rax addq %rax, %rdi movl 0x318(%rsp), %eax subl 0x31c(%rsp), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq (%rsp), %rcx movq 0x350(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x314(%rsp) movq 0x348(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x310(%rsp) movq 0x340(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x30c(%rsp) movq 0x338(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x308(%rsp) movq 0x330(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x304(%rsp) movq 0x700(%rsp), %rax movslq 0x324(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x300(%rsp) movq 0xb0(%rcx), %rax movslq 0x314(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x2f8(%rsp) movq 0xd8(%rcx), %rax movslq 0x30c(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x2f0(%rsp) movq 0xa0(%rcx), %rax movslq 0x308(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x2e8(%rsp) movq 0xa8(%rcx), %rax movslq 0x304(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x2e0(%rsp) movl 0x310(%rsp), %eax 
cmpl 0x10(%rcx), %eax jge 0x75166 movq (%rsp), %rax movq 0xb8(%rax), %rax movslq 0x310(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x75166 movq (%rsp), %rax movq 0xb8(%rax), %rax movslq 0x310(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x2d8(%rsp) movl $0x0, 0x2d4(%rsp) movq 0x2f8(%rsp), %rax movl 0x31c(%rsp), %ecx shll $0x2, %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x2c8(%rsp) movl $0x0, 0x2c4(%rsp) movq (%rsp), %rcx movl 0x2c4(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x75161 movq 0x2f0(%rsp), %rax movslq 0x2d4(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x1f8(%rsp) movl $0x0, 0x1f4(%rsp) cmpl $0x6, 0x1f4(%rsp) jge 0x74e5f movq 0x1f8(%rsp), %rax vmovsd (%rax), %xmm0 movslq 0x1f4(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, (%rax) movq 0x1f8(%rsp), %rax vmovsd 0x30(%rax), %xmm0 movslq 0x1f4(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x8(%rax) movq 0x1f8(%rsp), %rax vmovsd 0x60(%rax), %xmm0 movslq 0x1f4(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x10(%rax) movq 0x1f8(%rsp), %rax vmovsd 0x90(%rax), %xmm0 movslq 0x1f4(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x18(%rax) movl 0x1f4(%rsp), %eax addl $0x1, %eax movl %eax, 0x1f4(%rsp) movq 0x1f8(%rsp), %rax addq $0x8, %rax movq %rax, 0x1f8(%rsp) jmp 0x74d82 movq 0x328(%rsp), %rax movl 0x31c(%rsp), %ecx shll $0x2, %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x1e8(%rsp) movl 0x31c(%rsp), %eax movl %eax, 0x1e4(%rsp) movl 0x1e4(%rsp), %eax cmpl 0x318(%rsp), %eax jge 0x750ea movq 0x2d8(%rsp), %rax movslq 0x1e4(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, 0x1e0(%rsp) movq 0x2e8(%rsp), %rax movslq 0x2c4(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x398(%rsp) vmovddup 0x398(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x380(%rsp) vmovapd 0x380(%rsp), %xmm0 vmovapd %xmm0, 0x1d0(%rsp) movq 0x2c8(%rsp), %rax movq 
%rax, %rcx addq $0x10, %rcx movq %rcx, 0x2c8(%rsp) vmovapd (%rax), %xmm1 vmovapd 0x1d0(%rsp), %xmm0 vmovapd %xmm1, 0x590(%rsp) vmovapd %xmm0, 0x580(%rsp) vmovapd 0x590(%rsp), %xmm0 vmulpd 0x580(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x1c0(%rsp) movslq 0x1e0(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovapd (%rax), %xmm1 vmovapd 0x1c0(%rsp), %xmm0 vmovapd %xmm1, 0x570(%rsp) vmovapd %xmm0, 0x560(%rsp) vmovapd 0x570(%rsp), %xmm0 vmulpd 0x560(%rsp), %xmm0, %xmm1 movq 0x1e8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x6d0(%rsp) vmovapd %xmm0, 0x6c0(%rsp) vmovapd 0x6d0(%rsp), %xmm0 vaddpd 0x6c0(%rsp), %xmm0, %xmm0 movq 0x1e8(%rsp), %rax vmovapd %xmm0, (%rax) movq 0x1e8(%rsp), %rax addq $0x10, %rax movq %rax, 0x1e8(%rsp) movq 0x2c8(%rsp), %rax movq %rax, %rcx addq $0x10, %rcx movq %rcx, 0x2c8(%rsp) vmovapd (%rax), %xmm1 vmovapd 0x1d0(%rsp), %xmm0 vmovapd %xmm1, 0x550(%rsp) vmovapd %xmm0, 0x540(%rsp) vmovapd 0x550(%rsp), %xmm0 vmulpd 0x540(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0x1c0(%rsp) movslq 0x1e0(%rsp), %rcx leaq 0x200(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovapd 0x10(%rax), %xmm1 vmovapd 0x1c0(%rsp), %xmm0 vmovapd %xmm1, 0x530(%rsp) vmovapd %xmm0, 0x520(%rsp) vmovapd 0x530(%rsp), %xmm0 vmulpd 0x520(%rsp), %xmm0, %xmm1 movq 0x1e8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x6b0(%rsp) vmovapd %xmm0, 0x6a0(%rsp) vmovapd 0x6b0(%rsp), %xmm0 vaddpd 0x6a0(%rsp), %xmm0, %xmm0 movq 0x1e8(%rsp), %rax vmovapd %xmm0, (%rax) movq 0x1e8(%rsp), %rax addq $0x10, %rax movq %rax, 0x1e8(%rsp) movl 0x1e4(%rsp), %eax addl $0x1, %eax movl %eax, 0x1e4(%rsp) jmp 0x74e91 movq (%rsp), %rax movl 0x2d4(%rsp), %ecx addl $0x18, %ecx movl %ecx, 0x2d4(%rsp) movl 0x1c(%rax), %edx shll %edx movq 0x2c8(%rsp), %rcx movslq %edx, %rdx shlq $0x4, %rdx addq %rdx, %rcx movq %rcx, 0x2c8(%rsp) movl 0x14(%rax), %ecx subl 0x318(%rsp), %ecx addl 0x31c(%rsp), %ecx shll %ecx movq 0x2c8(%rsp), %rax movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x2c8(%rsp) movl 
0x2c4(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c4(%rsp) jmp 0x74d44 jmp 0x75989 movq (%rsp), %rax movq 0xb0(%rax), %rax movslq 0x310(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x1b8(%rsp) movq 0x2f8(%rsp), %rax movl 0x31c(%rsp), %ecx shll $0x2, %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x1b0(%rsp) movl 0x31c(%rsp), %eax shll $0x2, %eax movl %eax, 0x1ac(%rsp) movl $0x0, 0x1a8(%rsp) movl $0x0, 0x1a4(%rsp) movq (%rsp), %rcx movl 0x1a4(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x75987 movq 0x328(%rsp), %rax movl 0x31c(%rsp), %ecx shll $0x2, %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x198(%rsp) movq 0x2f0(%rsp), %rax movslq 0x1a8(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xc8(%rsp) movl $0x0, 0xc4(%rsp) cmpl $0x6, 0xc4(%rsp) jge 0x7530f movq 0xc8(%rsp), %rax vmovsd (%rax), %xmm0 movslq 0xc4(%rsp), %rcx leaq 0xd0(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, (%rax) movq 0xc8(%rsp), %rax vmovsd 0x30(%rax), %xmm0 movslq 0xc4(%rsp), %rcx leaq 0xd0(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x8(%rax) movq 0xc8(%rsp), %rax vmovsd 0x60(%rax), %xmm0 movslq 0xc4(%rsp), %rcx leaq 0xd0(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x10(%rax) movq 0xc8(%rsp), %rax vmovsd 0x90(%rax), %xmm0 movslq 0xc4(%rsp), %rcx leaq 0xd0(%rsp), %rax shlq $0x5, %rcx addq %rcx, %rax vmovsd %xmm0, 0x18(%rax) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) movq 0xc8(%rsp), %rax addq $0x8, %rax movq %rax, 0xc8(%rsp) jmp 0x75232 movl 0x31c(%rsp), %eax movl %eax, 0xc0(%rsp) movl 0xc0(%rsp), %eax cmpl 0x318(%rsp), %eax jge 0x758cc movq 0x2e8(%rsp), %rax movslq 0x1a4(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x378(%rsp) vmovddup 0x378(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x360(%rsp) vmovapd 0x360(%rsp), %xmm0 vmovapd %xmm0, 0x90(%rsp) movq 0x1b8(%rsp), %rax movslq 0x1ac(%rsp), %rcx leaq (%rax,%rcx,8), %rax movq %rax, 0x6f0(%rsp) movq 0x6f0(%rsp), %rax vmovapd (%rax), %xmm0 
vmovapd %xmm0, 0x40(%rsp) movq 0x1b8(%rsp), %rax movslq 0x1ac(%rsp), %rcx leaq 0x10(%rax,%rcx,8), %rax movq %rax, 0x6e8(%rsp) movq 0x6e8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, 0x30(%rsp) vmovddup 0x40(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x80(%rsp) vmovddup 0x48(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x70(%rsp) vmovddup 0x30(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x60(%rsp) vmovddup 0x38(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x50(%rsp) vmovapd 0x80(%rsp), %xmm1 vmovapd 0xd0(%rsp), %xmm0 vmovapd %xmm1, 0x510(%rsp) vmovapd %xmm0, 0x500(%rsp) vmovapd 0x510(%rsp), %xmm0 vmulpd 0x500(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovapd 0x70(%rsp), %xmm1 vmovapd 0xf0(%rsp), %xmm0 vmovapd %xmm1, 0x4f0(%rsp) vmovapd %xmm0, 0x4e0(%rsp) vmovapd 0x4f0(%rsp), %xmm0 vmulpd 0x4e0(%rsp), %xmm0, %xmm1 vmovapd 0xb0(%rsp), %xmm0 vmovapd %xmm1, 0x690(%rsp) vmovapd %xmm0, 0x680(%rsp) vmovapd 0x690(%rsp), %xmm0 vaddpd 0x680(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovapd 0x60(%rsp), %xmm1 vmovapd 0x110(%rsp), %xmm0 vmovapd %xmm1, 0x4d0(%rsp) vmovapd %xmm0, 0x4c0(%rsp) vmovapd 0x4d0(%rsp), %xmm0 vmulpd 0x4c0(%rsp), %xmm0, %xmm1 vmovapd 0xb0(%rsp), %xmm0 vmovapd %xmm1, 0x670(%rsp) vmovapd %xmm0, 0x660(%rsp) vmovapd 0x670(%rsp), %xmm0 vaddpd 0x660(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovapd 0x50(%rsp), %xmm1 vmovapd 0x130(%rsp), %xmm0 vmovapd %xmm1, 0x4b0(%rsp) vmovapd %xmm0, 0x4a0(%rsp) vmovapd 0x4b0(%rsp), %xmm0 vmulpd 0x4a0(%rsp), %xmm0, %xmm1 vmovapd 0xb0(%rsp), %xmm0 vmovapd %xmm1, 0x650(%rsp) vmovapd %xmm0, 0x640(%rsp) vmovapd 0x650(%rsp), %xmm0 vaddpd 0x640(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovapd 0x80(%rsp), %xmm1 vmovapd 0xe0(%rsp), %xmm0 vmovapd %xmm1, 0x490(%rsp) vmovapd %xmm0, 0x480(%rsp) vmovapd 0x490(%rsp), %xmm0 vmulpd 0x480(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xa0(%rsp) vmovapd 0x70(%rsp), %xmm1 vmovapd 0x100(%rsp), %xmm0 vmovapd %xmm1, 0x470(%rsp) vmovapd %xmm0, 0x460(%rsp) vmovapd 0x470(%rsp), %xmm0 
vmulpd 0x460(%rsp), %xmm0, %xmm1 vmovapd 0xa0(%rsp), %xmm0 vmovapd %xmm1, 0x630(%rsp) vmovapd %xmm0, 0x620(%rsp) vmovapd 0x630(%rsp), %xmm0 vaddpd 0x620(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xa0(%rsp) vmovapd 0x60(%rsp), %xmm1 vmovapd 0x120(%rsp), %xmm0 vmovapd %xmm1, 0x450(%rsp) vmovapd %xmm0, 0x440(%rsp) vmovapd 0x450(%rsp), %xmm0 vmulpd 0x440(%rsp), %xmm0, %xmm1 vmovapd 0xa0(%rsp), %xmm0 vmovapd %xmm1, 0x610(%rsp) vmovapd %xmm0, 0x600(%rsp) vmovapd 0x610(%rsp), %xmm0 vaddpd 0x600(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xa0(%rsp) vmovapd 0x50(%rsp), %xmm1 vmovapd 0x140(%rsp), %xmm0 vmovapd %xmm1, 0x430(%rsp) vmovapd %xmm0, 0x420(%rsp) vmovapd 0x430(%rsp), %xmm0 vmulpd 0x420(%rsp), %xmm0, %xmm1 vmovapd 0xa0(%rsp), %xmm0 vmovapd %xmm1, 0x5f0(%rsp) vmovapd %xmm0, 0x5e0(%rsp) vmovapd 0x5f0(%rsp), %xmm0 vaddpd 0x5e0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xa0(%rsp) vmovapd 0xb0(%rsp), %xmm1 vmovapd 0x90(%rsp), %xmm0 vmovapd %xmm1, 0x410(%rsp) vmovapd %xmm0, 0x400(%rsp) vmovapd 0x410(%rsp), %xmm0 vmulpd 0x400(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovapd 0xa0(%rsp), %xmm1 vmovapd 0x90(%rsp), %xmm0 vmovapd %xmm1, 0x3f0(%rsp) vmovapd %xmm0, 0x3e0(%rsp) vmovapd 0x3f0(%rsp), %xmm0 vmulpd 0x3e0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, 0xa0(%rsp) vmovapd 0xb0(%rsp), %xmm1 movq 0x1b0(%rsp), %rax movq %rax, %rcx addq $0x10, %rcx movq %rcx, 0x1b0(%rsp) vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x3d0(%rsp) vmovapd %xmm0, 0x3c0(%rsp) vmovapd 0x3d0(%rsp), %xmm0 vmulpd 0x3c0(%rsp), %xmm0, %xmm1 movq 0x198(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x5d0(%rsp) vmovapd %xmm0, 0x5c0(%rsp) vmovapd 0x5d0(%rsp), %xmm0 vaddpd 0x5c0(%rsp), %xmm0, %xmm0 movq 0x198(%rsp), %rax vmovapd %xmm0, (%rax) movq 0x198(%rsp), %rax addq $0x10, %rax movq %rax, 0x198(%rsp) vmovapd 0xa0(%rsp), %xmm1 movq 0x1b0(%rsp), %rax movq %rax, %rcx addq $0x10, %rcx movq %rcx, 0x1b0(%rsp) vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x3b0(%rsp) vmovapd %xmm0, 0x3a0(%rsp) vmovapd 0x3b0(%rsp), %xmm0 vmulpd 0x3a0(%rsp), %xmm0, %xmm1 
movq 0x198(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x5b0(%rsp) vmovapd %xmm0, 0x5a0(%rsp) vmovapd 0x5b0(%rsp), %xmm0 vaddpd 0x5a0(%rsp), %xmm0, %xmm0 movq 0x198(%rsp), %rax vmovapd %xmm0, (%rax) movq 0x198(%rsp), %rax addq $0x10, %rax movq %rax, 0x198(%rsp) movl 0x1ac(%rsp), %eax addl $0x4, %eax movl %eax, 0x1ac(%rsp) movl 0xc0(%rsp), %eax addl $0x1, %eax movl %eax, 0xc0(%rsp) jmp 0x7531d movq (%rsp), %rax movl 0x1a8(%rsp), %ecx addl $0x18, %ecx movl %ecx, 0x1a8(%rsp) cmpl $0x0, 0x1c(%rax) je 0x7591e movq (%rsp), %rax movl 0x1c(%rax), %edx shll %edx movq 0x1b0(%rsp), %rcx movslq %edx, %rdx shlq $0x4, %rdx addq %rdx, %rcx movq %rcx, 0x1b0(%rsp) movl 0x1c(%rax), %eax shll $0x2, %eax addl 0x1ac(%rsp), %eax movl %eax, 0x1ac(%rsp) movq (%rsp), %rax movl 0x14(%rax), %edx subl 0x318(%rsp), %edx addl 0x31c(%rsp), %edx shll %edx movq 0x1b0(%rsp), %rcx movslq %edx, %rdx shlq $0x4, %rdx addq %rdx, %rcx movq %rcx, 0x1b0(%rsp) movl 0x14(%rax), %eax subl 0x318(%rsp), %eax addl 0x31c(%rsp), %eax shll $0x2, %eax addl 0x1ac(%rsp), %eax movl %eax, 0x1ac(%rsp) movl 0x1a4(%rsp), %eax addl $0x1, %eax movl %eax, 0x1a4(%rsp) jmp 0x751d0 jmp 0x75989 movl 0x31c(%rsp), %eax shll $0x2, %eax movl %eax, 0x2c(%rsp) movl 0x31c(%rsp), %eax movl %eax, 0x28(%rsp) movl 0x28(%rsp), %eax cmpl 0x318(%rsp), %eax jge 0x75a4f vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x20(%rsp) movl $0x0, 0x1c(%rsp) movq (%rsp), %rcx movl 0x1c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x75a1f movq 0x2e0(%rsp), %rax movslq 0x1c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x328(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x20(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x20(%rsp) movl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c(%rsp) movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x759c5 vmovsd 0x20(%rsp), %xmm0 callq 0x61460 movq (%rsp), %rax movq 0x110(%rax), %rax movslq 0x28(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 
0x28(%rsp), %eax addl $0x1, %eax movl %eax, 0x28(%rsp) jmp 0x759a2 cmpl $-0x1, 0x300(%rsp) je 0x75ac5 movq (%rsp), %rax movq 0xc0(%rax), %rax movslq 0x300(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x10(%rsp) movl 0x31c(%rsp), %eax movl %eax, 0xc(%rsp) movl 0xc(%rsp), %eax cmpl 0x318(%rsp), %eax jge 0x75ac3 movq (%rsp), %rax movq 0x10(%rsp), %rcx movslq 0xc(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x110(%rax), %rax movslq 0xc(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0x75a80 jmp 0x75ac5 movq 0x718(%rsp), %rax movslq 0x324(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x31c(%rsp), %eax movl %eax, 0x8(%rsp) movl 0x8(%rsp), %eax cmpl 0x318(%rsp), %eax jge 0x75b48 movq (%rsp), %rax movq 0x110(%rax), %rcx movslq 0x8(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq 0x8(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x718(%rsp), %rax movslq 0x324(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x8(%rsp), %eax addl $0x1, %eax movl %eax, 0x8(%rsp) jmp 0x75ae9 jmp 0x75b4a movl 0x324(%rsp), %eax addl $0x1, %eax movl %eax, 0x324(%rsp) jmp 0x74b0c addq $0x6f8, %rsp # imm = 0x6F8 retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcEdgeLogLikelihoodsSecondDerivByPartition(int const*, int const*, int const*, int const*, int const*, int const*, int const*, int const*, int const*, int, double*, double*, double*)
// Computes, per pattern partition, the edge log-likelihood together with its
// first and second derivatives with respect to the branch, using the supplied
// transition matrix and its first/second derivative matrices. For each
// partition the per-pattern values are reduced (weighted by gPatternWeights)
// into the three per-partition output sums.
//
// @param parentBufferIndices / childBufferIndices    per-partition partials buffers
// @param probabilityIndices                          per-partition transition matrices
// @param firstDerivativeIndices / secondDerivativeIndices
//                                                    per-partition derivative matrices
// @param categoryWeightsIndices                      per-partition category weights
// @param stateFrequenciesIndices                     per-partition state frequencies
// @param cumulativeScaleIndices                      per-partition scale buffer or BEAGLE_OP_NONE
// @param partitionIndices                            indices into gPatternPartitionsStartPatterns
// @param partitionCount                              number of partitions
// @param outSumLogLikelihoodByPartition              output: summed log-likelihood per partition
// @param outSumFirstDerivativeByPartition            output: summed first derivative per partition
// @param outSumSecondDerivativeByPartition           output: summed second derivative per partition
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcEdgeLogLikelihoodsSecondDerivByPartition(
                                          const int* parentBufferIndices,
                                          const int* childBufferIndices,
                                          const int* probabilityIndices,
                                          const int* firstDerivativeIndices,
                                          const int* secondDerivativeIndices,
                                          const int* categoryWeightsIndices,
                                          const int* stateFrequenciesIndices,
                                          const int* cumulativeScaleIndices,
                                          const int* partitionIndices,
                                          int partitionCount,
                                          double* outSumLogLikelihoodByPartition,
                                          double* outSumFirstDerivativeByPartition,
                                          double* outSumSecondDerivativeByPartition) {

    for (int p = 0; p < partitionCount; p++) {
        int pIndex = partitionIndices[p];
        int startPattern = gPatternPartitionsStartPatterns[pIndex];
        int endPattern = gPatternPartitionsStartPatterns[pIndex + 1];

        // Zero this partition's slices of the three parallel accumulators.
        memset(&integrationTmp[startPattern*kStateCount], 0, ((endPattern - startPattern) * kStateCount)*sizeof(REALTYPE));
        memset(&firstDerivTmp[startPattern*kStateCount], 0, ((endPattern - startPattern) * kStateCount)*sizeof(REALTYPE));
        memset(&secondDerivTmp[startPattern*kStateCount], 0, ((endPattern - startPattern) * kStateCount)*sizeof(REALTYPE));

        const int parIndex = parentBufferIndices[p];
        const int childIndex = childBufferIndices[p];
        const int probIndex = probabilityIndices[p];
        const int firstDerivativeIndex = firstDerivativeIndices[p];
        const int secondDerivativeIndex = secondDerivativeIndices[p];
        const int categoryWeightsIndex = categoryWeightsIndices[p];
        const int stateFrequenciesIndex = stateFrequenciesIndices[p];
        const int scalingFactorsIndex = cumulativeScaleIndices[p];

        assert(parIndex >= kTipCount);  // the parent must be an internal node

        const REALTYPE* partialsParent = gPartials[parIndex];
        const REALTYPE* transMatrix = gTransitionMatrices[probIndex];
        const REALTYPE* firstDerivMatrix = gTransitionMatrices[firstDerivativeIndex];
        const REALTYPE* secondDerivMatrix = gTransitionMatrices[secondDerivativeIndex];
        const REALTYPE* wt = gCategoryWeights[categoryWeightsIndex];
        const REALTYPE* freqs = gStateFrequencies[stateFrequenciesIndex];

        if (childIndex < kTipCount && gTipStates[childIndex]) { // Integrate against a state at the child
            const int* statesChild = gTipStates[childIndex];
            int v = startPattern * kPartialsPaddedStateCount; // Index for parent partials

            for(int l = 0; l < kCategoryCount; l++) {
                int u = startPattern * kStateCount; // Index in resulting product-partials (summed over categories)
                const REALTYPE weight = wt[l];
                for(int k = startPattern; k < endPattern; k++) {

                    const int stateChild = statesChild[k];
                    // DISCUSSION PT: Does it make sense to change the order of the partials,
                    // so we can interchange the patternCount and categoryCount loop order?
                    int w = l * kMatrixSize;  // start of this category's matrix; stateChild selects the column
                    for(int i = 0; i < kStateCount; i++) {
                        // Accumulate likelihood and both derivative contributions in lockstep.
                        integrationTmp[u] += transMatrix[w + stateChild] * partialsParent[v + i] * weight;
                        firstDerivTmp[u] += firstDerivMatrix[w + stateChild] * partialsParent[v + i] * weight;
                        secondDerivTmp[u] += secondDerivMatrix[w + stateChild] * partialsParent[v + i] * weight;
                        u++;
                        w += kTransPaddedStateCount;  // step to the next matrix row
                    }
                    v += kPartialsPaddedStateCount;
                }
                // Skip the patterns outside this partition before the next category.
                v += ((kPatternCount - endPattern) + startPattern) * kPartialsPaddedStateCount;
            }
        } else { // Integrate against a partial at the child
            const REALTYPE* partialsChild = gPartials[childIndex];
            int v = startPattern * kPartialsPaddedStateCount;

            for(int l = 0; l < kCategoryCount; l++) {
                int u = startPattern * kStateCount;
                const REALTYPE weight = wt[l];
                for(int k = startPattern; k < endPattern; k++) {
                    int w = l * kMatrixSize;
                    for(int i = 0; i < kStateCount; i++) {
                        // Row i of each matrix dotted with the child partials.
                        double sumOverJ = 0.0;
                        double sumOverJD1 = 0.0;
                        double sumOverJD2 = 0.0;
                        for(int j = 0; j < kStateCount; j++) {
                            sumOverJ += transMatrix[w] * partialsChild[v + j];
                            sumOverJD1 += firstDerivMatrix[w] * partialsChild[v + j];
                            sumOverJD2 += secondDerivMatrix[w] * partialsChild[v + j];
                            w++;
                        }

                        // increment for the extra column at the end
                        w += T_PAD;

                        integrationTmp[u] += sumOverJ * partialsParent[v + i] * weight;
                        firstDerivTmp[u] += sumOverJD1 * partialsParent[v + i] * weight;
                        secondDerivTmp[u] += sumOverJD2 * partialsParent[v + i] * weight;
                        u++;
                    }
                    v += kPartialsPaddedStateCount;
                }
                // Skip the patterns outside this partition before the next category.
                v += ((kPatternCount - endPattern) + startPattern) * kPartialsPaddedStateCount;
            }
        }

        // Integrate over states against the frequencies. The per-pattern first
        // derivative of log L is D1/L; the second derivative is D2/L - (D1/L)^2.
        int u = startPattern * kStateCount;
        for(int k = startPattern; k < endPattern; k++) {
            REALTYPE sumOverI = 0.0;
            REALTYPE sumOverID1 = 0.0;
            REALTYPE sumOverID2 = 0.0;
            for(int i = 0; i < kStateCount; i++) {
                sumOverI += freqs[i] * integrationTmp[u];
                sumOverID1 += freqs[i] * firstDerivTmp[u];
                sumOverID2 += freqs[i] * secondDerivTmp[u];
                u++;
            }
            outLogLikelihoodsTmp[k] = log(sumOverI);
            outFirstDerivativesTmp[k] = sumOverID1 / sumOverI;
            outSecondDerivativesTmp[k] = sumOverID2 / sumOverI - outFirstDerivativesTmp[k] * outFirstDerivativesTmp[k];
        }

        if (scalingFactorsIndex != BEAGLE_OP_NONE) {
            // Add back the per-pattern cumulative scale factors (log-likelihood
            // only; the derivative ratios are unaffected by rescaling).
            const REALTYPE* scalingFactors = gScaleBuffers[scalingFactorsIndex];
            for(int k=startPattern; k < endPattern; k++)
                outLogLikelihoodsTmp[k] += scalingFactors[k];
        }

        // Pattern-weighted reductions for this partition.
        outSumLogLikelihoodByPartition[p] = 0.0;
        outSumFirstDerivativeByPartition[p] = 0.0;
        outSumSecondDerivativeByPartition[p] = 0.0;
        for (int i = startPattern; i < endPattern; i++) {
            outSumLogLikelihoodByPartition[p] += outLogLikelihoodsTmp[i] * gPatternWeights[i];
            outSumFirstDerivativeByPartition[p] += outFirstDerivativesTmp[i] * gPatternWeights[i];
            outSumSecondDerivativeByPartition[p] += outSecondDerivativesTmp[i] * gPatternWeights[i];
        }
    }
}
subq $0x158, %rsp # imm = 0x158 movq 0x198(%rsp), %rax movq 0x190(%rsp), %rax movq 0x188(%rsp), %rax movl 0x180(%rsp), %eax movq 0x178(%rsp), %rax movq 0x170(%rsp), %rax movq 0x168(%rsp), %rax movq 0x160(%rsp), %rax movq %rdi, 0x150(%rsp) movq %rsi, 0x148(%rsp) movq %rdx, 0x140(%rsp) movq %rcx, 0x138(%rsp) movq %r8, 0x130(%rsp) movq %r9, 0x128(%rsp) movq 0x150(%rsp), %rax movq %rax, 0x8(%rsp) movl $0x0, 0x124(%rsp) movl 0x124(%rsp), %eax cmpl 0x180(%rsp), %eax jge 0x76875 movq 0x8(%rsp), %rcx movq 0x178(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x120(%rsp) movq 0x90(%rcx), %rax movslq 0x120(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x11c(%rsp) movq 0x90(%rcx), %rax movl 0x120(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x118(%rsp) movq 0xe0(%rcx), %rdi movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax cltq shlq $0x3, %rax addq %rax, %rdi movl 0x118(%rsp), %eax subl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq 0x8(%rsp), %rcx movq 0xe8(%rcx), %rdi movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax cltq shlq $0x3, %rax addq %rax, %rdi movl 0x118(%rsp), %eax subl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq 0x8(%rsp), %rcx movq 0xf0(%rcx), %rdi movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax cltq shlq $0x3, %rax addq %rax, %rdi movl 0x118(%rsp), %eax subl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq 0x8(%rsp), %rcx movq 0x148(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x114(%rsp) movq 0x140(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x110(%rsp) movq 0x138(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x10c(%rsp) movq 0x130(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x108(%rsp) movq 0x128(%rsp), %rax 
movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x104(%rsp) movq 0x160(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x100(%rsp) movq 0x168(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xfc(%rsp) movq 0x170(%rsp), %rax movslq 0x124(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xf8(%rsp) movq 0xb0(%rcx), %rax movslq 0x114(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xf0(%rsp) movq 0xd8(%rcx), %rax movslq 0x10c(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xe8(%rsp) movq 0xd8(%rcx), %rax movslq 0x108(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xe0(%rsp) movq 0xd8(%rcx), %rax movslq 0x104(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xd8(%rsp) movq 0xa0(%rcx), %rax movslq 0x100(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xd0(%rsp) movq 0xa8(%rcx), %rax movslq 0xfc(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xc8(%rsp) movl 0x110(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0x76198 movq 0x8(%rsp), %rax movq 0xb8(%rax), %rax movslq 0x110(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x76198 movq 0x8(%rsp), %rcx movq 0xb8(%rcx), %rax movslq 0x110(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xc0(%rsp) movl 0x11c(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0xbc(%rsp) movl $0x0, 0xb8(%rsp) movq 0x8(%rsp), %rcx movl 0xb8(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x76193 movq 0x8(%rsp), %rcx movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0xb4(%rsp) movq 0xd0(%rsp), %rax movslq 0xb8(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa8(%rsp) movl 0x11c(%rsp), %eax movl %eax, 0xa4(%rsp) movl 0xa4(%rsp), %eax cmpl 0x118(%rsp), %eax jge 0x76155 movq 0x8(%rsp), %rcx movq 0xc0(%rsp), %rax movslq 0xa4(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xa0(%rsp) movl 0xb8(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0x9c(%rsp) movl $0x0, 0x98(%rsp) movq 0x8(%rsp), %rcx movl 0x98(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x76129 movq 0x8(%rsp), %rax movq 0xe8(%rsp), %rcx movl 0x9c(%rsp), %edx 
addl 0xa0(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf0(%rsp), %rcx movl 0xbc(%rsp), %edx addl 0x98(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xa8(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0xb4(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0xe0(%rsp), %rcx movl 0x9c(%rsp), %edx addl 0xa0(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf0(%rsp), %rcx movl 0xbc(%rsp), %edx addl 0x98(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xa8(%rsp), %xmm0 movq 0xe8(%rax), %rcx movslq 0xb4(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0xd8(%rsp), %rcx movl 0x9c(%rsp), %edx addl 0xa0(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf0(%rsp), %rcx movl 0xbc(%rsp), %edx addl 0x98(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xa8(%rsp), %xmm0 movq 0xf0(%rax), %rcx movslq 0xb4(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movl 0xb4(%rsp), %ecx addl $0x1, %ecx movl %ecx, 0xb4(%rsp) movl 0x28(%rax), %eax addl 0x9c(%rsp), %eax movl %eax, 0x9c(%rsp) movl 0x98(%rsp), %eax addl $0x1, %eax movl %eax, 0x98(%rsp) jmp 0x75fae movq 0x8(%rsp), %rax movl 0x2c(%rax), %eax addl 0xbc(%rsp), %eax movl %eax, 0xbc(%rsp) movl 0xa4(%rsp), %eax addl $0x1, %eax movl %eax, 0xa4(%rsp) jmp 0x75f5e movq 0x8(%rsp), %rcx movl 0x14(%rcx), %eax subl 0x118(%rsp), %eax addl 0x11c(%rsp), %eax imull 0x2c(%rcx), %eax addl 0xbc(%rsp), %eax movl %eax, 0xbc(%rsp) movl 0xb8(%rsp), %eax addl $0x1, %eax movl %eax, 0xb8(%rsp) jmp 0x75f06 jmp 0x764d9 movq 0x8(%rsp), %rcx movq 0xb0(%rcx), %rax movslq 0x110(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x90(%rsp) movl 0x11c(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0x8c(%rsp) 
movl $0x0, 0x88(%rsp) movq 0x8(%rsp), %rcx movl 0x88(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x764d7 movq 0x8(%rsp), %rcx movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0x84(%rsp) movq 0xd0(%rsp), %rax movslq 0x88(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x78(%rsp) movl 0x11c(%rsp), %eax movl %eax, 0x74(%rsp) movl 0x74(%rsp), %eax cmpl 0x118(%rsp), %eax jge 0x76499 movq 0x8(%rsp), %rcx movl 0x88(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0x70(%rsp) movl $0x0, 0x6c(%rsp) movq 0x8(%rsp), %rcx movl 0x6c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x76473 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x60(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x58(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x50(%rsp) movl $0x0, 0x4c(%rsp) movq 0x8(%rsp), %rcx movl 0x4c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x76373 movq 0xe8(%rsp), %rax movslq 0x70(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x90(%rsp), %rax movl 0x8c(%rsp), %ecx addl 0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x60(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x60(%rsp) movq 0xe0(%rsp), %rax movslq 0x70(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x90(%rsp), %rax movl 0x8c(%rsp), %ecx addl 0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x58(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x58(%rsp) movq 0xd8(%rsp), %rax movslq 0x70(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x90(%rsp), %rax movl 0x8c(%rsp), %ecx addl 0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x50(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x50(%rsp) movl 0x70(%rsp), %eax addl $0x1, %eax movl %eax, 0x70(%rsp) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) jmp 0x7628c movq 0x8(%rsp), %rax movl 0x70(%rsp), %ecx addl $0x2, %ecx movl %ecx, 0x70(%rsp) vmovsd 0x60(%rsp), %xmm0 movq 0xf0(%rsp), %rcx movl 0x8c(%rsp), 
%edx addl 0x6c(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x78(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0x84(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x58(%rsp), %xmm0 movq 0xf0(%rsp), %rcx movl 0x8c(%rsp), %edx addl 0x6c(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x78(%rsp), %xmm0 movq 0xe8(%rax), %rcx movslq 0x84(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x50(%rsp), %xmm0 movq 0xf0(%rsp), %rcx movl 0x8c(%rsp), %edx addl 0x6c(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x78(%rsp), %xmm0 movq 0xf0(%rax), %rax movslq 0x84(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x84(%rsp), %eax addl $0x1, %eax movl %eax, 0x84(%rsp) movl 0x6c(%rsp), %eax addl $0x1, %eax movl %eax, 0x6c(%rsp) jmp 0x76254 movq 0x8(%rsp), %rax movl 0x2c(%rax), %eax addl 0x8c(%rsp), %eax movl %eax, 0x8c(%rsp) movl 0x74(%rsp), %eax addl $0x1, %eax movl %eax, 0x74(%rsp) jmp 0x76227 movq 0x8(%rsp), %rcx movl 0x14(%rcx), %eax subl 0x118(%rsp), %eax addl 0x11c(%rsp), %eax imull 0x2c(%rcx), %eax addl 0x8c(%rsp), %eax movl %eax, 0x8c(%rsp) movl 0x88(%rsp), %eax addl $0x1, %eax movl %eax, 0x88(%rsp) jmp 0x761d5 jmp 0x764d9 movq 0x8(%rsp), %rcx movl 0x11c(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0x48(%rsp) movl 0x11c(%rsp), %eax movl %eax, 0x44(%rsp) movl 0x44(%rsp), %eax cmpl 0x118(%rsp), %eax jge 0x766a6 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x38(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x30(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x28(%rsp) movl $0x0, 0x24(%rsp) movq 0x8(%rsp), %rcx movl 0x24(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x765fd movq 0x8(%rsp), %rax movq 0xc8(%rsp), %rcx movslq 0x24(%rsp), %rdx vmovsd (%rcx,%rdx,8), 
%xmm1 movq 0xe0(%rax), %rcx movslq 0x48(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0x38(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x38(%rsp) movq 0xc8(%rsp), %rcx movslq 0x24(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0xe8(%rax), %rcx movslq 0x48(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0x30(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x30(%rsp) movq 0xc8(%rsp), %rcx movslq 0x24(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0xf0(%rax), %rax movslq 0x48(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x28(%rsp) movl 0x48(%rsp), %eax addl $0x1, %eax movl %eax, 0x48(%rsp) movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x7652f vmovsd 0x38(%rsp), %xmm0 callq 0x61460 movq 0x8(%rsp), %rax movq 0x110(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x30(%rsp), %xmm0 vdivsd 0x38(%rsp), %xmm0, %xmm0 movq 0x118(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x28(%rsp), %xmm0 vdivsd 0x38(%rsp), %xmm0, %xmm2 movq 0x118(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x118(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovq %xmm1, %rcx movabsq $-0x8000000000000000, %rdx # imm = 0x8000000000000000 xorq %rdx, %rcx vmovq %rcx, %xmm1 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 movq 0x120(%rax), %rax movslq 0x44(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0x764f8 cmpl $-0x1, 0xf8(%rsp) je 0x7671e movq 0x8(%rsp), %rax movq 0xc0(%rax), %rax movslq 0xf8(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x18(%rsp) movl 0x11c(%rsp), %eax movl %eax, 0x14(%rsp) movl 0x14(%rsp), %eax cmpl 0x118(%rsp), %eax jge 0x7671c movq 0x8(%rsp), %rax movq 0x18(%rsp), %rcx movslq 0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 
0x110(%rax), %rax movslq 0x14(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x766d8 jmp 0x7671e movq 0x188(%rsp), %rax movslq 0x124(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movq 0x190(%rsp), %rax movslq 0x124(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movq 0x198(%rsp), %rax movslq 0x124(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x11c(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax cmpl 0x118(%rsp), %eax jge 0x7685d movq 0x8(%rsp), %rax movq 0x110(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x188(%rsp), %rcx movslq 0x124(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x118(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x190(%rsp), %rcx movslq 0x124(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x120(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq 0x10(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x198(%rsp), %rax movslq 0x124(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0x76774 jmp 0x7685f movl 0x124(%rsp), %eax addl $0x1, %eax movl %eax, 0x124(%rsp) jmp 0x75bfe addq $0x158, %rsp # imm = 0x158 retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcEdgeLogLikelihoodsMulti(int const*, int const*, int const*, int const*, int const*, int const*, int, double*)
// Computes the edge log-likelihood summed over `count` subsets (e.g. mixture
// components sharing the same patterns), accumulating per-pattern likelihoods
// across subsets and taking the log on the final subset. When rescaling is in
// use, each subset's contribution is brought onto the scale of the maximum
// per-pattern scale factor across all subsets before accumulation.
//
// Returns BEAGLE_SUCCESS, or BEAGLE_ERROR_FLOATING_POINT if the final sum is
// NaN (detected via self-inequality).
//
// NOTE(review): the per-pattern combine tests `subsetIndex == 0` before
// `subsetIndex == count - 1`, so with count == 1 the log() branch is never
// reached — presumably callers guarantee count > 1; confirm against callers.
BEAGLE_CPU_TEMPLATE
int BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcEdgeLogLikelihoodsMulti(const int* parentBufferIndices,
                                                                   const int* childBufferIndices,
                                                                   const int* probabilityIndices,
                                                                   const int* categoryWeightsIndices,
                                                                   const int* stateFrequenciesIndices,
                                                                   const int* scalingFactorsIndices,
                                                                   int count,
                                                                   double* outSumLogLikelihood) {
    // Per-pattern records of which subset carries the largest scale factor,
    // and that factor's value; filled on the first subset when scaling is on.
    std::vector<int> indexMaxScale(kPatternCount);
    std::vector<REALTYPE> maxScaleFactor(kPatternCount);

    int returnCode = BEAGLE_SUCCESS;

    for (int subsetIndex = 0 ; subsetIndex < count; ++subsetIndex ) {
        const REALTYPE* partialsParent = gPartials[parentBufferIndices[subsetIndex]];
        const REALTYPE* transMatrix = gTransitionMatrices[probabilityIndices[subsetIndex]];
        const REALTYPE* wt = gCategoryWeights[categoryWeightsIndices[subsetIndex]];
        const REALTYPE* freqs = gStateFrequencies[stateFrequenciesIndices[subsetIndex]];
        int childIndex = childBufferIndices[subsetIndex];

        // Reset category-summed accumulator for this subset.
        memset(integrationTmp, 0, (kPatternCount * kStateCount)*sizeof(REALTYPE));

        if (childIndex < kTipCount && gTipStates[childIndex]) { // Integrate against a state at the child
            const int* statesChild = gTipStates[childIndex];
            int v = 0; // Index for parent partials

            for(int l = 0; l < kCategoryCount; l++) {
                int u = 0; // Index in resulting product-partials (summed over categories)
                const REALTYPE weight = wt[l];
                for(int k = 0; k < kPatternCount; k++) {

                    const int stateChild = statesChild[k];
                    // DISCUSSION PT: Does it make sense to change the order of the partials,
                    // so we can interchange the patterCount and categoryCount loop order?
                    int w = l * kMatrixSize;
                    for(int i = 0; i < kStateCount; i++) {
                        // w + stateChild selects the child's observed-state
                        // column; w steps a padded matrix row per state i.
                        integrationTmp[u] += transMatrix[w + stateChild] * partialsParent[v + i] * weight;
                        u++;
                        w += kTransPaddedStateCount;
                    }
                    v += kPartialsPaddedStateCount;
                }
            }
        } else {
            const REALTYPE* partialsChild = gPartials[childIndex];
            int v = 0;
            // Largest multiple of 4 <= kStateCount, for the unrolled loop.
            int stateCountModFour = (kStateCount / 4) * 4;

            for(int l = 0; l < kCategoryCount; l++) {
                int u = 0;
                const REALTYPE weight = wt[l];
                for(int k = 0; k < kPatternCount; k++) {
                    int w = l * kMatrixSize;
                    const REALTYPE* partialsChildPtr = &partialsChild[v];
                    for(int i = 0; i < kStateCount; i++) {
                        // Dot product of matrix row i with the child partials,
                        // unrolled 4x into two independent accumulators.
                        double sumOverJA = 0.0, sumOverJB = 0.0;
                        int j = 0;
                        const REALTYPE* transMatrixPtr = &transMatrix[w];
                        for (; j < stateCountModFour; j += 4) {
                            sumOverJA += transMatrixPtr[j + 0] * partialsChildPtr[j + 0];
                            sumOverJB += transMatrixPtr[j + 1] * partialsChildPtr[j + 1];
                            sumOverJA += transMatrixPtr[j + 2] * partialsChildPtr[j + 2];
                            sumOverJB += transMatrixPtr[j + 3] * partialsChildPtr[j + 3];
                        }
                        // Remainder states when kStateCount % 4 != 0.
                        for (; j < kStateCount; j++) {
                            sumOverJA += transMatrixPtr[j] * partialsChildPtr[j];
                        }
                        // for(int j = 0; j < kStateCount; j++) {
                        //     sumOverJ += transMatrix[w] * partialsChild[v + j];
                        //     w++;
                        // }

                        integrationTmp[u] += (sumOverJA + sumOverJB) * partialsParent[v + i] * weight;
                        u++;

                        w += kStateCount;

                        // increment for the extra column at the end
                        w += T_PAD;
                    }
                    v += kPartialsPaddedStateCount;
                }
            }
        }

        // Reduce over states against frequencies and combine across subsets.
        int u = 0;
        for(int k = 0; k < kPatternCount; k++) {
            REALTYPE sumOverI = 0.0;
            for(int i = 0; i < kStateCount; i++) {
                sumOverI += freqs[i] * integrationTmp[u];
                u++;
            }

            if (scalingFactorsIndices[0] != BEAGLE_OP_NONE) {
                int cumulativeScalingFactorIndex;
                cumulativeScalingFactorIndex = scalingFactorsIndices[subsetIndex];
                const REALTYPE* cumulativeScaleFactors = gScaleBuffers[cumulativeScalingFactorIndex];
                if (subsetIndex == 0) {
                    // On the first subset, find the subset with the largest
                    // scale factor for this pattern across ALL subsets.
                    indexMaxScale[k] = 0;
                    maxScaleFactor[k] = cumulativeScaleFactors[k];
                    for (int j = 1; j < count; j++) {
                        REALTYPE tmpScaleFactor;
                        tmpScaleFactor = gScaleBuffers[scalingFactorsIndices[j]][k];
                        if (tmpScaleFactor > maxScaleFactor[k]) {
                            indexMaxScale[k] = j;
                            maxScaleFactor[k] = tmpScaleFactor;
                        }
                    }
                }

                // Rescale this subset's likelihood onto the max-scale basis
                // (no-op for the subset that holds the maximum).
                if (subsetIndex != indexMaxScale[k])
                    sumOverI *= exp((REALTYPE)(cumulativeScaleFactors[k] - maxScaleFactor[k]));
            }

            // First subset initializes; last subset takes the log of the
            // accumulated sum; intermediate subsets just accumulate.
            if (subsetIndex == 0) {
                outLogLikelihoodsTmp[k] = sumOverI;
            } else if (subsetIndex == count - 1) {
                REALTYPE tmpSum = outLogLikelihoodsTmp[k] + sumOverI;

                outLogLikelihoodsTmp[k] = log(tmpSum);
            } else {
                outLogLikelihoodsTmp[k] += sumOverI;
            }
        }
    }

    // Re-apply the common (maximum) scale factor on the log scale.
    if (scalingFactorsIndices[0] != BEAGLE_OP_NONE) {
        for(int i=0; i<kPatternCount; i++)
            outLogLikelihoodsTmp[i] += maxScaleFactor[i];
    }

    // Pattern-weighted total; self-inequality detects NaN.
    *outSumLogLikelihood = 0.0;
    for (int i = 0; i < kPatternCount; i++) {
        *outSumLogLikelihood += outLogLikelihoodsTmp[i] * gPatternWeights[i];
    }

    if (*outSumLogLikelihood != *outSumLogLikelihood)
        returnCode = BEAGLE_ERROR_FLOATING_POINT;

    return returnCode;
}
subq $0x1d8, %rsp # imm = 0x1D8 movq 0x1f0(%rsp), %rax movl 0x1e8(%rsp), %eax movq 0x1e0(%rsp), %rax movq %rdi, 0x1d0(%rsp) movq %rsi, 0x1c8(%rsp) movq %rdx, 0x1c0(%rsp) movq %rcx, 0x1b8(%rsp) movq %r8, 0x1b0(%rsp) movq %r9, 0x1a8(%rsp) movq 0x1d0(%rsp), %rax movq %rax, 0x48(%rsp) movslq 0x14(%rax), %rax movq %rax, 0x50(%rsp) leaq 0x18f(%rsp), %rdi movq %rdi, 0x58(%rsp) callq 0x602b0 movq 0x50(%rsp), %rsi movq 0x58(%rsp), %rdx leaq 0x190(%rsp), %rdi callq 0x5fb40 jmp 0x7690f leaq 0x18f(%rsp), %rdi callq 0x5f4a0 movq 0x48(%rsp), %rax movslq 0x14(%rax), %rax movq %rax, 0x38(%rsp) leaq 0x15f(%rsp), %rdi movq %rdi, 0x40(%rsp) callq 0x63e90 movq 0x38(%rsp), %rsi movq 0x40(%rsp), %rdx leaq 0x160(%rsp), %rdi callq 0x61870 jmp 0x76955 leaq 0x15f(%rsp), %rdi callq 0x60320 movl $0x0, 0x158(%rsp) movl $0x0, 0x154(%rsp) movl 0x154(%rsp), %eax cmpl 0x1e8(%rsp), %eax jge 0x7745b movq 0x48(%rsp), %rcx movq 0xb0(%rcx), %rax movq 0x1c8(%rsp), %rdx movslq 0x154(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x148(%rsp) movq 0xd8(%rcx), %rax movq 0x1b8(%rsp), %rdx movslq 0x154(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x140(%rsp) movq 0xa0(%rcx), %rax movq 0x1b0(%rsp), %rdx movslq 0x154(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x138(%rsp) movq 0xa8(%rcx), %rax movq 0x1a8(%rsp), %rdx movslq 0x154(%rsp), %rsi movslq (%rdx,%rsi,4), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x130(%rsp) movq 0x1c0(%rsp), %rax movslq 0x154(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x12c(%rsp) movq 0xe0(%rcx), %rdi movl 0x14(%rcx), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq 0x48(%rsp), %rcx movl 0x12c(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0x76cbe movq 0x48(%rsp), %rax movq 0xb8(%rax), %rax movslq 0x12c(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x76cbe movq 0x48(%rsp), %rax movq 0xb8(%rax), %rax movslq 0x12c(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 
0x120(%rsp) movl $0x0, 0x11c(%rsp) movl $0x0, 0x118(%rsp) movq 0x48(%rsp), %rcx movl 0x118(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x76cb9 movl $0x0, 0x114(%rsp) movq 0x138(%rsp), %rax movslq 0x118(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x108(%rsp) movl $0x0, 0x104(%rsp) movq 0x48(%rsp), %rcx movl 0x104(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x76ca1 movq 0x48(%rsp), %rcx movq 0x120(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x100(%rsp) movl 0x118(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0xfc(%rsp) movl $0x0, 0xf8(%rsp) movq 0x48(%rsp), %rcx movl 0xf8(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x76c75 movq 0x48(%rsp), %rax movq 0x140(%rsp), %rcx movl 0xfc(%rsp), %edx addl 0x100(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x148(%rsp), %rcx movl 0x11c(%rsp), %edx addl 0xf8(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x108(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0x114(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movl 0x114(%rsp), %ecx addl $0x1, %ecx movl %ecx, 0x114(%rsp) movl 0x28(%rax), %eax addl 0xfc(%rsp), %eax movl %eax, 0xfc(%rsp) movl 0xf8(%rsp), %eax addl $0x1, %eax movl %eax, 0xf8(%rsp) jmp 0x76b67 movq %rax, %rcx movl %edx, %eax movq %rcx, 0x180(%rsp) movl %eax, 0x17c(%rsp) leaq 0x18f(%rsp), %rdi callq 0x5f4a0 jmp 0x7758a movq %rax, %rcx movl %edx, %eax movq %rcx, 0x180(%rsp) movl %eax, 0x17c(%rsp) leaq 0x15f(%rsp), %rdi callq 0x60320 leaq 0x190(%rsp), %rdi callq 0x612a0 jmp 0x7758a movq 0x48(%rsp), %rax movl 0x2c(%rax), %eax addl 0x11c(%rsp), %eax movl %eax, 0x11c(%rsp) movl 0x104(%rsp), %eax addl $0x1, %eax movl %eax, 0x104(%rsp) jmp 0x76b16 jmp 0x76ca3 movl 0x118(%rsp), %eax addl $0x1, %eax movl %eax, 0x118(%rsp) jmp 0x76acd jmp 0x770a9 movq 0x48(%rsp), %rax movq 0xb0(%rax), %rcx movslq 0x12c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, 0xf0(%rsp) movl $0x0, 0xec(%rsp) movl 
0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, 0xe8(%rsp) movl $0x0, 0xe4(%rsp) movq 0x48(%rsp), %rcx movl 0xe4(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x770a7 movl $0x0, 0xe0(%rsp) movq 0x138(%rsp), %rax movslq 0xe4(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xd8(%rsp) movl $0x0, 0xd4(%rsp) movq 0x48(%rsp), %rcx movl 0xd4(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x7708f movq 0x48(%rsp), %rcx movl 0xe4(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0xd0(%rsp) movq 0xf0(%rsp), %rax movslq 0xec(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xc8(%rsp) movl $0x0, 0xc4(%rsp) movq 0x48(%rsp), %rcx movl 0xc4(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x77063 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0xb8(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0xb0(%rsp) movl $0x0, 0xac(%rsp) movq 0x140(%rsp), %rax movslq 0xd0(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xa0(%rsp) movl 0xac(%rsp), %eax cmpl 0xe8(%rsp), %eax jge 0x76f57 movq 0xa0(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xc8(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xb8(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xb8(%rsp) movq 0xa0(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xc8(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xb0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xb0(%rsp) movq 0xa0(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xc8(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xb8(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xb8(%rsp) movq 0xa0(%rsp), %rax movl 0xac(%rsp), %ecx addl 
$0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xc8(%rsp), %rax movl 0xac(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xb0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xb0(%rsp) movl 0xac(%rsp), %eax addl $0x4, %eax movl %eax, 0xac(%rsp) jmp 0x76e01 jmp 0x76f59 movq 0x48(%rsp), %rcx movl 0xac(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x76fbe movq 0xa0(%rsp), %rax movslq 0xac(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xc8(%rsp), %rax movslq 0xac(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xb8(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xb8(%rsp) movl 0xac(%rsp), %eax addl $0x1, %eax movl %eax, 0xac(%rsp) jmp 0x76f59 movq 0x48(%rsp), %rax vmovsd 0xb8(%rsp), %xmm0 vaddsd 0xb0(%rsp), %xmm0, %xmm0 movq 0x148(%rsp), %rcx movl 0xec(%rsp), %edx addl 0xc4(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xd8(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0xe0(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movl 0xe0(%rsp), %ecx addl $0x1, %ecx movl %ecx, 0xe0(%rsp) movl 0x24(%rax), %eax addl 0xd0(%rsp), %eax movl %eax, 0xd0(%rsp) movl 0xd0(%rsp), %eax addl $0x2, %eax movl %eax, 0xd0(%rsp) movl 0xc4(%rsp), %eax addl $0x1, %eax movl %eax, 0xc4(%rsp) jmp 0x76da8 movq 0x48(%rsp), %rax movl 0x2c(%rax), %eax addl 0xec(%rsp), %eax movl %eax, 0xec(%rsp) movl 0xd4(%rsp), %eax addl $0x1, %eax movl %eax, 0xd4(%rsp) jmp 0x76d52 jmp 0x77091 movl 0xe4(%rsp), %eax addl $0x1, %eax movl %eax, 0xe4(%rsp) jmp 0x76d09 jmp 0x770a9 movl $0x0, 0x9c(%rsp) movl $0x0, 0x98(%rsp) movq 0x48(%rsp), %rcx movl 0x98(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x77443 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x90(%rsp) movl $0x0, 0x8c(%rsp) movq 0x48(%rsp), %rcx movl 0x8c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x77166 movq 0x48(%rsp), %rax movq 0x130(%rsp), %rcx movslq 
0x8c(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0xe0(%rax), %rax movslq 0x9c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x90(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x90(%rsp) movl 0x9c(%rsp), %eax addl $0x1, %eax movl %eax, 0x9c(%rsp) movl 0x8c(%rsp), %eax addl $0x1, %eax movl %eax, 0x8c(%rsp) jmp 0x770ec movq 0x1e0(%rsp), %rax cmpl $-0x1, (%rax) je 0x7736e movq 0x48(%rsp), %rax movq 0x1e0(%rsp), %rcx movslq 0x154(%rsp), %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0x88(%rsp) movq 0xc0(%rax), %rax movslq 0x88(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x80(%rsp) cmpl $0x0, 0x154(%rsp) jne 0x772f0 movslq 0x98(%rsp), %rsi leaq 0x190(%rsp), %rdi callq 0x601c0 movl $0x0, (%rax) movq 0x80(%rsp), %rax movslq 0x98(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x30(%rsp) movslq 0x98(%rsp), %rsi leaq 0x160(%rsp), %rdi callq 0x61ef0 vmovsd 0x30(%rsp), %xmm0 vmovsd %xmm0, (%rax) movl $0x1, 0x7c(%rsp) movl 0x7c(%rsp), %eax cmpl 0x1e8(%rsp), %eax jge 0x772ee movq 0x48(%rsp), %rax movq 0xc0(%rax), %rax movq 0x1e0(%rsp), %rcx movslq 0x7c(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movslq 0x98(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x70(%rsp) vmovsd 0x70(%rsp), %xmm0 vmovsd %xmm0, 0x28(%rsp) movslq 0x98(%rsp), %rsi leaq 0x160(%rsp), %rdi callq 0x61ef0 vmovsd 0x28(%rsp), %xmm0 vucomisd (%rax), %xmm0 jbe 0x772dc movl 0x7c(%rsp), %eax movl %eax, 0x1c(%rsp) movslq 0x98(%rsp), %rsi leaq 0x190(%rsp), %rdi callq 0x601c0 movl 0x1c(%rsp), %ecx movl %ecx, (%rax) vmovsd 0x70(%rsp), %xmm0 vmovsd %xmm0, 0x20(%rsp) movslq 0x98(%rsp), %rsi leaq 0x160(%rsp), %rdi callq 0x61ef0 vmovsd 0x20(%rsp), %xmm0 vmovsd %xmm0, (%rax) jmp 0x772de movl 0x7c(%rsp), %eax addl $0x1, %eax movl %eax, 0x7c(%rsp) jmp 0x7721c jmp 0x772f0 movl 0x154(%rsp), %eax movl %eax, 0x18(%rsp) movslq 0x98(%rsp), %rsi leaq 0x190(%rsp), %rdi callq 0x601c0 movq %rax, %rcx movl 0x18(%rsp), %eax cmpl (%rcx), %eax je 0x7736c movq 
0x80(%rsp), %rax movslq 0x98(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x10(%rsp) movslq 0x98(%rsp), %rsi leaq 0x160(%rsp), %rdi callq 0x61ef0 vmovsd 0x10(%rsp), %xmm0 vsubsd (%rax), %xmm0, %xmm0 callq 0x645e0 vmulsd 0x90(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x90(%rsp) jmp 0x7736e cmpl $0x0, 0x154(%rsp) jne 0x7739f movq 0x48(%rsp), %rax vmovsd 0x90(%rsp), %xmm0 movq 0x110(%rax), %rax movslq 0x98(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) jmp 0x7742b movl 0x154(%rsp), %eax movl 0x1e8(%rsp), %ecx subl $0x1, %ecx cmpl %ecx, %eax jne 0x77402 movq 0x48(%rsp), %rax movq 0x110(%rax), %rax movslq 0x98(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vaddsd 0x90(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x68(%rsp) vmovsd 0x68(%rsp), %xmm0 callq 0x61460 movq 0x48(%rsp), %rax movq 0x110(%rax), %rax movslq 0x98(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) jmp 0x77429 movq 0x48(%rsp), %rax vmovsd 0x90(%rsp), %xmm0 movq 0x110(%rax), %rax movslq 0x98(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x7742b jmp 0x7742d movl 0x98(%rsp), %eax addl $0x1, %eax movl %eax, 0x98(%rsp) jmp 0x770bf jmp 0x77445 movl 0x154(%rsp), %eax addl $0x1, %eax movl %eax, 0x154(%rsp) jmp 0x76978 movq 0x1e0(%rsp), %rax cmpl $-0x1, (%rax) je 0x774c1 movl $0x0, 0x64(%rsp) movq 0x48(%rsp), %rcx movl 0x64(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x774bf movslq 0x64(%rsp), %rsi leaq 0x160(%rsp), %rdi callq 0x61ef0 movq %rax, %rcx movq 0x48(%rsp), %rax vmovsd (%rcx), %xmm0 movq 0x110(%rax), %rax movslq 0x64(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x64(%rsp), %eax addl $0x1, %eax movl %eax, 0x64(%rsp) jmp 0x77470 jmp 0x774c1 movq 0x1f0(%rsp), %rax vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl $0x0, 0x60(%rsp) movq 0x48(%rsp), %rcx movl 0x60(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x77530 movq 0x48(%rsp), %rax movq 0x110(%rax), %rcx movslq 0x60(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq 0x60(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 
movq 0x1f0(%rsp), %rax vmovsd (%rax), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax) movl 0x60(%rsp), %eax addl $0x1, %eax movl %eax, 0x60(%rsp) jmp 0x774d9 movq 0x1f0(%rsp), %rax vmovsd (%rax), %xmm0 movq 0x1f0(%rsp), %rax vucomisd (%rax), %xmm0 jne 0x7754e jp 0x7754e jmp 0x77559 movl $0xfffffff8, 0x158(%rsp) # imm = 0xFFFFFFF8 movl 0x158(%rsp), %eax movl %eax, 0xc(%rsp) leaq 0x160(%rsp), %rdi callq 0x63710 leaq 0x190(%rsp), %rdi callq 0x612a0 movl 0xc(%rsp), %eax addq $0x1d8, %rsp # imm = 0x1D8 retq movq 0x180(%rsp), %rdi callq 0x644b0 nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::accumulateDerivatives(double*, double*, double*)
BEAGLE_CPU_TEMPLATE void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivatives(double* outDerivatives, double* outSumDerivatives, double* outSumSquaredDerivatives) { if (outDerivatives == NULL) { accumulateDerivativesDispatch1<false>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } else { accumulateDerivativesDispatch1<true>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } }
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x18(%rsp) jne 0x7c293 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x62780 jmp 0x7c2ab movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x61200 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPU4StateImpl<double, 2, 0>::calcStatesStates(double*, int const*, double const*, int const*, double const*, int, int)
BEAGLE_CPU_TEMPLATE void BeagleCPU4StateImpl<BEAGLE_CPU_GENERIC>::calcStatesStates(REALTYPE* destP, const int* states1, const REALTYPE* matrices1, const int* states2, const REALTYPE* matrices2, int startPattern, int endPattern) { #pragma omp parallel for num_threads(kCategoryCount) for (int l = 0; l < kCategoryCount; l++) { int v = l*4*kPaddedPatternCount; if (startPattern != 0) { v += 4*startPattern; } int w = l*4*OFFSET; for (int k = startPattern; k < endPattern; k++) { const int state1 = states1[k]; const int state2 = states2[k]; destP[v ] = matrices1[w + state1] * matrices2[w + state2]; destP[v + 1] = matrices1[w + OFFSET*1 + state1] * matrices2[w + OFFSET*1 + state2]; destP[v + 2] = matrices1[w + OFFSET*2 + state1] * matrices2[w + OFFSET*2 + state2]; destP[v + 3] = matrices1[w + OFFSET*3 + state1] * matrices2[w + OFFSET*3 + state2]; v += 4; } } }
movl 0x10(%rsp), %eax movl 0x8(%rsp), %eax movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq %r8, -0x28(%rsp) movq %r9, -0x30(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x50(%rsp) movl $0x0, -0x34(%rsp) movq -0x50(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x7c49c movq -0x50(%rsp), %rcx movl -0x34(%rsp), %eax shll $0x2, %eax imull 0x18(%rcx), %eax movl %eax, -0x38(%rsp) cmpl $0x0, 0x8(%rsp) je 0x7c324 movl 0x8(%rsp), %eax shll $0x2, %eax addl -0x38(%rsp), %eax movl %eax, -0x38(%rsp) movl -0x34(%rsp), %eax shll $0x2, %eax imull $0x6, %eax, %eax movl %eax, -0x3c(%rsp) movl 0x8(%rsp), %eax movl %eax, -0x40(%rsp) movl -0x40(%rsp), %eax cmpl 0x10(%rsp), %eax jge 0x7c48a movq -0x18(%rsp), %rax movslq -0x40(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x44(%rsp) movq -0x28(%rsp), %rax movslq -0x40(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x48(%rsp) movq -0x20(%rsp), %rax movl -0x3c(%rsp), %ecx addl -0x44(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x30(%rsp), %rax movl -0x3c(%rsp), %ecx addl -0x48(%rsp), %ecx movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 movq -0x10(%rsp), %rax movslq -0x38(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movq -0x20(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0x6, %ecx addl -0x44(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x30(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0x6, %ecx addl -0x48(%rsp), %ecx movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 movq -0x10(%rsp), %rax movl -0x38(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movq -0x20(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0xc, %ecx addl -0x44(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x30(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0xc, %ecx addl -0x48(%rsp), %ecx movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 movq -0x10(%rsp), %rax movl -0x38(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movq 
-0x20(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0x12, %ecx addl -0x44(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 movq -0x30(%rsp), %rax movl -0x3c(%rsp), %ecx addl $0x12, %ecx addl -0x48(%rsp), %ecx movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 movq -0x10(%rsp), %rax movl -0x38(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movl -0x38(%rsp), %eax addl $0x4, %eax movl %eax, -0x38(%rsp) movl -0x40(%rsp), %eax addl $0x1, %eax movl %eax, -0x40(%rsp) jmp 0x7c33a jmp 0x7c48c movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0x7c2e8 retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateImpl.hpp
beagle::cpu::BeagleCPU4StateImpl<double, 2, 0>::calcStatesPartials(double*, int const*, double const*, double const*, double const*, int, int)
BEAGLE_CPU_TEMPLATE void BeagleCPU4StateImpl<BEAGLE_CPU_GENERIC>::calcStatesPartials(REALTYPE* destP, const int* states1, const REALTYPE* matrices1, const REALTYPE* partials2, const REALTYPE* matrices2, int startPattern, int endPattern) { #pragma omp parallel for num_threads(kCategoryCount) for (int l = 0; l < kCategoryCount; l++) { int u = l*4*kPaddedPatternCount; if (startPattern != 0) { u += 4*startPattern; } int w = l*4*OFFSET; PREFETCH_MATRIX(2,matrices2,w); for (int k = startPattern; k < endPattern; k++) { const int state1 = states1[k]; PREFETCH_PARTIALS(2,partials2,u); DO_INTEGRATION(2); // defines sum20, sum21, sum22, sum23; destP[u ] = matrices1[w + state1] * sum20; destP[u + 1] = matrices1[w + OFFSET*1 + state1] * sum21; destP[u + 2] = matrices1[w + OFFSET*2 + state1] * sum22; destP[u + 3] = matrices1[w + OFFSET*3 + state1] * sum23; u += 4; } } }
subq $0x90, %rsp movl 0xa0(%rsp), %eax movl 0x98(%rsp), %eax movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq %rdx, 0x78(%rsp) movq %rcx, 0x70(%rsp) movq %r8, 0x68(%rsp) movq %r9, 0x60(%rsp) movq 0x88(%rsp), %rax movq %rax, -0x80(%rsp) movl $0x0, 0x5c(%rsp) movq -0x80(%rsp), %rcx movl 0x5c(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x7c9ae movq -0x80(%rsp), %rcx movl 0x5c(%rsp), %eax shll $0x2, %eax imull 0x18(%rcx), %eax movl %eax, 0x58(%rsp) cmpl $0x0, 0x98(%rsp) je 0x7c530 movl 0x98(%rsp), %eax shll $0x2, %eax addl 0x58(%rsp), %eax movl %eax, 0x58(%rsp) movl 0x5c(%rsp), %eax shll $0x2, %eax imull $0x6, %eax, %eax movl %eax, 0x54(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x0, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x48(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x0, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x40(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x0, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x38(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x0, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x30(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x6, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x28(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x6, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x20(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x6, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x18(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x6, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x10(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0xc, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x8(%rsp) movq 0x60(%rsp), %rax movl 
0x54(%rsp), %ecx addl $0xc, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, (%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0xc, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x8(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0xc, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x10(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x12, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x18(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x12, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x20(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x12, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x28(%rsp) movq 0x60(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x12, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x30(%rsp) movl 0x98(%rsp), %eax movl %eax, -0x34(%rsp) movl -0x34(%rsp), %eax cmpl 0xa0(%rsp), %eax jge 0x7c99c movq 0x78(%rsp), %rax movslq -0x34(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x38(%rsp) movq 0x68(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x40(%rsp) movq 0x68(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x48(%rsp) movq 0x68(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x50(%rsp) movq 0x68(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x58(%rsp) vmovsd 0x48(%rsp), %xmm0 vmovsd -0x40(%rsp), %xmm1 vmovsd 0x40(%rsp), %xmm2 vmulsd -0x48(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x38(%rsp), %xmm0 vmovsd -0x50(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = 
(xmm0 * xmm2) + xmm1 vmovsd 0x30(%rsp), %xmm1 vmovsd -0x58(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x60(%rsp) vmovsd 0x28(%rsp), %xmm0 vmovsd -0x40(%rsp), %xmm1 vmovsd 0x20(%rsp), %xmm2 vmulsd -0x48(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x18(%rsp), %xmm0 vmovsd -0x50(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x10(%rsp), %xmm1 vmovsd -0x58(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) vmovsd 0x8(%rsp), %xmm0 vmovsd -0x40(%rsp), %xmm1 vmovsd (%rsp), %xmm2 vmulsd -0x48(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd -0x8(%rsp), %xmm0 vmovsd -0x50(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd -0x10(%rsp), %xmm1 vmovsd -0x58(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) vmovsd -0x18(%rsp), %xmm0 vmovsd -0x40(%rsp), %xmm1 vmovsd -0x20(%rsp), %xmm2 vmulsd -0x48(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd -0x28(%rsp), %xmm0 vmovsd -0x50(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd -0x30(%rsp), %xmm1 vmovsd -0x58(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x78(%rsp) movq 0x70(%rsp), %rax movl 0x54(%rsp), %ecx addl -0x38(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmulsd -0x60(%rsp), %xmm0, %xmm0 movq 0x80(%rsp), %rax movslq 0x58(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movq 0x70(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x6, %ecx addl -0x38(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmulsd -0x68(%rsp), %xmm0, %xmm0 movq 0x80(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movq 0x70(%rsp), %rax movl 0x54(%rsp), %ecx addl $0xc, %ecx addl -0x38(%rsp), %ecx 
movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmulsd -0x70(%rsp), %xmm0, %xmm0 movq 0x80(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movq 0x70(%rsp), %rax movl 0x54(%rsp), %ecx addl $0x12, %ecx addl -0x38(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmulsd -0x78(%rsp), %xmm0, %xmm0 movq 0x80(%rsp), %rax movl 0x58(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x58(%rsp), %eax addl $0x4, %eax movl %eax, 0x58(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0x7c718 jmp 0x7c99e movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) jmp 0x7c4ee addq $0x90, %rsp retq nopw %cs:(%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateImpl.hpp
beagle::cpu::BeagleCPU4StateImpl<double, 2, 0>::calcEdgeLogLikelihoods(int, int, int, int, int, int, double*)
BEAGLE_CPU_TEMPLATE int BeagleCPU4StateImpl<BEAGLE_CPU_GENERIC>::calcEdgeLogLikelihoods(const int parIndex, const int childIndex, const int probIndex, const int categoryWeightsIndex, const int stateFrequenciesIndex, const int scalingFactorsIndex, double* outSumLogLikelihood) { // TODO: implement derivatives for calculateEdgeLnL assert(parIndex >= kTipCount); const REALTYPE* partialsParent = gPartials[parIndex]; const REALTYPE* transMatrix = gTransitionMatrices[probIndex]; const REALTYPE* wt = gCategoryWeights[categoryWeightsIndex]; memset(integrationTmp, 0, (kPatternCount * kStateCount)*sizeof(REALTYPE)); if (childIndex < kTipCount && gTipStates[childIndex]) { // Integrate against a state at the child const int* statesChild = gTipStates[childIndex]; int v = 0; // Index for parent partials int w = 0; for(int l = 0; l < kCategoryCount; l++) { int u = 0; // Index in resulting product-partials (summed over categories) const REALTYPE weight = wt[l]; for(int k = 0; k < kPatternCount; k++) { const int stateChild = statesChild[k]; integrationTmp[u ] += transMatrix[w + stateChild] * partialsParent[v ] * weight; integrationTmp[u + 1] += transMatrix[w + OFFSET*1 + stateChild] * partialsParent[v + 1] * weight; integrationTmp[u + 2] += transMatrix[w + OFFSET*2 + stateChild] * partialsParent[v + 2] * weight; integrationTmp[u + 3] += transMatrix[w + OFFSET*3 + stateChild] * partialsParent[v + 3] * weight; u += 4; v += 4; } w += OFFSET*4; if (kExtraPatterns) v += 4 * kExtraPatterns; } } else { // Integrate against a partial at the child const REALTYPE* partialsChild = gPartials[childIndex]; #if 0// int v = 0; #endif int w = 0; for(int l = 0; l < kCategoryCount; l++) { int u = 0; #if 1// int v = l*kPaddedPatternCount*4; #endif const REALTYPE weight = wt[l]; PREFETCH_MATRIX(1,transMatrix,w); for(int k = 0; k < kPatternCount; k++) { const REALTYPE* partials1 = partialsChild; PREFETCH_PARTIALS(1,partials1,v); DO_INTEGRATION(1); integrationTmp[u ] += sum10 * partialsParent[v ] * 
weight; integrationTmp[u + 1] += sum11 * partialsParent[v + 1] * weight; integrationTmp[u + 2] += sum12 * partialsParent[v + 2] * weight; integrationTmp[u + 3] += sum13 * partialsParent[v + 3] * weight; u += 4; v += 4; } w += OFFSET*4; #if 0// if (kExtraPatterns) v += 4 * kExtraPatterns; #endif// } } return integrateOutStatesAndScale(integrationTmp, stateFrequenciesIndex, scalingFactorsIndex, outSumLogLikelihood); }
subq $0x158, %rsp # imm = 0x158 movq 0x168(%rsp), %rax movl 0x160(%rsp), %eax movq %rdi, 0x150(%rsp) movl %esi, 0x14c(%rsp) movl %edx, 0x148(%rsp) movl %ecx, 0x144(%rsp) movl %r8d, 0x140(%rsp) movl %r9d, 0x13c(%rsp) movq 0x150(%rsp), %rcx movq %rcx, (%rsp) movq 0xb0(%rcx), %rax movslq 0x14c(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x130(%rsp) movq 0xd8(%rcx), %rax movslq 0x144(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x128(%rsp) movq 0xa0(%rcx), %rax movslq 0x140(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0x120(%rsp) movq 0xe0(%rcx), %rdi movl 0x14(%rcx), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq (%rsp), %rcx movl 0x148(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0x7e71f movq (%rsp), %rax movq 0xb8(%rax), %rax movslq 0x148(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x7e71f movq (%rsp), %rax movq 0xb8(%rax), %rax movslq 0x148(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x118(%rsp) movl $0x0, 0x114(%rsp) movl $0x0, 0x110(%rsp) movl $0x0, 0x10c(%rsp) movq (%rsp), %rcx movl 0x10c(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x7e71a movl $0x0, 0x108(%rsp) movq 0x120(%rsp), %rax movslq 0x10c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x100(%rsp) movl $0x0, 0xfc(%rsp) movq (%rsp), %rcx movl 0xfc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x7e6cf movq (%rsp), %rax movq 0x118(%rsp), %rcx movslq 0xfc(%rsp), %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0xf8(%rsp) movq 0x128(%rsp), %rcx movl 0x110(%rsp), %edx addl 0xf8(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x130(%rsp), %rcx movslq 0x114(%rsp), %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x100(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0x108(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x128(%rsp), %rcx movl 0x110(%rsp), %edx addl $0x6, %edx addl 0xf8(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x130(%rsp), %rcx movl 0x114(%rsp), 
%edx addl $0x1, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x100(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0x108(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x128(%rsp), %rcx movl 0x110(%rsp), %edx addl $0xc, %edx addl 0xf8(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x130(%rsp), %rcx movl 0x114(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x100(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0x108(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movq 0x128(%rsp), %rcx movl 0x110(%rsp), %edx addl $0x12, %edx addl 0xf8(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x130(%rsp), %rcx movl 0x114(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x100(%rsp), %xmm0 movq 0xe0(%rax), %rax movl 0x108(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x108(%rsp), %eax addl $0x4, %eax movl %eax, 0x108(%rsp) movl 0x114(%rsp), %eax addl $0x4, %eax movl %eax, 0x114(%rsp) movl 0xfc(%rsp), %eax addl $0x1, %eax movl %eax, 0xfc(%rsp) jmp 0x7e4d6 movq (%rsp), %rax movl 0x110(%rsp), %ecx addl $0x18, %ecx movl %ecx, 0x110(%rsp) cmpl $0x0, 0x1c(%rax) je 0x7e702 movq (%rsp), %rax movl 0x1c(%rax), %eax shll $0x2, %eax addl 0x114(%rsp), %eax movl %eax, 0x114(%rsp) jmp 0x7e704 movl 0x10c(%rsp), %eax addl $0x1, %eax movl %eax, 0x10c(%rsp) jmp 0x7e48e jmp 0x7ed4f movq (%rsp), %rax movq 0xb0(%rax), %rax movslq 0x148(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0xf0(%rsp) movl $0x0, 0xec(%rsp) movl $0x0, 0xe8(%rsp) movq (%rsp), %rcx movl 0xe8(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x7ed4d movq (%rsp), %rcx movl 
$0x0, 0xe4(%rsp) movl 0xe8(%rsp), %eax imull 0x18(%rcx), %eax shll $0x2, %eax movl %eax, 0xe0(%rsp) movq 0x120(%rsp), %rax movslq 0xe8(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xd8(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x0, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xd0(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x0, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xc8(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x0, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xc0(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x0, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xb8(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x6, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xb0(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x6, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa8(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x6, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa0(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x6, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x98(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0xc, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x90(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0xc, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x88(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0xc, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x80(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0xc, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x78(%rsp) movq 0x128(%rsp), %rax 
movl 0xec(%rsp), %ecx addl $0x12, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x70(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x12, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x68(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x12, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x60(%rsp) movq 0x128(%rsp), %rax movl 0xec(%rsp), %ecx addl $0x12, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x58(%rsp) movl $0x0, 0x54(%rsp) movq (%rsp), %rcx movl 0x54(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x7ed26 movq (%rsp), %rax movq 0xf0(%rsp), %rcx movq %rcx, 0x48(%rsp) movq 0x48(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x0, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd %xmm0, 0x40(%rsp) movq 0x48(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd %xmm0, 0x38(%rsp) movq 0x48(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd %xmm0, 0x30(%rsp) movq 0x48(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd %xmm0, 0x28(%rsp) vmovsd 0xd0(%rsp), %xmm0 vmovsd 0x40(%rsp), %xmm1 vmovsd 0xc8(%rsp), %xmm2 vmulsd 0x38(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0xc0(%rsp), %xmm0 vmovsd 0x30(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0xb8(%rsp), %xmm1 vmovsd 0x28(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x20(%rsp) vmovsd 0xb0(%rsp), %xmm0 vmovsd 0x40(%rsp), %xmm1 vmovsd 0xa8(%rsp), %xmm2 vmulsd 0x38(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0xa0(%rsp), %xmm0 vmovsd 0x30(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x98(%rsp), %xmm1 vmovsd 0x28(%rsp), 
%xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x18(%rsp) vmovsd 0x90(%rsp), %xmm0 vmovsd 0x40(%rsp), %xmm1 vmovsd 0x88(%rsp), %xmm2 vmulsd 0x38(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x80(%rsp), %xmm0 vmovsd 0x30(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x78(%rsp), %xmm1 vmovsd 0x28(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x10(%rsp) vmovsd 0x70(%rsp), %xmm0 vmovsd 0x40(%rsp), %xmm1 vmovsd 0x68(%rsp), %xmm2 vmulsd 0x38(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x60(%rsp), %xmm0 vmovsd 0x30(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x58(%rsp), %xmm1 vmovsd 0x28(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x8(%rsp) vmovsd 0x20(%rsp), %xmm0 movq 0x130(%rsp), %rcx movslq 0xe0(%rsp), %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xd8(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0xe4(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x18(%rsp), %xmm0 movq 0x130(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xd8(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0xe4(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x10(%rsp), %xmm0 movq 0x130(%rsp), %rcx movl 0xe0(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xd8(%rsp), %xmm0 movq 0xe0(%rax), %rcx movl 0xe4(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x8(%rsp), %xmm0 movq 0x130(%rsp), %rcx movl 
0xe0(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xd8(%rsp), %xmm0 movq 0xe0(%rax), %rax movl 0xe4(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0xe4(%rsp), %eax addl $0x4, %eax movl %eax, 0xe4(%rsp) movl 0xe0(%rsp), %eax addl $0x4, %eax movl %eax, 0xe0(%rsp) movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) jmp 0x7ea03 movl 0xec(%rsp), %eax addl $0x18, %eax movl %eax, 0xec(%rsp) movl 0xe8(%rsp), %eax addl $0x1, %eax movl %eax, 0xe8(%rsp) jmp 0x7e754 jmp 0x7ed4f movq (%rsp), %rdi movq 0xe0(%rdi), %rsi movl 0x13c(%rsp), %edx movl 0x160(%rsp), %ecx movq 0x168(%rsp), %r8 callq 0x5f120 addq $0x158, %rsp # imm = 0x158 retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateImpl.hpp
beagle::cpu::BeagleCPU4StateImpl<double, 2, 0>::calcPartialsPartialsAutoScaling(double*, double const*, double const*, double const*, double const*, int*)
BEAGLE_CPU_TEMPLATE void BeagleCPU4StateImpl<BEAGLE_CPU_GENERIC>::calcPartialsPartialsAutoScaling(REALTYPE* destP, const REALTYPE* partials1, const REALTYPE* matrices1, const REALTYPE* partials2, const REALTYPE* matrices2, int* activateScaling) { #pragma omp parallel for num_threads(kCategoryCount) for (int l = 0; l < kCategoryCount; l++) { int u = l*4*kPaddedPatternCount; int w = l*4*OFFSET; PREFETCH_MATRIX(1,matrices1,w); PREFETCH_MATRIX(2,matrices2,w); for (int k = 0; k < kPatternCount; k++) { PREFETCH_PARTIALS(1,partials1,u); PREFETCH_PARTIALS(2,partials2,u); DO_INTEGRATION(1); // defines sum10, sum11, sum12, sum13 DO_INTEGRATION(2); // defines sum20, sum21, sum22, sum23 // Final results destP[u ] = sum10 * sum20; destP[u + 1] = sum11 * sum21; destP[u + 2] = sum12 * sum22; destP[u + 3] = sum13 * sum23; if (*activateScaling == 0) { int expTmp; int expMax; frexp(destP[u], &expMax); frexp(destP[u + 1], &expTmp); if (abs(expTmp) > abs(expMax)) expMax = expTmp; frexp(destP[u + 2], &expTmp); if (abs(expTmp) > abs(expMax)) expMax = expTmp; frexp(destP[u + 3], &expTmp); if (abs(expTmp) > abs(expMax)) expMax = expTmp; if(abs(expMax) > scalingExponentThreshold) { *activateScaling = 1; } } u += 4; } } }
subq $0x1d8, %rsp # imm = 0x1D8 movq 0x1e0(%rsp), %rax movq %rdi, 0x1d0(%rsp) movq %rsi, 0x1c8(%rsp) movq %rdx, 0x1c0(%rsp) movq %rcx, 0x1b8(%rsp) movq %r8, 0x1b0(%rsp) movq %r9, 0x1a8(%rsp) movq 0x1d0(%rsp), %rax movq %rax, (%rsp) movl $0x0, 0x1a4(%rsp) movq (%rsp), %rcx movl 0x1a4(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x81394 movq (%rsp), %rcx movl 0x1a4(%rsp), %eax shll $0x2, %eax imull 0x18(%rcx), %eax movl %eax, 0x1a0(%rsp) movl 0x1a4(%rsp), %eax shll $0x2, %eax imull $0x6, %eax, %eax movl %eax, 0x19c(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x190(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x188(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x180(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x178(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x170(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x168(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x160(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x158(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x150(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 
%xmm0, 0x148(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x140(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x138(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x130(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x128(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x120(%rsp) movq 0x1b8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x118(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x110(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x108(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x100(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x0, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xf8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xf0(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xe8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xe0(%rsp) movq 0x1a8(%rsp), %rax movl 
0x19c(%rsp), %ecx addl $0x6, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xd8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xd0(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xc8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xc0(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0xc, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xb8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xb0(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa8(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa0(%rsp) movq 0x1a8(%rsp), %rax movl 0x19c(%rsp), %ecx addl $0x12, %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x98(%rsp) movl $0x0, 0x94(%rsp) movq (%rsp), %rcx movl 0x94(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x8137c movq 0x1c0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x88(%rsp) movq 0x1c0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x80(%rsp) movq 0x1c0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x78(%rsp) movq 0x1c0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x70(%rsp) movq 0x1b0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl 
$0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x68(%rsp) movq 0x1b0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x60(%rsp) movq 0x1b0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x58(%rsp) movq 0x1b0(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x50(%rsp) vmovsd 0x190(%rsp), %xmm0 vmovsd 0x88(%rsp), %xmm1 vmovsd 0x188(%rsp), %xmm2 vmulsd 0x80(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x180(%rsp), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x178(%rsp), %xmm1 vmovsd 0x70(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x48(%rsp) vmovsd 0x170(%rsp), %xmm0 vmovsd 0x88(%rsp), %xmm1 vmovsd 0x168(%rsp), %xmm2 vmulsd 0x80(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x160(%rsp), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x158(%rsp), %xmm1 vmovsd 0x70(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x40(%rsp) vmovsd 0x150(%rsp), %xmm0 vmovsd 0x88(%rsp), %xmm1 vmovsd 0x148(%rsp), %xmm2 vmulsd 0x80(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x140(%rsp), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x138(%rsp), %xmm1 vmovsd 0x70(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x38(%rsp) vmovsd 0x130(%rsp), %xmm0 vmovsd 0x88(%rsp), %xmm1 vmovsd 0x128(%rsp), %xmm2 vmulsd 0x80(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x120(%rsp), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, 
%xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x118(%rsp), %xmm1 vmovsd 0x70(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x30(%rsp) vmovsd 0x110(%rsp), %xmm0 vmovsd 0x68(%rsp), %xmm1 vmovsd 0x108(%rsp), %xmm2 vmulsd 0x60(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0x100(%rsp), %xmm0 vmovsd 0x58(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0xf8(%rsp), %xmm1 vmovsd 0x50(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x28(%rsp) vmovsd 0xf0(%rsp), %xmm0 vmovsd 0x68(%rsp), %xmm1 vmovsd 0xe8(%rsp), %xmm2 vmulsd 0x60(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0xe0(%rsp), %xmm0 vmovsd 0x58(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0xd8(%rsp), %xmm1 vmovsd 0x50(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x20(%rsp) vmovsd 0xd0(%rsp), %xmm0 vmovsd 0x68(%rsp), %xmm1 vmovsd 0xc8(%rsp), %xmm2 vmulsd 0x60(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0xc0(%rsp), %xmm0 vmovsd 0x58(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0xb8(%rsp), %xmm1 vmovsd 0x50(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x18(%rsp) vmovsd 0xb0(%rsp), %xmm0 vmovsd 0x68(%rsp), %xmm1 vmovsd 0xa8(%rsp), %xmm2 vmulsd 0x60(%rsp), %xmm2, %xmm2 vfmadd213sd %xmm2, %xmm0, %xmm1 # xmm1 = (xmm0 * xmm1) + xmm2 vmovsd 0xa0(%rsp), %xmm0 vmovsd 0x58(%rsp), %xmm2 vfmadd213sd %xmm1, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm1 vmovsd 0x98(%rsp), %xmm1 vmovsd 0x50(%rsp), %xmm0 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x10(%rsp) vmovsd 0x48(%rsp), %xmm0 vmulsd 0x28(%rsp), %xmm0, %xmm0 movq 0x1c8(%rsp), %rax movslq 0x1a0(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) vmovsd 
0x40(%rsp), %xmm0 vmulsd 0x20(%rsp), %xmm0, %xmm0 movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) vmovsd 0x38(%rsp), %xmm0 vmulsd 0x18(%rsp), %xmm0, %xmm0 movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) vmovsd 0x30(%rsp), %xmm0 vmulsd 0x10(%rsp), %xmm0, %xmm0 movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd %xmm0, (%rax,%rcx,8) movq 0x1e0(%rsp), %rax cmpl $0x0, (%rax) jne 0x81355 movq 0x1c8(%rsp), %rax movslq 0x1a0(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 leaq 0x8(%rsp), %rdi callq 0x64990 movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 leaq 0xc(%rsp), %rdi callq 0x64990 movl 0xc(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl 0x8(%rsp), %ecx movl %ecx, %edx negl %edx cmovnsl %edx, %ecx cmpl %ecx, %eax jle 0x812a5 movl 0xc(%rsp), %eax movl %eax, 0x8(%rsp) movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 leaq 0xc(%rsp), %rdi callq 0x64990 movl 0xc(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl 0x8(%rsp), %ecx movl %ecx, %edx negl %edx cmovnsl %edx, %ecx cmpl %ecx, %eax jle 0x812eb movl 0xc(%rsp), %eax movl %eax, 0x8(%rsp) movq 0x1c8(%rsp), %rax movl 0x1a0(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 leaq 0xc(%rsp), %rdi callq 0x64990 movl 0xc(%rsp), %eax movl %eax, %ecx negl %ecx cmovnsl %ecx, %eax movl 0x8(%rsp), %ecx movl %ecx, %edx negl %edx cmovnsl %edx, %ecx cmpl %ecx, %eax jle 0x81331 movl 0xc(%rsp), %eax movl %eax, 0x8(%rsp) movq (%rsp), %rcx movl 0x8(%rsp), %eax movl %eax, %edx negl %edx cmovnsl %edx, %eax cmpl 0x68(%rcx), %eax jle 0x81353 movq 0x1e0(%rsp), %rax movl $0x1, (%rax) jmp 0x81355 movl 0x1a0(%rsp), %eax addl $0x4, %eax movl %eax, 0x1a0(%rsp) movl 0x94(%rsp), %eax addl $0x1, %eax movl %eax, 0x94(%rsp) jmp 0x80de2 jmp 
0x8137e movl 0x1a4(%rsp), %eax addl $0x1, %eax movl %eax, 0x1a4(%rsp) jmp 0x808d6 addq $0x1d8, %rsp # imm = 0x1D8 retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::~BeagleCPUImpl()
// Destructor: releases every buffer allocated by the implementation.
// Buffers were allocated with malloc/calloc elsewhere in this class, so they
// are released with free() (not delete). The order mirrors allocation order;
// several groups are conditional on the same flags that gated their allocation
// (kFlags & BEAGLE_FLAG_SCALING_AUTO, kPartitionsInitialised, kThreadingEnabled,
// kAutoPartitioningEnabled).
BEAGLE_CPU_TEMPLATE
BeagleCPUImpl<BEAGLE_CPU_GENERIC>::~BeagleCPUImpl() {
    // free all that stuff...
    // If you delete partials, make sure not to delete the last element
    // which is TEMP_SCRATCH_PARTIAL twice.

    // Per-eigen-decomposition weight and frequency buffers.
    for(unsigned int i=0; i<kEigenDecompCount; i++) {
        if (gCategoryWeights[i] != NULL)
            free(gCategoryWeights[i]);
        if (gStateFrequencies[i] != NULL)
            free(gStateFrequencies[i]);
    }

    // Transition matrices: per-matrix buffers, then the pointer array itself.
    for(unsigned int i=0; i<kMatrixCount; i++) {
        if (gTransitionMatrices[i] != NULL)
            free(gTransitionMatrices[i]);
    }
    free(gTransitionMatrices);

    // Partials and tip states share the same buffer count; for each index at
    // most one of the two is non-NULL, so freeing both arrays' entries is safe.
    for(unsigned int i=0; i<kBufferCount; i++) {
        if (gPartials[i] != NULL)
            free(gPartials[i]);
        if (gTipStates[i] != NULL)
            free(gTipStates[i]);
    }
    free(gPartials);
    free(gTipStates);

    if (kFlags & BEAGLE_FLAG_SCALING_AUTO) {
        // Auto-scaling mode owns gAutoScaleBuffers; only gScaleBuffers[0] was
        // allocated in this mode (the rest of gScaleBuffers is unused).
        for(unsigned int i=0; i<kScaleBufferCount; i++) {
            if (gAutoScaleBuffers[i] != NULL)
                free(gAutoScaleBuffers[i]);
        }
        if (gAutoScaleBuffers)
            free(gAutoScaleBuffers);
        free(gActiveScalingFactors);
        if (gScaleBuffers[0] != NULL)
            free(gScaleBuffers[0]);
    } else {
        for(unsigned int i=0; i<kScaleBufferCount; i++) {
            if (gScaleBuffers[i] != NULL)
                free(gScaleBuffers[i]);
        }
    }
    if (gScaleBuffers)
        free(gScaleBuffers);

    free(gCategoryRates);
    free(gPatternWeights);

    // Partition bookkeeping exists only after setPatternPartitions-style init.
    if (kPartitionsInitialised) {
        free(gPatternPartitions);
        free(gPatternPartitionsStartPatterns);
        if (kPatternsReordered) {
            free(gPatternsNewOrder);
        }
    }

    // Scratch buffers for likelihood/derivative integration.
    free(integrationTmp);
    free(firstDerivTmp);
    free(secondDerivTmp);
//    free(cLikelihoodTmp);
    free(grandDenominatorDerivTmp);
//    free(grandNumeratorUpperBoundDerivTmp);
//    free(grandNumeratorLowerBoundDerivTmp);
    free(grandNumeratorDerivTmp);

    // Lazily allocated; guarded with an explicit nullptr check unlike the
    // buffers above (free(NULL) would also be a no-op, but the check makes
    // the lazy allocation explicit).
    if (crossProductNumeratorTmp != nullptr) {
        free(crossProductNumeratorTmp);
    }

    free(outLogLikelihoodsTmp);
    free(outFirstDerivativesTmp);
    free(outSecondDerivativesTmp);

    free(ones);
    free(zeros);

    // gEigenDecomposition was allocated with new, hence delete.
    delete gEigenDecomposition;

    if (kThreadingEnabled) {
        // Send stop signal to all threads and join them...
        for (int i = 0; i < kNumThreads; i++) {
            threadData* td = &gThreads[i];
            // Set the stop flag under the thread's mutex, then wake it so it
            // observes the flag and exits its work loop.
            std::unique_lock<std::mutex> l(td->m);
            td->stop = true;
            td->cv.notify_one();
        }

        // Join all the threads
        for (int i = 0; i < kNumThreads; i++) {
            threadData* td = &gThreads[i];
            td->t.join();
        }

        delete[] gThreads;
        delete[] gFutures;

        // Per-thread operation queues, then the queue table and counters.
        for (int i=0; i<kNumThreads; i++) {
            free(gThreadOperations[i]);
        }
        free(gThreadOperations);
        free(gThreadOpCounts);
    }

    if (kAutoPartitioningEnabled) {
        free(gAutoPartitionOperations);
        if (kAutoRootPartitioningEnabled) {
            free(gAutoPartitionIndices);
            free(gAutoPartitionOutSumLogLikelihoods);
        }
    }
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) callq 0x61400 movq 0x8(%rsp), %rdi callq 0x61b60 addq $0x18, %rsp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcCrossProductsPartials(double const*, double const*, double const*, double const*, double, double*, double*)
// Accumulates, for each site pattern, the weighted outer product of the
// pre-order and post-order partials (summed over rate categories) into
// outCrossProducts. Each pattern's kStateCount x kStateCount contribution is
// normalized by that pattern's denominator (the weighted dot product of the
// two partials vectors) and scaled by the pattern weight.
//
// Parameters:
//   postOrderPartial / preOrderPartial - padded partials, laid out as
//       [category][pattern][kPartialsPaddedStateCount].
//   categoryRates / categoryWeights    - per-category rate and weight arrays.
//   edgeLength                         - branch length multiplying the rates.
//   outCrossProducts                   - kStateCount^2 accumulator (added to,
//                                        not overwritten).
//   outSumSquaredDerivatives           - accepted for interface symmetry but
//                                        not used by this routine.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcCrossProductsPartials(const REALTYPE *postOrderPartial,
                                                                  const REALTYPE *preOrderPartial,
                                                                  const double *categoryRates,
                                                                  const REALTYPE *categoryWeights,
                                                                  const double edgeLength,
                                                                  double *outCrossProducts,
                                                                  double *outSumSquaredDerivatives) {

    // Scratch accumulator for one pattern's cross-product matrix.
    // Hoisted out of the pattern loop so it is allocated once and reused,
    // instead of a fresh heap allocation per pattern (addresses the old
    // "TODO Handle temporary memory better").
    std::vector<REALTYPE> tmp(kStateCount * kStateCount, 0.0);

    for (int pattern = 0; pattern < kPatternCount; pattern++) {

        // Reset the scratch matrix; assign() reuses the existing capacity.
        tmp.assign(kStateCount * kStateCount, 0.0);

        REALTYPE patternDenominator = 0.0;

        for (int category = 0; category < kCategoryCount; category++) {

            // Effective substitution amount for this category.
            const REALTYPE scale = static_cast<REALTYPE>(categoryRates[category]) * edgeLength;
            const REALTYPE weight = categoryWeights[category];
            const int patternIndex = category * kPatternCount + pattern; // Bad memory access
            const int v = patternIndex * kPartialsPaddedStateCount;

            // Denominator: <postOrder, preOrder> for this category.
            REALTYPE denominator = 0.0;
            for (int k = 0; k < kStateCount; k++) {
                denominator += postOrderPartial[v + k] * preOrderPartial[v + k];
            }
            patternDenominator += denominator * weight;

            // Weighted, rate-scaled outer product accumulated over categories.
            for (int k = 0; k < kStateCount; k++) {
                for (int j = 0; j < kStateCount; j++) {
                    tmp[k * kStateCount + j] += preOrderPartial[v + k] * postOrderPartial[v + j] * weight * scale;
                }
            }
        }

        // Normalize by the pattern likelihood and weight by pattern count.
        const auto patternWeight = gPatternWeights[pattern] / patternDenominator;
        for (int k = 0; k < kStateCount; k++) {
            for (int j = 0; j < kStateCount; j++) {
                outCrossProducts[k * kStateCount + j] += tmp[k * kStateCount + j] * patternWeight;
            }
        }
    }
}
subq $0xf8, %rsp movq 0x100(%rsp), %rax movq %rdi, 0xf0(%rsp) movq %rsi, 0xe8(%rsp) movq %rdx, 0xe0(%rsp) movq %rcx, 0xd8(%rsp) movq %r8, 0xd0(%rsp) vmovsd %xmm0, 0xc8(%rsp) movq %r9, 0xc0(%rsp) movq 0xf0(%rsp), %rax movq %rax, 0x28(%rsp) movl $0x0, 0xbc(%rsp) movq 0x28(%rsp), %rcx movl 0xbc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x82a0a movq 0x28(%rsp), %rax movl 0x24(%rax), %eax imull %eax, %eax movq %rax, 0x18(%rsp) movq $0x0, 0x98(%rsp) leaq 0x97(%rsp), %rdi movq %rdi, 0x20(%rsp) callq 0x63e90 movq 0x18(%rsp), %rsi movq 0x20(%rsp), %rcx leaq 0xa0(%rsp), %rdi leaq 0x98(%rsp), %rdx callq 0x641f0 jmp 0x826f4 leaq 0x97(%rsp), %rdi callq 0x60320 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x78(%rsp) movl $0x0, 0x74(%rsp) movq 0x28(%rsp), %rcx movl 0x74(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x8291d movq 0x28(%rsp), %rcx movq 0xd8(%rsp), %rax movslq 0x74(%rsp), %rdx vmovsd (%rax,%rdx,8), %xmm0 vmulsd 0xc8(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x68(%rsp) movq 0xd0(%rsp), %rax movslq 0x74(%rsp), %rdx vmovsd (%rax,%rdx,8), %xmm0 vmovsd %xmm0, 0x60(%rsp) movl 0x74(%rsp), %eax imull 0x14(%rcx), %eax addl 0xbc(%rsp), %eax movl %eax, 0x5c(%rsp) movl 0x5c(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0x58(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x50(%rsp) movl $0x0, 0x4c(%rsp) movq 0x28(%rsp), %rcx movl 0x4c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x82816 movq 0xe8(%rsp), %rax movl 0x58(%rsp), %ecx addl 0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0xe0(%rsp), %rax movl 0x58(%rsp), %ecx addl 0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x50(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x50(%rsp) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) jmp 0x82794 movq %rax, %rcx movl %edx, %eax movq %rcx, 0x88(%rsp) movl %eax, 0x84(%rsp) leaq 0x97(%rsp), %rdi callq 0x60320 jmp 0x82a12 vmovsd 0x50(%rsp), %xmm1 vmovsd 0x60(%rsp), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, 
%xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x78(%rsp) movl $0x0, 0x48(%rsp) movq 0x28(%rsp), %rcx movl 0x48(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x8290b movl $0x0, 0x44(%rsp) movq 0x28(%rsp), %rcx movl 0x44(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x828f9 movq 0x28(%rsp), %rcx movq 0xe0(%rsp), %rax movl 0x58(%rsp), %edx addl 0x48(%rsp), %edx movslq %edx, %rdx vmovsd (%rax,%rdx,8), %xmm0 movq 0xe8(%rsp), %rax movl 0x58(%rsp), %edx addl 0x44(%rsp), %edx movslq %edx, %rdx vmulsd (%rax,%rdx,8), %xmm0, %xmm0 vmulsd 0x60(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x10(%rsp) vmovsd 0x68(%rsp), %xmm0 vmovsd %xmm0, 0x8(%rsp) movl 0x48(%rsp), %eax imull 0x24(%rcx), %eax addl 0x44(%rsp), %eax movslq %eax, %rsi leaq 0xa0(%rsp), %rdi callq 0x61ef0 vmovsd 0x8(%rsp), %xmm0 vmovsd 0x10(%rsp), %xmm1 vmovsd (%rax), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax) movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0x82855 jmp 0x828fb movl 0x48(%rsp), %eax addl $0x1, %eax movl %eax, 0x48(%rsp) jmp 0x8283b jmp 0x8290d movl 0x74(%rsp), %eax addl $0x1, %eax movl %eax, 0x74(%rsp) jmp 0x82713 movq 0x28(%rsp), %rax movq 0x80(%rax), %rax movslq 0xbc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vdivsd 0x78(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x38(%rsp) movl $0x0, 0x34(%rsp) movq 0x28(%rsp), %rcx movl 0x34(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x829e7 movl $0x0, 0x30(%rsp) movq 0x28(%rsp), %rcx movl 0x30(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x829d5 movq 0x28(%rsp), %rcx movl 0x34(%rsp), %eax imull 0x24(%rcx), %eax addl 0x30(%rsp), %eax movslq %eax, %rsi leaq 0xa0(%rsp), %rdi callq 0x61ef0 movq 0x28(%rsp), %rdx vmovsd (%rax), %xmm1 vmovsd 0x38(%rsp), %xmm0 movq 0xc0(%rsp), %rax movl 0x34(%rsp), %ecx imull 0x24(%rdx), %ecx addl 0x30(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x82964 jmp 
0x829d7 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) jmp 0x8294a leaq 0xa0(%rsp), %rdi callq 0x63710 movl 0xbc(%rsp), %eax addl $0x1, %eax movl %eax, 0xbc(%rsp) jmp 0x82690 addq $0xf8, %rsp retq movq 0x88(%rsp), %rdi callq 0x644b0 nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcStatesPartials(double*, int const*, double const*, double const*, double const*, int, int)
// Computes destination partials for a node whose first child is a tip with
// observed states (states1/matrices1) and whose second child has full
// partials (partials2/matrices2), over patterns [startPattern, endPattern).
// Categories are processed in parallel; within each category the second
// child's matrix-vector product uses a 4-way unrolled inner loop with two
// independent accumulators.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcStatesPartials(REALTYPE* destP,
                                                           const int* states1,
                                                           const REALTYPE* matrices1,
                                                           const REALTYPE* partials2,
                                                           const REALTYPE* matrices2,
                                                           int startPattern,
                                                           int endPattern) {
    // Row stride inside a transition matrix: kStateCount states plus T_PAD
    // trailing pad columns.
    const int rowStride = kStateCount + T_PAD;

    // Largest multiple of four not exceeding kStateCount; the unrolled loop
    // covers this many states, the remainder loop handles the tail.
    const int unrolledStates = (kStateCount / 4) * 4;

#pragma omp parallel for num_threads(kCategoryCount)
    for (int cat = 0; cat < kCategoryCount; cat++) {
        // Offset of this category's first processed pattern in the padded
        // partials layout [category][pattern][kPartialsPaddedStateCount].
        int offset = cat * kPartialsPaddedStateCount * kPatternCount
                   + kPartialsPaddedStateCount * startPattern;
        const int matrixBase = cat * kMatrixSize;
        const REALTYPE* childPartials = &partials2[offset];
        REALTYPE* out = &destP[offset];

        for (int pat = startPattern; pat < endPattern; pat++) {
            int w = cat * kMatrixSize;
            const int tipState = states1[pat];

            for (int row = 0; row < kStateCount; row++) {
                // Row of the second child's matrix for this destination state.
                const REALTYPE* matRow = matrices2 + matrixBase + row * rowStride;

                // First child contributes a single matrix entry, selected by
                // its observed state.
                const REALTYPE tipFactor = matrices1[w + tipState];

                REALTYPE evenSum = 0.0;
                REALTYPE oddSum = 0.0;
                int s = 0;
                for (; s < unrolledStates; s += 4) {
                    evenSum += matRow[s + 0] * childPartials[s + 0];
                    oddSum  += matRow[s + 1] * childPartials[s + 1];
                    evenSum += matRow[s + 2] * childPartials[s + 2];
                    oddSum  += matRow[s + 3] * childPartials[s + 3];
                }
                for (; s < kStateCount; s++) {
                    evenSum += matRow[s] * childPartials[s];
                }

                w += rowStride;
                *(out++) = tipFactor * (evenSum + oddSum);
            }

            // Zero any pad slots after the real states in the destination.
            if (P_PAD) {
                for (int pad = 0; pad < P_PAD; pad++) {
                    *(out++) = 0.0;
                }
            }

            childPartials += kPartialsPaddedStateCount;
        }
    }
}
subq $0x18, %rsp movl 0x28(%rsp), %eax movl 0x20(%rsp), %eax movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq %rdx, (%rsp) movq %rcx, -0x8(%rsp) movq %r8, -0x10(%rsp) movq %r9, -0x18(%rsp) movq 0x10(%rsp), %rax movq %rax, -0x80(%rsp) movl 0x24(%rax), %ecx movl %ecx, -0x1c(%rsp) movl -0x1c(%rsp), %ecx addl $0x2, %ecx movl %ecx, -0x1c(%rsp) movl 0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, -0x20(%rsp) movl $0x0, -0x24(%rsp) movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x82ebc movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax imull 0x2c(%rcx), %eax imull 0x14(%rcx), %eax movl 0x2c(%rcx), %edx imull 0x20(%rsp), %edx addl %edx, %eax movl %eax, -0x28(%rsp) movl -0x24(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x2c(%rsp) movq -0x10(%rsp), %rax movslq -0x28(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x38(%rsp) movq 0x8(%rsp), %rax movslq -0x28(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x40(%rsp) movl 0x20(%rsp), %eax movl %eax, -0x44(%rsp) movl -0x44(%rsp), %eax cmpl 0x28(%rsp), %eax jge 0x82eaa movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x48(%rsp) movq (%rsp), %rax movslq -0x44(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x4c(%rsp) movl $0x0, -0x50(%rsp) movq -0x80(%rsp), %rcx movl -0x50(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x82e7e movq -0x18(%rsp), %rax movslq -0x2c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movl -0x50(%rsp), %ecx imull -0x1c(%rsp), %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x58(%rsp) movq -0x8(%rsp), %rax movl -0x48(%rsp), %ecx addl -0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x60(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x68(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x70(%rsp) movl $0x0, -0x74(%rsp) movl -0x74(%rsp), %eax cmpl -0x20(%rsp), %eax jge 0x82deb movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), 
%xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x74(%rsp), %eax addl $0x4, %eax movl %eax, -0x74(%rsp) jmp 0x82ce9 jmp 0x82ded movq -0x80(%rsp), %rcx movl -0x74(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x82e37 movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movl -0x74(%rsp), %eax addl $0x1, %eax movl %eax, -0x74(%rsp) jmp 0x82ded movl -0x1c(%rsp), %eax addl -0x48(%rsp), %eax movl %eax, -0x48(%rsp) vmovsd -0x60(%rsp), %xmm0 vmovsd -0x68(%rsp), %xmm1 vaddsd -0x70(%rsp), %xmm1, %xmm1 vmulsd %xmm1, %xmm0, %xmm0 movq -0x40(%rsp), %rax movq %rax, %rcx addq $0x8, %rcx movq %rcx, -0x40(%rsp) vmovsd %xmm0, 
(%rax) movl -0x50(%rsp), %eax addl $0x1, %eax movl %eax, -0x50(%rsp) jmp 0x82c77 movq -0x80(%rsp), %rax movl 0x2c(%rax), %ecx movq -0x38(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x38(%rsp) movl -0x44(%rsp), %eax addl $0x1, %eax movl %eax, -0x44(%rsp) jmp 0x82c40 jmp 0x82eac movl -0x24(%rsp), %eax addl $0x1, %eax movl %eax, -0x24(%rsp) jmp 0x82bcf addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::calcEdgeLogLikelihoodsByPartition(int const*, int const*, int const*, int const*, int const*, int const*, int const*, int, double*)
// Computes per-partition edge (branch) log-likelihoods for a set of
// parent/child buffer pairs, one entry per partition.
//
// For each partition p:
//   1. zero the slice of integrationTmp covering its patterns,
//   2. accumulate, over rate categories, the product of the transition
//      matrix row, the parent partials and the category weight
//      (specialized fast path when the child is a tip with compact states),
//   3. integrate against the state frequencies and take log per pattern,
//   4. optionally add cumulative scaling factors,
//   5. weight by gPatternWeights and sum into
//      outSumLogLikelihoodByPartition[p].
//
// Parameters (all arrays indexed by partition slot p in [0, partitionCount)):
//   parentBufferIndices / childBufferIndices - partials buffer indices
//   probabilityIndices                       - transition matrix indices
//   categoryWeightsIndices                   - gCategoryWeights indices
//   stateFrequenciesIndices                  - gStateFrequencies indices
//   cumulativeScaleIndices                   - scale buffer index or BEAGLE_OP_NONE
//   partitionIndices                         - index into gPatternPartitionsStartPatterns
//   outSumLogLikelihoodByPartition           - output, one double per partition
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcEdgeLogLikelihoodsByPartition(
                                          const int* parentBufferIndices,
                                          const int* childBufferIndices,
                                          const int* probabilityIndices,
                                          const int* categoryWeightsIndices,
                                          const int* stateFrequenciesIndices,
                                          const int* cumulativeScaleIndices,
                                          const int* partitionIndices,
                                          int partitionCount,
                                          double* outSumLogLikelihoodByPartition) {

    for (int p = 0; p < partitionCount; p++) {
        int pIndex = partitionIndices[p];
        // Half-open pattern range [startPattern, endPattern) for this partition.
        int startPattern = gPatternPartitionsStartPatterns[pIndex];
        int endPattern = gPatternPartitionsStartPatterns[pIndex + 1];

        // Clear only this partition's slice of the accumulation buffer.
        memset(&integrationTmp[startPattern*kStateCount], 0,
               ((endPattern - startPattern) * kStateCount)*sizeof(REALTYPE));

        const int parIndex = parentBufferIndices[p];
        const int childIndex = childBufferIndices[p];
        const int probIndex = probabilityIndices[p];
        const int categoryWeightsIndex = categoryWeightsIndices[p];
        const int stateFrequenciesIndex = stateFrequenciesIndices[p];
        const int scalingFactorsIndex = cumulativeScaleIndices[p];

        // The parent must be an internal node: it needs full partials.
        assert(parIndex >= kTipCount);

        const REALTYPE* partialsParent = gPartials[parIndex];
        const REALTYPE* transMatrix = gTransitionMatrices[probIndex];
        const REALTYPE* wt = gCategoryWeights[categoryWeightsIndex];
        const REALTYPE* freqs = gStateFrequencies[stateFrequenciesIndex];

        if (childIndex < kTipCount && gTipStates[childIndex]) {
            // Integrate against a state at the child: the child is a tip with
            // compact states, so the transition-matrix column is selected
            // directly by the observed state instead of a dot product.
            const int* statesChild = gTipStates[childIndex];
            int v = startPattern * kPartialsPaddedStateCount; // Index for parent partials

            for(int l = 0; l < kCategoryCount; l++) {
                int u = startPattern * kStateCount; // Index in resulting product-partials (summed over categories)
                const REALTYPE weight = wt[l];
                for(int k = startPattern; k < endPattern; k++) {
                    const int stateChild = statesChild[k];
                    // DISCUSSION PT: Does it make sense to change the order of the partials,
                    // so we can interchange the patterCount and categoryCount loop order?
                    int w = l * kMatrixSize;
                    for(int i = 0; i < kStateCount; i++) {
                        integrationTmp[u] += transMatrix[w + stateChild] * partialsParent[v + i] * weight;
                        u++;
                        // Rows of the transition matrix are padded; step by the
                        // padded row stride, not kStateCount.
                        w += kTransPaddedStateCount;
                    }
                    v += kPatternCount * kPartialsPaddedStateCount; // NOTE(review): see next line for actual stride used
                    v += kPartialsPaddedStateCount;
                }
                // Skip the patterns outside this partition before the next
                // category's block of partials.
                v += ((kPatternCount - endPattern) + startPattern) * kPartialsPaddedStateCount;
            }
        } else {
            // Integrate against a partial at the child: full dot product of
            // each transition-matrix row with the child partials.
            const REALTYPE* partialsChild = gPartials[childIndex];
            int v = startPattern * kPartialsPaddedStateCount;
            // Largest multiple of 4 <= kStateCount, for the 4-way unrolled loop.
            int stateCountModFour = (kStateCount / 4) * 4;

            for(int l = 0; l < kCategoryCount; l++) {
                int u = startPattern * kStateCount;
                const REALTYPE weight = wt[l];
                for(int k = startPattern; k < endPattern; k++) {
                    int w = l * kMatrixSize;
                    const REALTYPE* partialsChildPtr = &partialsChild[v];
                    for(int i = 0; i < kStateCount; i++) {
                        // Two accumulators break the dependency chain of the
                        // unrolled dot product.
                        double sumOverJA = 0.0, sumOverJB = 0.0;
                        int j = 0;
                        const REALTYPE* transMatrixPtr = &transMatrix[w];
                        for (; j < stateCountModFour; j += 4) {
                            sumOverJA += transMatrixPtr[j + 0] * partialsChildPtr[j + 0];
                            sumOverJB += transMatrixPtr[j + 1] * partialsChildPtr[j + 1];
                            sumOverJA += transMatrixPtr[j + 2] * partialsChildPtr[j + 2];
                            sumOverJB += transMatrixPtr[j + 3] * partialsChildPtr[j + 3];
                        }
                        // Remainder loop for kStateCount not divisible by 4.
                        for (; j < kStateCount; j++) {
                            sumOverJA += transMatrixPtr[j] * partialsChildPtr[j];
                        }
                        // for(int j = 0; j < kStateCount; j++) {
                        //     sumOverJ += transMatrix[w] * partialsChild[v + j];
                        //     w++;
                        // }
                        integrationTmp[u] += (sumOverJA + sumOverJB) * partialsParent[v + i] * weight;
                        u++;
                        w += kStateCount;
                        // increment for the extra column at the end
                        w += T_PAD;
                    }
                    v += kPartialsPaddedStateCount;
                }
                // Advance past patterns outside this partition.
                v += ((kPatternCount - endPattern) + startPattern) * kPartialsPaddedStateCount;
            }
        }

        // Integrate the category-summed products against the state
        // frequencies and take the log per pattern.
        int u = startPattern * kStateCount;
        for(int k = startPattern; k < endPattern; k++) {
            REALTYPE sumOverI = 0.0;
            for(int i = 0; i < kStateCount; i++) {
                sumOverI += freqs[i] * integrationTmp[u];
                u++;
            }
            outLogLikelihoodsTmp[k] = log(sumOverI);
        }

        // Undo any rescaling that was applied to the partials.
        if (scalingFactorsIndex != BEAGLE_OP_NONE) {
            const REALTYPE* scalingFactors = gScaleBuffers[scalingFactorsIndex];
            for(int k=startPattern; k < endPattern; k++)
                outLogLikelihoodsTmp[k] += scalingFactors[k];
        }

        // Pattern-weighted sum over this partition's patterns.
        outSumLogLikelihoodByPartition[p] = 0.0;
        for (int i = startPattern; i < endPattern; i++) {
            outSumLogLikelihoodByPartition[p] += outLogLikelihoodsTmp[i] * gPatternWeights[i];
        }
    }
}
subq $0x138, %rsp # imm = 0x138 movq 0x158(%rsp), %rax movl 0x150(%rsp), %eax movq 0x148(%rsp), %rax movq 0x140(%rsp), %rax movq %rdi, 0x130(%rsp) movq %rsi, 0x128(%rsp) movq %rdx, 0x120(%rsp) movq %rcx, 0x118(%rsp) movq %r8, 0x110(%rsp) movq %r9, 0x108(%rsp) movq 0x130(%rsp), %rax movq %rax, 0x8(%rsp) movl $0x0, 0x104(%rsp) movl 0x104(%rsp), %eax cmpl 0x150(%rsp), %eax jge 0x85f92 movq 0x8(%rsp), %rcx movq 0x148(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x100(%rsp) movq 0x90(%rcx), %rax movslq 0x100(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xfc(%rsp) movq 0x90(%rcx), %rax movl 0x100(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xf8(%rsp) movq 0xe0(%rcx), %rdi movl 0xfc(%rsp), %eax imull 0x24(%rcx), %eax cltq shlq $0x3, %rax addq %rax, %rdi movl 0xf8(%rsp), %eax subl 0xfc(%rsp), %eax imull 0x24(%rcx), %eax movslq %eax, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x60760 movq 0x8(%rsp), %rcx movq 0x128(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xf4(%rsp) movq 0x120(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xf0(%rsp) movq 0x118(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xec(%rsp) movq 0x110(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xe8(%rsp) movq 0x108(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xe4(%rsp) movq 0x140(%rsp), %rax movslq 0x104(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0xe0(%rsp) movq 0xb0(%rcx), %rax movslq 0xf4(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xd8(%rsp) movq 0xd8(%rcx), %rax movslq 0xec(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xd0(%rsp) movq 0xa0(%rcx), %rax movslq 0xe8(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xc8(%rsp) movq 0xa8(%rcx), %rax movslq 0xe4(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xc0(%rsp) movl 0xf0(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0x85a58 movq 0x8(%rsp), %rax 
movq 0xb8(%rax), %rax movslq 0xf0(%rsp), %rcx cmpq $0x0, (%rax,%rcx,8) je 0x85a58 movq 0x8(%rsp), %rcx movq 0xb8(%rcx), %rax movslq 0xf0(%rsp), %rdx movq (%rax,%rdx,8), %rax movq %rax, 0xb8(%rsp) movl 0xfc(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0xb4(%rsp) movl $0x0, 0xb0(%rsp) movq 0x8(%rsp), %rcx movl 0xb0(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x85a53 movq 0x8(%rsp), %rcx movl 0xfc(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0xac(%rsp) movq 0xc8(%rsp), %rax movslq 0xb0(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0xa0(%rsp) movl 0xfc(%rsp), %eax movl %eax, 0x9c(%rsp) movl 0x9c(%rsp), %eax cmpl 0xf8(%rsp), %eax jge 0x85a15 movq 0x8(%rsp), %rcx movq 0xb8(%rsp), %rax movslq 0x9c(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, 0x98(%rsp) movl 0xb0(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0x94(%rsp) movl $0x0, 0x90(%rsp) movq 0x8(%rsp), %rcx movl 0x90(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x859e9 movq 0x8(%rsp), %rax movq 0xd0(%rsp), %rcx movl 0x94(%rsp), %edx addl 0x98(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xd8(%rsp), %rcx movl 0xb4(%rsp), %edx addl 0x90(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0xa0(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0xac(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movl 0xac(%rsp), %ecx addl $0x1, %ecx movl %ecx, 0xac(%rsp) movl 0x28(%rax), %eax addl 0x94(%rsp), %eax movl %eax, 0x94(%rsp) movl 0x90(%rsp), %eax addl $0x1, %eax movl %eax, 0x90(%rsp) jmp 0x85934 movq 0x8(%rsp), %rax movl 0x2c(%rax), %eax addl 0xb4(%rsp), %eax movl %eax, 0xb4(%rsp) movl 0x9c(%rsp), %eax addl $0x1, %eax movl %eax, 0x9c(%rsp) jmp 0x858e4 movq 0x8(%rsp), %rcx movl 0x14(%rcx), %eax subl 0xf8(%rsp), %eax addl 0xfc(%rsp), %eax imull 0x2c(%rcx), %eax addl 0xb4(%rsp), %eax movl %eax, 0xb4(%rsp) movl 0xb0(%rsp), %eax addl $0x1, %eax movl %eax, 0xb0(%rsp) jmp 0x8588c jmp 0x85dac movq 0x8(%rsp), %rax movq 
0xb0(%rax), %rcx movslq 0xf0(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movq %rcx, 0x88(%rsp) movl 0xfc(%rsp), %ecx imull 0x2c(%rax), %ecx movl %ecx, 0x84(%rsp) movl 0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, 0x80(%rsp) movl $0x0, 0x7c(%rsp) movq 0x8(%rsp), %rcx movl 0x7c(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x85daa movq 0x8(%rsp), %rcx movl 0xfc(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0x78(%rsp) movq 0xc8(%rsp), %rax movslq 0x7c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x70(%rsp) movl 0xfc(%rsp), %eax movl %eax, 0x6c(%rsp) movl 0x6c(%rsp), %eax cmpl 0xf8(%rsp), %eax jge 0x85d72 movq 0x8(%rsp), %rcx movl 0x7c(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, 0x68(%rsp) movq 0x88(%rsp), %rax movslq 0x84(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x60(%rsp) movl $0x0, 0x5c(%rsp) movq 0x8(%rsp), %rcx movl 0x5c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x85d4c vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x50(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x48(%rsp) movl $0x0, 0x44(%rsp) movq 0xd0(%rsp), %rax movslq 0x68(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x38(%rsp) movl 0x44(%rsp), %eax cmpl 0x80(%rsp), %eax jge 0x85c82 movq 0x38(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x60(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x50(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x50(%rsp) movq 0x38(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x60(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x48(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x48(%rsp) movq 0x38(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x60(%rsp), %rax movl 0x44(%rsp), %ecx 
addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x50(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x50(%rsp) movq 0x38(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x60(%rsp), %rax movl 0x44(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x48(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x48(%rsp) movl 0x44(%rsp), %eax addl $0x4, %eax movl %eax, 0x44(%rsp) jmp 0x85b7d jmp 0x85c84 movq 0x8(%rsp), %rcx movl 0x44(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x85cce movq 0x38(%rsp), %rax movslq 0x44(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x60(%rsp), %rax movslq 0x44(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x50(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x50(%rsp) movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0x85c84 movq 0x8(%rsp), %rax vmovsd 0x50(%rsp), %xmm0 vaddsd 0x48(%rsp), %xmm0, %xmm0 movq 0xd8(%rsp), %rcx movl 0x84(%rsp), %edx addl 0x5c(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm1 vmovsd 0x70(%rsp), %xmm0 movq 0xe0(%rax), %rcx movslq 0x78(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rcx,%rdx,8) movl 0x78(%rsp), %ecx addl $0x1, %ecx movl %ecx, 0x78(%rsp) movl 0x24(%rax), %eax addl 0x68(%rsp), %eax movl %eax, 0x68(%rsp) movl 0x68(%rsp), %eax addl $0x2, %eax movl %eax, 0x68(%rsp) movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) jmp 0x85b36 movq 0x8(%rsp), %rax movl 0x2c(%rax), %eax addl 0x84(%rsp), %eax movl %eax, 0x84(%rsp) movl 0x6c(%rsp), %eax addl $0x1, %eax movl %eax, 0x6c(%rsp) jmp 0x85af0 movq 0x8(%rsp), %rcx movl 0x14(%rcx), %eax subl 0xf8(%rsp), %eax addl 0xfc(%rsp), %eax imull 0x2c(%rcx), %eax addl 0x84(%rsp), %eax movl %eax, 0x84(%rsp) movl 0x7c(%rsp), %eax addl $0x1, %eax movl %eax, 
0x7c(%rsp) jmp 0x85aa7 jmp 0x85dac movq 0x8(%rsp), %rcx movl 0xfc(%rsp), %eax imull 0x24(%rcx), %eax movl %eax, 0x34(%rsp) movl 0xfc(%rsp), %eax movl %eax, 0x30(%rsp) movl 0x30(%rsp), %eax cmpl 0xf8(%rsp), %eax jge 0x85e7e vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x28(%rsp) movl $0x0, 0x24(%rsp) movq 0x8(%rsp), %rcx movl 0x24(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x85e4d movq 0x8(%rsp), %rax movq 0xc0(%rsp), %rcx movslq 0x24(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0xe0(%rax), %rax movslq 0x34(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x28(%rsp) movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) jmp 0x85dee vmovsd 0x28(%rsp), %xmm0 callq 0x61460 movq 0x8(%rsp), %rax movq 0x110(%rax), %rax movslq 0x30(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x85dcb cmpl $-0x1, 0xe0(%rsp) je 0x85ef6 movq 0x8(%rsp), %rax movq 0xc0(%rax), %rax movslq 0xe0(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x18(%rsp) movl 0xfc(%rsp), %eax movl %eax, 0x14(%rsp) movl 0x14(%rsp), %eax cmpl 0xf8(%rsp), %eax jge 0x85ef4 movq 0x8(%rsp), %rax movq 0x18(%rsp), %rcx movslq 0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0x110(%rax), %rax movslq 0x14(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x85eb0 jmp 0x85ef6 movq 0x158(%rsp), %rax movslq 0x104(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0xfc(%rsp), %eax movl %eax, 0x10(%rsp) movl 0x10(%rsp), %eax cmpl 0xf8(%rsp), %eax jge 0x85f7a movq 0x8(%rsp), %rax movq 0x110(%rax), %rcx movslq 0x10(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x80(%rax), %rax movslq 0x10(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x158(%rsp), %rax movslq 0x104(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm2 vfmadd213sd %xmm2, 
%xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, (%rax,%rcx,8) movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0x85f1a jmp 0x85f7c movl 0x104(%rsp), %eax addl $0x1, %eax movl %eax, 0x104(%rsp) jmp 0x8566e addq $0x138, %rsp # imm = 0x138 retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::rescalePartials(double*, double*, double*, int)
BEAGLE_CPU_TEMPLATE void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::rescalePartials(REALTYPE* destP, REALTYPE* scaleFactors, REALTYPE* cumulativeScaleFactors, const int fillWithOnes) { if (DEBUGGING_OUTPUT) { std::cerr << "destP (before rescale): \n";// << destP << "\n"; for(int i=0; i<kPartialsSize; i++) fprintf(stderr,"destP[%d] = %.5f\n",i,destP[i]); } // TODO None of the code below has been optimized. for (int k = 0; k < kPatternCount; k++) { REALTYPE max = 0; const int patternOffset = k * kPartialsPaddedStateCount; for (int l = 0; l < kCategoryCount; l++) { int offset = l * kPaddedPatternCount * kPartialsPaddedStateCount + patternOffset; for (int i = 0; i < kStateCount; i++) { if(destP[offset] > max) max = destP[offset]; offset++; } } if (max == 0) max = 1.0; REALTYPE oneOverMax = REALTYPE(1.0) / max; for (int l = 0; l < kCategoryCount; l++) { int offset = l * kPaddedPatternCount * kPartialsPaddedStateCount + patternOffset; for (int i = 0; i < kStateCount; i++) destP[offset++] *= oneOverMax; } if (kFlags & BEAGLE_FLAG_SCALERS_LOG) { REALTYPE logMax = log(max); scaleFactors[k] = logMax; if( cumulativeScaleFactors != NULL ) cumulativeScaleFactors[k] += logMax; } else { scaleFactors[k] = max; if( cumulativeScaleFactors != NULL ) cumulativeScaleFactors[k] += log(max); } } if (DEBUGGING_OUTPUT) { for(int i=0; i<kPatternCount; i++) fprintf(stderr,"new scaleFactor[%d] = %.5f\n",i,scaleFactors[i]); } }
subq $0x68, %rsp movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movq %rdx, 0x50(%rsp) movq %rcx, 0x48(%rsp) movl %r8d, 0x44(%rsp) movq 0x60(%rsp), %rax movq %rax, (%rsp) movl $0x0, 0x40(%rsp) movq (%rsp), %rcx movl 0x40(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x86dda movq (%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x38(%rsp) movl 0x40(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0x34(%rsp) movl $0x0, 0x30(%rsp) movq (%rsp), %rcx movl 0x30(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x86c75 movq (%rsp), %rcx movl 0x30(%rsp), %eax imull 0x18(%rcx), %eax imull 0x2c(%rcx), %eax addl 0x34(%rsp), %eax movl %eax, 0x2c(%rsp) movl $0x0, 0x28(%rsp) movq (%rsp), %rcx movl 0x28(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x86c63 movq 0x58(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vucomisd 0x38(%rsp), %xmm0 jbe 0x86c4b movq 0x58(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x38(%rsp) movl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c(%rsp) movl 0x28(%rsp), %eax addl $0x1, %eax movl %eax, 0x28(%rsp) jmp 0x86c12 jmp 0x86c65 movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x86be1 vmovsd 0x38(%rsp), %xmm0 vpxor %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jne 0x86c95 jp 0x86c95 vmovsd 0x32379(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, 0x38(%rsp) vmovsd 0x3236b(%rip), %xmm0 # 0xb9008 vdivsd 0x38(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x20(%rsp) movl $0x0, 0x1c(%rsp) movq (%rsp), %rcx movl 0x1c(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x86d2c movq (%rsp), %rcx movl 0x1c(%rsp), %eax imull 0x18(%rcx), %eax imull 0x2c(%rcx), %eax addl 0x34(%rsp), %eax movl %eax, 0x18(%rsp) movl $0x0, 0x14(%rsp) movq (%rsp), %rcx movl 0x14(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x86d1d vmovsd 0x20(%rsp), %xmm0 movq 0x58(%rsp), %rax movl 0x18(%rsp), %ecx movl %ecx, %edx addl $0x1, %edx movl %edx, 0x18(%rsp) movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 
0x86cde jmp 0x86d1f movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x86cb1 movq (%rsp), %rax movq 0x58(%rax), %rax andq $0x400, %rax # imm = 0x400 cmpq $0x0, %rax je 0x86d8a vmovsd 0x38(%rsp), %xmm0 callq 0x61460 vmovsd %xmm0, 0x8(%rsp) vmovsd 0x8(%rsp), %xmm0 movq 0x50(%rsp), %rax movslq 0x40(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) cmpq $0x0, 0x48(%rsp) je 0x86d88 vmovsd 0x8(%rsp), %xmm0 movq 0x48(%rsp), %rax movslq 0x40(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x86dc8 vmovsd 0x38(%rsp), %xmm0 movq 0x50(%rsp), %rax movslq 0x40(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) cmpq $0x0, 0x48(%rsp) je 0x86dc6 vmovsd 0x38(%rsp), %xmm0 callq 0x61460 movq 0x48(%rsp), %rax movslq 0x40(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x86dc8 jmp 0x86dca movl 0x40(%rsp), %eax addl $0x1, %eax movl %eax, 0x40(%rsp) jmp 0x86bae addq $0x68, %rsp retq nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 2, 0>::rescalePartialsByPartition(double*, double*, double*, int, int)
BEAGLE_CPU_TEMPLATE void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::rescalePartialsByPartition(REALTYPE* destP, REALTYPE* scaleFactors, REALTYPE* cumulativeScaleFactors, const int fillWithOnes, const int partitionIndex) { int startPattern = gPatternPartitionsStartPatterns[partitionIndex]; int endPattern = gPatternPartitionsStartPatterns[partitionIndex + 1]; // TODO None of the code below has been optimized. for (int k = startPattern; k < endPattern; k++) { REALTYPE max = 0; const int patternOffset = k * kPartialsPaddedStateCount; for (int l = 0; l < kCategoryCount; l++) { int offset = l * kPaddedPatternCount * kPartialsPaddedStateCount + patternOffset; for (int i = 0; i < kStateCount; i++) { if(destP[offset] > max) max = destP[offset]; offset++; } } if (max == 0) max = 1.0; REALTYPE oneOverMax = REALTYPE(1.0) / max; for (int l = 0; l < kCategoryCount; l++) { int offset = l * kPaddedPatternCount * kPartialsPaddedStateCount + patternOffset; for (int i = 0; i < kStateCount; i++) destP[offset++] *= oneOverMax; } if (kFlags & BEAGLE_FLAG_SCALERS_LOG) { REALTYPE logMax = log(max); scaleFactors[k] = logMax; if( cumulativeScaleFactors != NULL ) cumulativeScaleFactors[k] += logMax; } else { scaleFactors[k] = max; if( cumulativeScaleFactors != NULL ) cumulativeScaleFactors[k] += log(max); } } }
subq $0x78, %rsp movq %rdi, 0x70(%rsp) movq %rsi, 0x68(%rsp) movq %rdx, 0x60(%rsp) movq %rcx, 0x58(%rsp) movl %r8d, 0x54(%rsp) movl %r9d, 0x50(%rsp) movq 0x70(%rsp), %rax movq %rax, (%rsp) movq 0x90(%rax), %rcx movslq 0x50(%rsp), %rdx movl (%rcx,%rdx,4), %ecx movl %ecx, 0x4c(%rsp) movq 0x90(%rax), %rax movl 0x50(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx movl (%rax,%rcx,4), %eax movl %eax, 0x48(%rsp) movl 0x4c(%rsp), %eax movl %eax, 0x44(%rsp) movl 0x44(%rsp), %eax cmpl 0x48(%rsp), %eax jge 0x87067 movq (%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x38(%rsp) movl 0x44(%rsp), %eax imull 0x2c(%rcx), %eax movl %eax, 0x34(%rsp) movl $0x0, 0x30(%rsp) movq (%rsp), %rcx movl 0x30(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x86f02 movq (%rsp), %rcx movl 0x30(%rsp), %eax imull 0x18(%rcx), %eax imull 0x2c(%rcx), %eax addl 0x34(%rsp), %eax movl %eax, 0x2c(%rsp) movl $0x0, 0x28(%rsp) movq (%rsp), %rcx movl 0x28(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x86ef0 movq 0x68(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vucomisd 0x38(%rsp), %xmm0 jbe 0x86ed8 movq 0x68(%rsp), %rax movslq 0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x38(%rsp) movl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c(%rsp) movl 0x28(%rsp), %eax addl $0x1, %eax movl %eax, 0x28(%rsp) jmp 0x86e9f jmp 0x86ef2 movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0x86e6e vmovsd 0x38(%rsp), %xmm0 vpxor %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jne 0x86f22 jp 0x86f22 vmovsd 0x320ec(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, 0x38(%rsp) vmovsd 0x320de(%rip), %xmm0 # 0xb9008 vdivsd 0x38(%rsp), %xmm0, %xmm0 vmovsd %xmm0, 0x20(%rsp) movl $0x0, 0x1c(%rsp) movq (%rsp), %rcx movl 0x1c(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0x86fb9 movq (%rsp), %rcx movl 0x1c(%rsp), %eax imull 0x18(%rcx), %eax imull 0x2c(%rcx), %eax addl 0x34(%rsp), %eax movl %eax, 0x18(%rsp) movl $0x0, 0x14(%rsp) movq (%rsp), %rcx movl 0x14(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0x86faa vmovsd 0x20(%rsp), %xmm0 movq 
0x68(%rsp), %rax movl 0x18(%rsp), %ecx movl %ecx, %edx addl $0x1, %edx movl %edx, 0x18(%rsp) movslq %ecx, %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0x86f6b jmp 0x86fac movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x86f3e movq (%rsp), %rax movq 0x58(%rax), %rax andq $0x400, %rax # imm = 0x400 cmpq $0x0, %rax je 0x87017 vmovsd 0x38(%rsp), %xmm0 callq 0x61460 vmovsd %xmm0, 0x8(%rsp) vmovsd 0x8(%rsp), %xmm0 movq 0x60(%rsp), %rax movslq 0x44(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) cmpq $0x0, 0x58(%rsp) je 0x87015 vmovsd 0x8(%rsp), %xmm0 movq 0x58(%rsp), %rax movslq 0x44(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x87055 vmovsd 0x38(%rsp), %xmm0 movq 0x60(%rsp), %rax movslq 0x44(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) cmpq $0x0, 0x58(%rsp) je 0x87053 vmovsd 0x38(%rsp), %xmm0 callq 0x61460 movq 0x58(%rsp), %rax movslq 0x44(%rsp), %rcx vaddsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) jmp 0x87055 jmp 0x87057 movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0x86e3e addq $0x78, %rsp retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 2, 0>::accumulateDerivativesDispatch1<false>(double*, double*, double*)
BEAGLE_CPU_TEMPLATE template <bool DoDerivatives> void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch1( double* outDerivatives, double* outSumDerivatives, double* outSumSquaredDerivatives) { if (outSumDerivatives == NULL) { accumulateDerivativesDispatch2<DoDerivatives, false>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } else { accumulateDerivativesDispatch2<DoDerivatives, true>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } }
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x10(%rsp) jne 0x89e03 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x62410 jmp 0x89e1b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x64790 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 2, 0>::accumulateDerivativesDispatch1<true>(double*, double*, double*)
BEAGLE_CPU_TEMPLATE template <bool DoDerivatives> void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch1( double* outDerivatives, double* outSumDerivatives, double* outSumSquaredDerivatives) { if (outSumDerivatives == NULL) { accumulateDerivativesDispatch2<DoDerivatives, false>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } else { accumulateDerivativesDispatch2<DoDerivatives, true>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives); } }
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x10(%rsp) jne 0x89e63 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x61fc0 jmp 0x89e7b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x64530 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesImpl<false, false, false>(double*, double*, double*)
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesImpl( double* outDerivatives, double* outSumDerivatives, double* outSumSquaredDerivatives) { V_Real vSum = VEC_SETZERO(); V_Real vSumSquared = VEC_SETZERO(); int k = 0; for (; k < kPatternCount - 1; k += 2) { V_Real numerator = VEC_LOAD(grandNumeratorDerivTmp + k); V_Real denominator = VEC_LOAD(grandDenominatorDerivTmp + k); V_Real derivative = VEC_DIV(numerator, denominator); V_Real patternWeight = VEC_LOAD(gPatternWeights + k); if (DoDerivatives) { VEC_STOREU(outDerivatives + k, derivative); } if (DoSum) { vSum = VEC_MADD(derivative, patternWeight, vSum); } if (DoSumSquared) { derivative = VEC_MULT(derivative, derivative); vSumSquared = VEC_MADD(derivative, patternWeight, vSumSquared); } } double sum; double sumSquared; if (DoSum) { sum = _mm_cvtsd_f64(VEC_ADD(vSum, VEC_SWAP(vSum))); } if (DoSumSquared) { sumSquared = _mm_cvtsd_f64(VEC_ADD(vSumSquared, VEC_SWAP(vSumSquared))); } for (; k < kPatternCount; ++k) { double derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k]; if (DoDerivatives) { outDerivatives[k] = derivative; } if (DoSum) { sum += derivative * gPatternWeights[k]; } if (DoSumSquared) { sumSquared += derivative * derivative * gPatternWeights[k]; } } if (DoSum) { *outSumDerivatives = sum; } if (DoSumSquared) { *outSumSquaredDerivatives = sumSquared; } }
subq $0x98, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rdx, 0x20(%rsp) movq %rcx, 0x18(%rsp) movq 0x30(%rsp), %rax movq %rax, -0x80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0x60(%rsp) vmovapd 0x60(%rsp), %xmm1 vmovapd %xmm1, (%rsp) vmovapd %xmm0, 0x50(%rsp) vmovapd 0x50(%rsp), %xmm0 vmovapd %xmm0, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax movl 0x14(%rcx), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x8f7b5 movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x48(%rsp) movq 0x48(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x30(%rsp) movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x40(%rsp) movq 0x40(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x40(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0x80(%rsp) vmovapd %xmm0, 0x70(%rsp) vmovapd 0x80(%rsp), %xmm0 vdivpd 0x70(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) movl -0x14(%rsp), %eax addl $0x2, %eax movl %eax, -0x14(%rsp) jmp 0x8f6e4 jmp 0x8f7b7 movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x8f7ff movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rax movslq -0x14(%rsp), %rcx vdivsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x78(%rsp) movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0x8f7b7 addq $0x98, %rsp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesImpl<false, false, true>(double*, double*, double*)
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesImpl( double* outDerivatives, double* outSumDerivatives, double* outSumSquaredDerivatives) { V_Real vSum = VEC_SETZERO(); V_Real vSumSquared = VEC_SETZERO(); int k = 0; for (; k < kPatternCount - 1; k += 2) { V_Real numerator = VEC_LOAD(grandNumeratorDerivTmp + k); V_Real denominator = VEC_LOAD(grandDenominatorDerivTmp + k); V_Real derivative = VEC_DIV(numerator, denominator); V_Real patternWeight = VEC_LOAD(gPatternWeights + k); if (DoDerivatives) { VEC_STOREU(outDerivatives + k, derivative); } if (DoSum) { vSum = VEC_MADD(derivative, patternWeight, vSum); } if (DoSumSquared) { derivative = VEC_MULT(derivative, derivative); vSumSquared = VEC_MADD(derivative, patternWeight, vSumSquared); } } double sum; double sumSquared; if (DoSum) { sum = _mm_cvtsd_f64(VEC_ADD(vSum, VEC_SWAP(vSum))); } if (DoSumSquared) { sumSquared = _mm_cvtsd_f64(VEC_ADD(vSumSquared, VEC_SWAP(vSumSquared))); } for (; k < kPatternCount; ++k) { double derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k]; if (DoDerivatives) { outDerivatives[k] = derivative; } if (DoSum) { sum += derivative * gPatternWeights[k]; } if (DoSumSquared) { sumSquared += derivative * derivative * gPatternWeights[k]; } } if (DoSum) { *outSumDerivatives = sum; } if (DoSumSquared) { *outSumSquaredDerivatives = sumSquared; } }
subq $0x128, %rsp # imm = 0x128 movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %rdx, 0x18(%rsp) movq %rcx, 0x10(%rsp) movq 0x28(%rsp), %rax movq %rax, -0x80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0xf0(%rsp) vmovapd 0xf0(%rsp), %xmm1 vmovapd %xmm1, (%rsp) vmovapd %xmm0, 0xe0(%rsp) vmovapd 0xe0(%rsp), %xmm0 vmovapd %xmm0, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax movl 0x14(%rcx), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x8f9d7 movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xd8(%rsp) movq 0xd8(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x30(%rsp) movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xd0(%rsp) movq 0xd0(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x40(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0x110(%rsp) vmovapd %xmm0, 0x100(%rsp) vmovapd 0x110(%rsp), %xmm0 vdivpd 0x100(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xc8(%rsp) movq 0xc8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x50(%rsp), %xmm0 vmovapd %xmm1, 0x60(%rsp) vmovapd %xmm0, 0x50(%rsp) vmovapd 0x60(%rsp), %xmm0 vmulpd 0x50(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x40(%rsp) vmovapd %xmm0, 0x30(%rsp) vmovapd 0x40(%rsp), %xmm0 vmulpd 0x30(%rsp), %xmm0, %xmm1 vmovapd -0x10(%rsp), %xmm0 vmovapd %xmm1, 0xa0(%rsp) vmovapd %xmm0, 0x90(%rsp) vmovapd 0xa0(%rsp), %xmm0 vaddpd 0x90(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x10(%rsp) movl -0x14(%rsp), %eax addl $0x2, %eax movl %eax, -0x14(%rsp) jmp 0x8f870 vmovapd -0x10(%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0x80(%rsp) vmovapd %xmm0, 0x70(%rsp) vmovapd 0x80(%rsp), %xmm0 vmovapd 0x70(%rsp), %xmm1 vaddpd %xmm1, 
%xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovsd 0xb0(%rsp), %xmm0 vmovsd %xmm0, -0x70(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x8fa93 movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x78(%rsp) vmovsd -0x78(%rsp), %xmm0 vmulsd -0x78(%rsp), %xmm0, %xmm1 movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0x8fa1d vmovsd -0x70(%rsp), %xmm0 movq 0x10(%rsp), %rax vmovsd %xmm0, (%rax) addq $0x128, %rsp # imm = 0x128 retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesImpl<false, true, false>(double*, double*, double*)
// Accumulates per-pattern derivative statistics using SSE (2 doubles/vector).
// The DoDerivatives / DoSum / DoSumSquared template flags select at compile
// time which outputs are produced; the disabled branches are dead code.
//
// outDerivatives           : per-pattern derivatives (written iff DoDerivatives).
// outSumDerivatives        : pattern-weighted sum of derivatives (iff DoSum).
// outSumSquaredDerivatives : pattern-weighted sum of squared derivatives (iff DoSumSquared).
//
// Reads members grandNumeratorDerivTmp, grandDenominatorDerivTmp and
// gPatternWeights; assumes kPatternCount valid entries in each, aligned as
// required by VEC_LOAD — TODO confirm alignment guarantee at allocation site.
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesImpl(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {

    V_Real vSum = VEC_SETZERO();
    V_Real vSumSquared = VEC_SETZERO();

    // Vectorized main loop: processes two patterns per iteration.
    int k = 0;
    for (; k < kPatternCount - 1; k += 2) {
        V_Real numerator = VEC_LOAD(grandNumeratorDerivTmp + k);
        V_Real denominator = VEC_LOAD(grandDenominatorDerivTmp + k);
        V_Real derivative = VEC_DIV(numerator, denominator);
        V_Real patternWeight = VEC_LOAD(gPatternWeights + k);
        if (DoDerivatives) {
            // Unaligned store: caller's output buffer need not be 16B aligned.
            VEC_STOREU(outDerivatives + k, derivative);
        }
        if (DoSum) {
            vSum = VEC_MADD(derivative, patternWeight, vSum);
        }
        if (DoSumSquared) {
            // Square in place; 'derivative' is not needed unmodified below.
            derivative = VEC_MULT(derivative, derivative);
            vSumSquared = VEC_MADD(derivative, patternWeight, vSumSquared);
        }
    }

    double sum;
    double sumSquared;
    // Horizontal reduction: add the two vector lanes of each accumulator.
    if (DoSum) {
        sum = _mm_cvtsd_f64(VEC_ADD(vSum, VEC_SWAP(vSum)));
    }
    if (DoSumSquared) {
        sumSquared = _mm_cvtsd_f64(VEC_ADD(vSumSquared, VEC_SWAP(vSumSquared)));
    }

    // Scalar tail: the final pattern when kPatternCount is odd.
    for (; k < kPatternCount; ++k) {
        double derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            outDerivatives[k] = derivative;
        }
        if (DoSum) {
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }

    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
subq $0x108, %rsp # imm = 0x108 movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %rdx, 0x18(%rsp) movq %rcx, 0x10(%rsp) movq 0x28(%rsp), %rax movq %rax, -0x80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0xd0(%rsp) vmovapd 0xd0(%rsp), %xmm1 vmovapd %xmm1, (%rsp) vmovapd %xmm0, 0xc0(%rsp) vmovapd 0xc0(%rsp), %xmm0 vmovapd %xmm0, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax movl 0x14(%rcx), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x8fc45 movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xb8(%rsp) movq 0xb8(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x30(%rsp) movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xb0(%rsp) movq 0xb0(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x40(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0xf0(%rsp) vmovapd %xmm0, 0xe0(%rsp) vmovapd 0xf0(%rsp), %xmm0 vdivpd 0xe0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xa8(%rsp) movq 0xa8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x40(%rsp) vmovapd %xmm0, 0x30(%rsp) vmovapd 0x40(%rsp), %xmm0 vmulpd 0x30(%rsp), %xmm0, %xmm1 vmovapd (%rsp), %xmm0 vmovapd %xmm1, 0x80(%rsp) vmovapd %xmm0, 0x70(%rsp) vmovapd 0x80(%rsp), %xmm0 vaddpd 0x70(%rsp), %xmm0, %xmm0 vmovapd %xmm0, (%rsp) movl -0x14(%rsp), %eax addl $0x2, %eax movl %eax, -0x14(%rsp) jmp 0x8fb10 vmovapd (%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0x60(%rsp) vmovapd %xmm0, 0x50(%rsp) vmovapd 0x60(%rsp), %xmm0 vmovapd 0x50(%rsp), %xmm1 vaddpd %xmm1, %xmm0, %xmm0 vmovapd %xmm0, 0x90(%rsp) vmovsd 0x90(%rsp), %xmm0 vmovsd %xmm0, -0x68(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x8fcf4 movq -0x80(%rsp), %rax movq 
0x100(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x78(%rsp) vmovsd -0x78(%rsp), %xmm1 movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0x8fc84 vmovsd -0x68(%rsp), %xmm0 movq 0x18(%rsp), %rax vmovsd %xmm0, (%rax) addq $0x108, %rsp # imm = 0x108 retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesImpl<false, true, true>(double*, double*, double*)
// Accumulates per-pattern derivative statistics using SSE (2 doubles/vector).
// DoDerivatives / DoSum / DoSumSquared are compile-time template flags; only
// the enabled outputs are computed, the rest is dead-code-eliminated.
//
// outDerivatives           : per-pattern derivatives (written iff DoDerivatives).
// outSumDerivatives        : pattern-weighted sum of derivatives (iff DoSum).
// outSumSquaredDerivatives : pattern-weighted sum of squared derivatives (iff DoSumSquared).
//
// Uses member buffers grandNumeratorDerivTmp, grandDenominatorDerivTmp and
// gPatternWeights (kPatternCount entries each; VEC_LOAD implies the aligned
// load path — alignment assumed guaranteed by the allocator, TODO confirm).
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesImpl(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {

    V_Real vSum = VEC_SETZERO();
    V_Real vSumSquared = VEC_SETZERO();

    // Main loop: two patterns per iteration.
    int k = 0;
    for (; k < kPatternCount - 1; k += 2) {
        V_Real numerator = VEC_LOAD(grandNumeratorDerivTmp + k);
        V_Real denominator = VEC_LOAD(grandDenominatorDerivTmp + k);
        V_Real derivative = VEC_DIV(numerator, denominator);
        V_Real patternWeight = VEC_LOAD(gPatternWeights + k);
        if (DoDerivatives) {
            VEC_STOREU(outDerivatives + k, derivative);
        }
        if (DoSum) {
            vSum = VEC_MADD(derivative, patternWeight, vSum);
        }
        if (DoSumSquared) {
            // Reuse the register: derivative^2 weighted into the second accumulator.
            derivative = VEC_MULT(derivative, derivative);
            vSumSquared = VEC_MADD(derivative, patternWeight, vSumSquared);
        }
    }

    double sum;
    double sumSquared;
    // Collapse each 2-lane accumulator to a scalar (lane0 + lane1).
    if (DoSum) {
        sum = _mm_cvtsd_f64(VEC_ADD(vSum, VEC_SWAP(vSum)));
    }
    if (DoSumSquared) {
        sumSquared = _mm_cvtsd_f64(VEC_ADD(vSumSquared, VEC_SWAP(vSumSquared)));
    }

    // Scalar remainder (odd kPatternCount leaves one pattern).
    for (; k < kPatternCount; ++k) {
        double derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            outDerivatives[k] = derivative;
        }
        if (DoSum) {
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }

    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
subq $0x198, %rsp # imm = 0x198 movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %rdx, 0x18(%rsp) movq %rcx, 0x10(%rsp) movq 0x28(%rsp), %rax movq %rax, -0x80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0x160(%rsp) vmovapd 0x160(%rsp), %xmm1 vmovapd %xmm1, (%rsp) vmovapd %xmm0, 0x150(%rsp) vmovapd 0x150(%rsp), %xmm0 vmovapd %xmm0, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax movl 0x14(%rcx), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x8ff2f movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x148(%rsp) movq 0x148(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x30(%rsp) movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0x140(%rsp) movq 0x140(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x40(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0x180(%rsp) vmovapd %xmm0, 0x170(%rsp) vmovapd 0x180(%rsp), %xmm0 vdivpd 0x170(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x138(%rsp) movq 0x138(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x80(%rsp) vmovapd %xmm0, 0x70(%rsp) vmovapd 0x80(%rsp), %xmm0 vmulpd 0x70(%rsp), %xmm0, %xmm1 vmovapd (%rsp), %xmm0 vmovapd %xmm1, 0x100(%rsp) vmovapd %xmm0, 0xf0(%rsp) vmovapd 0x100(%rsp), %xmm0 vaddpd 0xf0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, (%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x50(%rsp), %xmm0 vmovapd %xmm1, 0x60(%rsp) vmovapd %xmm0, 0x50(%rsp) vmovapd 0x60(%rsp), %xmm0 vmulpd 0x50(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x40(%rsp) vmovapd %xmm0, 0x30(%rsp) vmovapd 0x40(%rsp), %xmm0 vmulpd 0x30(%rsp), %xmm0, %xmm1 vmovapd -0x10(%rsp), %xmm0 vmovapd %xmm1, 0xe0(%rsp) vmovapd %xmm0, 0xd0(%rsp) vmovapd 0xe0(%rsp), 
%xmm0 vaddpd 0xd0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x10(%rsp) movl -0x14(%rsp), %eax addl $0x2, %eax movl %eax, -0x14(%rsp) jmp 0x8fd70 vmovapd (%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0xc0(%rsp) vmovapd %xmm0, 0xb0(%rsp) vmovapd 0xc0(%rsp), %xmm0 vmovapd 0xb0(%rsp), %xmm1 vaddpd %xmm1, %xmm0, %xmm0 vmovapd %xmm0, 0x120(%rsp) vmovsd 0x120(%rsp), %xmm0 vmovsd %xmm0, -0x68(%rsp) vmovapd -0x10(%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0xa0(%rsp) vmovapd %xmm0, 0x90(%rsp) vmovapd 0xa0(%rsp), %xmm0 vmovapd 0x90(%rsp), %xmm1 vaddpd %xmm1, %xmm0, %xmm0 vmovapd %xmm0, 0x110(%rsp) vmovsd 0x110(%rsp), %xmm0 vmovsd %xmm0, -0x70(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x9006b movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x78(%rsp) vmovsd -0x78(%rsp), %xmm1 movq 0x80(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) vmovsd -0x78(%rsp), %xmm0 vmulsd -0x78(%rsp), %xmm0, %xmm1 movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0x8ffc6 vmovsd -0x68(%rsp), %xmm0 movq 0x18(%rsp), %rax vmovsd %xmm0, (%rax) vmovsd -0x70(%rsp), %xmm0 movq 0x10(%rsp), %rax vmovsd %xmm0, (%rax) addq $0x198, %rsp # imm = 0x198 retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesDispatch2<true, true>(double*, double*, double*)
// Second-stage dispatch: turns the run-time nullness of the sum-of-squares
// output pointer into the compile-time DoSumSquared flag of the SSE kernel,
// so the kernel never has to branch on it per pattern.
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesDispatch2(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {
    if (outSumSquaredDerivatives != NULL) {
        accumulateDerivativesImpl<DoDerivatives, DoSum, true>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        accumulateDerivativesImpl<DoDerivatives, DoSum, false>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x8(%rsp) jne 0x90143 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x625c0 jmp 0x9015b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x61dd0 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
void beagle::cpu::BeagleCPU4StateSSEImpl<double, 2, 0>::accumulateDerivativesImpl<true, false, true>(double*, double*, double*)
// Accumulates per-pattern derivative statistics using SSE (2 doubles/vector).
// Which of the three outputs are produced is fixed at compile time by the
// DoDerivatives / DoSum / DoSumSquared template flags.
//
// outDerivatives           : per-pattern derivatives (written iff DoDerivatives).
// outSumDerivatives        : pattern-weighted sum of derivatives (iff DoSum).
// outSumSquaredDerivatives : pattern-weighted sum of squared derivatives (iff DoSumSquared).
//
// Consumes member buffers grandNumeratorDerivTmp, grandDenominatorDerivTmp,
// gPatternWeights (kPatternCount entries; assumed aligned for VEC_LOAD).
void BeagleCPU4StateSSEImpl<BEAGLE_CPU_4_SSE_DOUBLE>::accumulateDerivativesImpl(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {

    V_Real vSum = VEC_SETZERO();
    V_Real vSumSquared = VEC_SETZERO();

    // Vector loop: two patterns at a time.
    int k = 0;
    for (; k < kPatternCount - 1; k += 2) {
        V_Real numerator = VEC_LOAD(grandNumeratorDerivTmp + k);
        V_Real denominator = VEC_LOAD(grandDenominatorDerivTmp + k);
        V_Real derivative = VEC_DIV(numerator, denominator);
        V_Real patternWeight = VEC_LOAD(gPatternWeights + k);
        if (DoDerivatives) {
            // Unaligned store into the caller-provided output array.
            VEC_STOREU(outDerivatives + k, derivative);
        }
        if (DoSum) {
            vSum = VEC_MADD(derivative, patternWeight, vSum);
        }
        if (DoSumSquared) {
            // Square before weighting; 'derivative' is dead after this point.
            derivative = VEC_MULT(derivative, derivative);
            vSumSquared = VEC_MADD(derivative, patternWeight, vSumSquared);
        }
    }

    double sum;
    double sumSquared;
    // Horizontal add of the two lanes of each accumulator.
    if (DoSum) {
        sum = _mm_cvtsd_f64(VEC_ADD(vSum, VEC_SWAP(vSum)));
    }
    if (DoSumSquared) {
        sumSquared = _mm_cvtsd_f64(VEC_ADD(vSumSquared, VEC_SWAP(vSumSquared)));
    }

    // Scalar tail for an odd pattern count.
    for (; k < kPatternCount; ++k) {
        double derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            outDerivatives[k] = derivative;
        }
        if (DoSum) {
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }

    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
subq $0x138, %rsp # imm = 0x138 movq %rdi, 0x28(%rsp) movq %rsi, 0x20(%rsp) movq %rdx, 0x18(%rsp) movq %rcx, 0x10(%rsp) movq 0x28(%rsp), %rax movq %rax, -0x80(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0xf0(%rsp) vmovapd 0xf0(%rsp), %xmm1 vmovapd %xmm1, (%rsp) vmovapd %xmm0, 0xe0(%rsp) vmovapd 0xe0(%rsp), %xmm0 vmovapd %xmm0, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax movl 0x14(%rcx), %ecx subl $0x1, %ecx cmpl %ecx, %eax jge 0x90534 movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xd8(%rsp) movq 0xd8(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x30(%rsp) movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx shlq $0x3, %rdx addq %rdx, %rcx movq %rcx, 0xd0(%rsp) movq 0xd0(%rsp), %rcx vmovapd (%rcx), %xmm0 vmovapd %xmm0, -0x40(%rsp) vmovapd -0x30(%rsp), %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0x110(%rsp) vmovapd %xmm0, 0x100(%rsp) vmovapd 0x110(%rsp), %xmm0 vdivpd 0x100(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xc8(%rsp) movq 0xc8(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm0, -0x60(%rsp) movq 0x20(%rsp), %rax movslq -0x14(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax vmovapd -0x50(%rsp), %xmm0 movq %rax, 0x130(%rsp) vmovapd %xmm0, 0x120(%rsp) vmovapd 0x120(%rsp), %xmm0 movq 0x130(%rsp), %rax vmovupd %xmm0, (%rax) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x50(%rsp), %xmm0 vmovapd %xmm1, 0x60(%rsp) vmovapd %xmm0, 0x50(%rsp) vmovapd 0x60(%rsp), %xmm0 vmulpd 0x50(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x50(%rsp) vmovapd -0x50(%rsp), %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x40(%rsp) vmovapd %xmm0, 0x30(%rsp) vmovapd 0x40(%rsp), %xmm0 vmulpd 0x30(%rsp), %xmm0, %xmm1 vmovapd -0x10(%rsp), %xmm0 vmovapd %xmm1, 0xa0(%rsp) vmovapd %xmm0, 0x90(%rsp) vmovapd 0xa0(%rsp), %xmm0 vaddpd 0x90(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x10(%rsp) movl -0x14(%rsp), %eax addl $0x2, %eax 
movl %eax, -0x14(%rsp) jmp 0x90390 vmovapd -0x10(%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0x80(%rsp) vmovapd %xmm0, 0x70(%rsp) vmovapd 0x80(%rsp), %xmm0 vmovapd 0x70(%rsp), %xmm1 vaddpd %xmm1, %xmm0, %xmm0 vmovapd %xmm0, 0xb0(%rsp) vmovsd 0xb0(%rsp), %xmm0 vmovsd %xmm0, -0x70(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0x9060c movq -0x80(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x14(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x14(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x78(%rsp) vmovsd -0x78(%rsp), %xmm0 movq 0x20(%rsp), %rcx movslq -0x14(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd -0x78(%rsp), %xmm0 vmulsd -0x78(%rsp), %xmm0, %xmm1 movq 0x80(%rax), %rax movslq -0x14(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0x9057a vmovsd -0x70(%rsp), %xmm0 movq 0x10(%rsp), %rax vmovsd %xmm0, (%rax) addq $0x138, %rsp # imm = 0x138 retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPU4StateSSEImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::calcStatesStatesFixedScaling(double*, int const*, double const*, int const*, double const*, double const*, int, int)
// Computes destP for a node whose two children are both tip-state children,
// dividing each product by a per-pattern scale factor (fixed scaling).
// Patterns [startPattern, endPattern) are processed; categories run in
// parallel (one OpenMP thread per rate category).
//
// destP          : output partials, laid out [category][pattern][paddedState].
// child1States   : per-pattern state index of child 1.
// child1TransMat : transition matrix of child 1 (row stride kTransPaddedStateCount).
// child2States   : per-pattern state index of child 2.
// child2TransMat : transition matrix of child 2.
// scaleFactors   : per-pattern divisor applied to every state entry.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcStatesStatesFixedScaling(REALTYPE* destP,
                                                                     const int* child1States,
                                                                     const REALTYPE* child1TransMat,
                                                                     const int* child2States,
                                                                     const REALTYPE* child2TransMat,
                                                                     const REALTYPE* scaleFactors,
                                                                     int startPattern,
                                                                     int endPattern) {
#pragma omp parallel for num_threads(kCategoryCount)
    for (int l = 0; l < kCategoryCount; l++) {
        // v: linear write offset for this category's first pattern in the range.
        int v = l*kPartialsPaddedStateCount*kPatternCount + kPartialsPaddedStateCount*startPattern;
        for (int k = startPattern; k < endPattern; k++) {
            const int state1 = child1States[k];
            const int state2 = child2States[k];
            // w: offset of this category's transition matrix block.
            int w = l * kMatrixSize;
            REALTYPE scaleFactor = scaleFactors[k];
            for (int i = 0; i < kStateCount; i++) {
                // Both children are states, so the partial is just the product
                // of the two matrix entries for the observed states, rescaled.
                destP[v] = child1TransMat[w + state1] * child2TransMat[w + state2] / scaleFactor;
                v++;
                // Advance one (padded) matrix row.
                w += kTransPaddedStateCount;
            }
            if (P_PAD) {
                // Zero the padding slots so downstream reads are well-defined.
                for (int pad = 0; pad < P_PAD; pad++) {
                    destP[v] = 0.0;
                    v++;
                }
            }
        }
    }
}
movl 0x18(%rsp), %eax movl 0x10(%rsp), %eax movq 0x8(%rsp), %rax movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq %r8, -0x28(%rsp) movq %r9, -0x30(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x60(%rsp) movl $0x0, -0x34(%rsp) movq -0x60(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0xaa1d7 movq -0x60(%rsp), %rcx movl -0x34(%rsp), %eax imull 0x2c(%rcx), %eax imull 0x14(%rcx), %eax movl 0x2c(%rcx), %ecx imull 0x10(%rsp), %ecx addl %ecx, %eax movl %eax, -0x38(%rsp) movl 0x10(%rsp), %eax movl %eax, -0x3c(%rsp) movl -0x3c(%rsp), %eax cmpl 0x18(%rsp), %eax jge 0xaa1c5 movq -0x60(%rsp), %rcx movq -0x18(%rsp), %rax movslq -0x3c(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, -0x40(%rsp) movq -0x28(%rsp), %rax movslq -0x3c(%rsp), %rdx movl (%rax,%rdx,4), %eax movl %eax, -0x44(%rsp) movl -0x34(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x48(%rsp) movq 0x8(%rsp), %rax movslq -0x3c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x50(%rsp) movl $0x0, -0x54(%rsp) movq -0x60(%rsp), %rcx movl -0x54(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xaa179 movq -0x60(%rsp), %rax movq -0x20(%rsp), %rcx movl -0x48(%rsp), %edx addl -0x40(%rsp), %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq -0x30(%rsp), %rcx movl -0x48(%rsp), %edx addl -0x44(%rsp), %edx movslq %edx, %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm0 vdivsd -0x50(%rsp), %xmm0, %xmm0 movq -0x10(%rsp), %rcx movslq -0x38(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) movl -0x38(%rsp), %ecx addl $0x1, %ecx movl %ecx, -0x38(%rsp) movl 0x28(%rax), %eax addl -0x48(%rsp), %eax movl %eax, -0x48(%rsp) movl -0x54(%rsp), %eax addl $0x1, %eax movl %eax, -0x54(%rsp) jmp 0xaa104 movl $0x0, -0x58(%rsp) cmpl $0x1, -0x58(%rsp) jge 0xaa1b3 movq -0x10(%rsp), %rax movslq -0x38(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl -0x38(%rsp), %eax addl $0x1, %eax movl %eax, -0x38(%rsp) movl -0x58(%rsp), %eax addl $0x1, %eax movl %eax, -0x58(%rsp) jmp 0xaa181 jmp 0xaa1b5 movl 
-0x3c(%rsp), %eax addl $0x1, %eax movl %eax, -0x3c(%rsp) jmp 0xaa0a6 jmp 0xaa1c7 movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xaa06d retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::calcStatesPartialsFixedScaling(double*, int const*, double const*, double const*, double const*, double const*, int, int)
// Computes destP for a node with one tip-state child (states1/matrices1) and
// one partials child (partials2/matrices2), applying per-pattern fixed
// scaling. Patterns [startPattern, endPattern); one OpenMP thread per rate
// category. The inner dot product is unrolled 4-wide into two independent
// accumulators (sumA/sumB) to expose instruction-level parallelism; note the
// reassociated summation may differ in rounding from a strict serial sum.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcStatesPartialsFixedScaling(REALTYPE* destP,
                                                                       const int* states1,
                                                                       const REALTYPE* matrices1,
                                                                       const REALTYPE* partials2,
                                                                       const REALTYPE* matrices2,
                                                                       const REALTYPE* scaleFactors,
                                                                       int startPattern,
                                                                       int endPattern) {
    int matrixIncr = kStateCount;
    // increment for the extra column at the end
    matrixIncr += T_PAD;
    // Largest multiple of 4 <= kStateCount: bound of the unrolled loop.
    int stateCountModFour = (kStateCount / 4) * 4;

#pragma omp parallel for num_threads(kCategoryCount)
    for (int l = 0; l < kCategoryCount; l++) {
        // v: linear offset of this category's first pattern in the range.
        int v = l*kPartialsPaddedStateCount*kPatternCount + kPartialsPaddedStateCount*startPattern;
        int matrixOffset = l*kMatrixSize;
        const REALTYPE* partials2Ptr = &partials2[v];
        REALTYPE* destPtr = &destP[v];
        for (int k = startPattern; k < endPattern; k++) {
            int w = l * kMatrixSize;
            int state1 = states1[k];
            // Multiply by the reciprocal instead of dividing in the inner loop.
            REALTYPE oneOverScaleFactor = REALTYPE(1.0) / scaleFactors[k];
            for (int i = 0; i < kStateCount; i++) {
                // Row i of child 2's (padded) transition matrix.
                const REALTYPE* matrices2Ptr = matrices2 + matrixOffset + i * matrixIncr;
                // Child 1 contributes a single matrix entry (it is a known state).
                REALTYPE tmp = matrices1[w + state1];
                REALTYPE sumA = 0.0;
                REALTYPE sumB = 0.0;
                int j = 0;
                // 4-way unrolled dot product, two accumulators.
                for (; j < stateCountModFour; j += 4) {
                    sumA += matrices2Ptr[j + 0] * partials2Ptr[j + 0];
                    sumB += matrices2Ptr[j + 1] * partials2Ptr[j + 1];
                    sumA += matrices2Ptr[j + 2] * partials2Ptr[j + 2];
                    sumB += matrices2Ptr[j + 3] * partials2Ptr[j + 3];
                }
                // Remainder when kStateCount is not a multiple of 4.
                for (; j < kStateCount; j++) {
                    sumA += matrices2Ptr[j] * partials2Ptr[j];
                }
                w += matrixIncr;
                *(destPtr++) = tmp * (sumA + sumB) * oneOverScaleFactor;
            }
            if (P_PAD) {
                // Zero the per-pattern padding slots.
                for (int pad = 0; pad < P_PAD; pad++) {
                    *(destPtr++) = 0.0;
                }
            }
            partials2Ptr += kPartialsPaddedStateCount;
        }
    }
}
subq $0x28, %rsp movl 0x40(%rsp), %eax movl 0x38(%rsp), %eax movq 0x30(%rsp), %rax movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq %r8, (%rsp) movq %r9, -0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, -0x80(%rsp) movl 0x24(%rax), %ecx movl %ecx, -0xc(%rsp) movl -0xc(%rsp), %ecx addl $0x1, %ecx movl %ecx, -0xc(%rsp) movl 0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, -0x10(%rsp) movl $0x0, -0x14(%rsp) movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0xaa589 movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax imull 0x2c(%rcx), %eax imull 0x14(%rcx), %eax movl 0x2c(%rcx), %edx imull 0x38(%rsp), %edx addl %edx, %eax movl %eax, -0x18(%rsp) movl -0x14(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x1c(%rsp) movq (%rsp), %rax movslq -0x18(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x28(%rsp) movq 0x18(%rsp), %rax movslq -0x18(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x30(%rsp) movl 0x38(%rsp), %eax movl %eax, -0x34(%rsp) movl -0x34(%rsp), %eax cmpl 0x40(%rsp), %eax jge 0xaa577 movq -0x80(%rsp), %rcx movl -0x14(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x38(%rsp) movq 0x10(%rsp), %rax movslq -0x34(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x3c(%rsp) movq 0x30(%rsp), %rax movslq -0x34(%rsp), %rcx vmovsd 0xed12(%rip), %xmm0 # 0xb9008 vdivsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x48(%rsp) movl $0x0, -0x4c(%rsp) movq -0x80(%rsp), %rcx movl -0x4c(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xaa516 movq -0x8(%rsp), %rax movslq -0x1c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movl -0x4c(%rsp), %ecx imull -0xc(%rsp), %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x58(%rsp) movq 0x8(%rsp), %rax movl -0x38(%rsp), %ecx addl -0x3c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x60(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x68(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x70(%rsp) movl $0x0, -0x74(%rsp) 
movl -0x74(%rsp), %eax cmpl -0x10(%rsp), %eax jge 0xaa47d movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x28(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x28(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x28(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x28(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x74(%rsp), %eax addl $0x4, %eax movl %eax, -0x74(%rsp) jmp 0xaa37b jmp 0xaa47f movq -0x80(%rsp), %rcx movl -0x74(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xaa4c9 movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x28(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movl -0x74(%rsp), %eax addl $0x1, %eax movl %eax, -0x74(%rsp) jmp 0xaa47f movl -0xc(%rsp), %eax addl -0x38(%rsp), %eax movl %eax, -0x38(%rsp) vmovsd -0x60(%rsp), %xmm0 vmovsd -0x68(%rsp), 
%xmm1 vaddsd -0x70(%rsp), %xmm1, %xmm1 vmulsd %xmm1, %xmm0, %xmm0 vmulsd -0x48(%rsp), %xmm0, %xmm0 movq -0x30(%rsp), %rax movq %rax, %rcx addq $0x8, %rcx movq %rcx, -0x30(%rsp) vmovsd %xmm0, (%rax) movl -0x4c(%rsp), %eax addl $0x1, %eax movl %eax, -0x4c(%rsp) jmp 0xaa309 movl $0x0, -0x78(%rsp) cmpl $0x1, -0x78(%rsp) jge 0xaa54b movq -0x30(%rsp), %rax movq %rax, %rcx addq $0x8, %rcx movq %rcx, -0x30(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl -0x78(%rsp), %eax addl $0x1, %eax movl %eax, -0x78(%rsp) jmp 0xaa51e movq -0x80(%rsp), %rax movl 0x2c(%rax), %ecx movq -0x28(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x28(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xaa2b4 jmp 0xaa579 movl -0x14(%rsp), %eax addl $0x1, %eax movl %eax, -0x14(%rsp) jmp 0xaa244 addq $0x28, %rsp retq nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::calcPreStatesPartialsFixedScaling(double*, int const*, double const*, double const*, double const*, double const*, int, int)
// calcPreStatesPartialsFixedScaling
//
// Intentionally a no-op: the pre-order states/partials fixed-scaling kernel is
// currently disabled (its reference implementation is preserved in the comment
// below for future reinstatement). The previous body still computed
// `matrixIncr` and `stateCountModFour`, which were dead stores feeding nothing;
// they have been removed. All parameters are explicitly voided to document the
// intent and silence -Wunused-parameter.
//
// Signature is kept identical to the other calc*FixedScaling kernels so the
// dispatch sites remain uniform.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcPreStatesPartialsFixedScaling(REALTYPE* destP,
                                                                          const int* states1,
                                                                          const REALTYPE* matrices1,
                                                                          const REALTYPE* partials2,
                                                                          const REALTYPE* matrices2,
                                                                          const REALTYPE* scaleFactors,
                                                                          int startPattern,
                                                                          int endPattern) {
    (void)destP; (void)states1; (void)matrices1; (void)partials2;
    (void)matrices2; (void)scaleFactors; (void)startPattern; (void)endPattern;

    // Reference implementation (disabled):
    //
    // int matrixIncr = kStateCount;
    // matrixIncr += T_PAD; // extra column at the end
    // int stateCountModFour = (kStateCount / 4) * 4;
    //
    // #pragma omp parallel for num_threads(kCategoryCount)
    // for (int l = 0; l < kCategoryCount; l++) {
    //     int v = l*kPartialsPaddedStateCount*kPatternCount + kPartialsPaddedStateCount*startPattern;
    //     int matrixOffset = l*kMatrixSize;
    //     const REALTYPE* partials2Ptr = &partials2[v];
    //     REALTYPE* destPtr = &destP[v];
    //     for (int k = startPattern; k < endPattern; k++) {
    //         int w = l * kMatrixSize;
    //         int state1 = states1[k];
    //         REALTYPE oneOverScaleFactor = REALTYPE(1.0) / scaleFactors[k];
    //         for (int i = 0; i < kStateCount; i++) {
    //             const REALTYPE* matrices2Ptr = matrices2 + matrixOffset + i * matrixIncr;
    //             REALTYPE tmp = matrices1[w + state1];
    //             REALTYPE sumA = 0.0;
    //             REALTYPE sumB = 0.0;
    //             int j = 0;
    //             for (; j < stateCountModFour; j += 4) {
    //                 sumA += matrices2Ptr[j + 0] * partials2Ptr[j + 0];
    //                 sumB += matrices2Ptr[j + 1] * partials2Ptr[j + 1];
    //                 sumA += matrices2Ptr[j + 2] * partials2Ptr[j + 2];
    //                 sumB += matrices2Ptr[j + 3] * partials2Ptr[j + 3];
    //             }
    //             for (; j < kStateCount; j++) {
    //                 sumA += matrices2Ptr[j] * partials2Ptr[j];
    //             }
    //             w += matrixIncr;
    //             *(destPtr++) = tmp * (sumA + sumB) * oneOverScaleFactor;
    //         }
    //         destPtr += P_PAD;
    //         partials2Ptr += kPartialsPaddedStateCount;
    //     }
    // }
}
movl 0x18(%rsp), %eax movl 0x10(%rsp), %eax movq 0x8(%rsp), %rax movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq %r8, -0x28(%rsp) movq %r9, -0x30(%rsp) movq -0x8(%rsp), %rax movl 0x24(%rax), %ecx movl %ecx, -0x34(%rsp) movl -0x34(%rsp), %ecx addl $0x1, %ecx movl %ecx, -0x34(%rsp) movl 0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, -0x38(%rsp) retq nopw %cs:(%rax,%rax) nop
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUSSEImpl<double, 1, 1>::calcPartialsPartialsAutoScaling(double*, double const*, double const*, double const*, double const*, int*)
// Thin forwarding wrapper: the double-precision SSE specialization provides no
// dedicated auto-scaling partials*partials kernel, so delegate directly to the
// generic base-class implementation with identical arguments.
BEAGLE_CPU_SSE_TEMPLATE
void BeagleCPUSSEImpl<BEAGLE_CPU_SSE_DOUBLE>::calcPartialsPartialsAutoScaling(double* destP,
                                                                              const double* partials_q,
                                                                              const double* matrices_q,
                                                                              const double* partials_r,
                                                                              const double* matrices_r,
                                                                              int* activateScaling) {
    BeagleCPUImpl<BEAGLE_CPU_SSE_DOUBLE>::calcPartialsPartialsAutoScaling(destP,
                                                                          partials_q,
                                                                          matrices_q,
                                                                          partials_r,
                                                                          matrices_r,
                                                                          activateScaling);
}
subq $0x38, %rsp movq 0x40(%rsp), %rax movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rdx, 0x20(%rsp) movq %rcx, 0x18(%rsp) movq %r8, 0x10(%rsp) movq %r9, 0x8(%rsp) movq 0x30(%rsp), %rdi movq 0x28(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x18(%rsp), %rcx movq 0x10(%rsp), %r8 movq 0x8(%rsp), %r9 movq 0x40(%rsp), %rax movq %rax, (%rsp) callq 0x5f1c0 addq $0x38, %rsp retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUSSEImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::~BeagleCPUImpl()
// Destructor: releases every buffer allocated over the instance's lifetime.
// The teardown order mirrors the allocation structure (per-eigen arrays, then
// matrices, then partials/tip states, then scalers, then scratch buffers,
// then threading state). Many pointers are freed unconditionally — this
// relies on them being either valid or NULL (free(NULL) is a no-op).
BEAGLE_CPU_TEMPLATE
BeagleCPUImpl<BEAGLE_CPU_GENERIC>::~BeagleCPUImpl() {
    // free all that stuff...
    // If you delete partials, make sure not to delete the last element
    // which is TEMP_SCRATCH_PARTIAL twice.

    // Per-eigen-decomposition weight/frequency arrays.
    for(unsigned int i=0; i<kEigenDecompCount; i++) {
        if (gCategoryWeights[i] != NULL)
            free(gCategoryWeights[i]);
        if (gStateFrequencies[i] != NULL)
            free(gStateFrequencies[i]);
    }

    // Transition matrices: each entry, then the table of pointers itself.
    for(unsigned int i=0; i<kMatrixCount; i++) {
        if (gTransitionMatrices[i] != NULL)
            free(gTransitionMatrices[i]);
    }
    free(gTransitionMatrices);

    // Partials and tip-state buffers (a buffer index holds one or the other;
    // the NULL checks make freeing both arrays safe).
    for(unsigned int i=0; i<kBufferCount; i++) {
        if (gPartials[i] != NULL)
            free(gPartials[i]);
        if (gTipStates[i] != NULL)
            free(gTipStates[i]);
    }
    free(gPartials);
    free(gTipStates);

    // Scale buffers: layout differs under auto-scaling (only gScaleBuffers[0]
    // is an owned allocation in that mode).
    if (kFlags & BEAGLE_FLAG_SCALING_AUTO) {
        for(unsigned int i=0; i<kScaleBufferCount; i++) {
            if (gAutoScaleBuffers[i] != NULL)
                free(gAutoScaleBuffers[i]);
        }
        if (gAutoScaleBuffers)
            free(gAutoScaleBuffers);
        free(gActiveScalingFactors);
        if (gScaleBuffers[0] != NULL)
            free(gScaleBuffers[0]);
    } else {
        for(unsigned int i=0; i<kScaleBufferCount; i++) {
            if (gScaleBuffers[i] != NULL)
                free(gScaleBuffers[i]);
        }
    }
    if (gScaleBuffers)
        free(gScaleBuffers);

    free(gCategoryRates);
    free(gPatternWeights);

    // Partition bookkeeping only exists once partitions were initialised.
    if (kPartitionsInitialised) {
        free(gPatternPartitions);
        free(gPatternPartitionsStartPatterns);
        if (kPatternsReordered) {
            free(gPatternsNewOrder);
        }
    }

    // Scratch buffers for likelihood/derivative computation.
    free(integrationTmp);
    free(firstDerivTmp);
    free(secondDerivTmp);
    // free(cLikelihoodTmp);
    free(grandDenominatorDerivTmp);
    // free(grandNumeratorUpperBoundDerivTmp);
    // free(grandNumeratorLowerBoundDerivTmp);
    free(grandNumeratorDerivTmp);

    if (crossProductNumeratorTmp != nullptr) {
        free(crossProductNumeratorTmp);
    }

    free(outLogLikelihoodsTmp);
    free(outFirstDerivativesTmp);
    free(outSecondDerivativesTmp);

    free(ones);
    free(zeros);

    delete gEigenDecomposition;

    if (kThreadingEnabled) {
        // Send stop signal to all threads and join them...
        for (int i = 0; i < kNumThreads; i++) {
            threadData* td = &gThreads[i];
            // Hold the thread's mutex while setting the flag so the worker
            // cannot miss the notify between its check and its wait.
            std::unique_lock<std::mutex> l(td->m);
            td->stop = true;
            td->cv.notify_one();
        }
        // Join all the threads
        for (int i = 0; i < kNumThreads; i++) {
            threadData* td = &gThreads[i];
            td->t.join();
        }
        delete[] gThreads;
        delete[] gFutures;
        for (int i=0; i<kNumThreads; i++) {
            free(gThreadOperations[i]);
        }
        free(gThreadOperations);
        free(gThreadOpCounts);
    }

    if (kAutoPartitioningEnabled) {
        free(gAutoPartitionOperations);
        if (kAutoRootPartitioningEnabled) {
            free(gAutoPartitionIndices);
            free(gAutoPartitionOutSumLogLikelihoods);
        }
    }
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) callq 0x5ffe0 movq 0x8(%rsp), %rdi callq 0x61b60 addq $0x18, %rsp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::calcStatesPartials(double*, int const*, double const*, double const*, double const*, int, int)
// Computes destP for a node with one tip-state child (states1/matrices1) and
// one partials child (partials2/matrices2) — the unscaled counterpart of
// calcStatesPartialsFixedScaling. Patterns [startPattern, endPattern); one
// OpenMP thread per rate category. The inner dot product is unrolled 4-wide
// into two accumulators (sumA/sumB) for instruction-level parallelism; the
// reassociated summation may round differently than a strict serial sum.
BEAGLE_CPU_TEMPLATE
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::calcStatesPartials(REALTYPE* destP,
                                                           const int* states1,
                                                           const REALTYPE* matrices1,
                                                           const REALTYPE* partials2,
                                                           const REALTYPE* matrices2,
                                                           int startPattern,
                                                           int endPattern) {
    int matrixIncr = kStateCount;
    // increment for the extra column at the end
    matrixIncr += T_PAD;
    // Largest multiple of 4 <= kStateCount: bound of the unrolled loop.
    int stateCountModFour = (kStateCount / 4) * 4;

#pragma omp parallel for num_threads(kCategoryCount)
    for (int l = 0; l < kCategoryCount; l++) {
        // v: linear offset of this category's first pattern in the range.
        int v = l*kPartialsPaddedStateCount*kPatternCount + kPartialsPaddedStateCount*startPattern;
        int matrixOffset = l*kMatrixSize;
        const REALTYPE* partials2Ptr = &partials2[v];
        REALTYPE* destPtr = &destP[v];
        for (int k = startPattern; k < endPattern; k++) {
            int w = l * kMatrixSize;
            int state1 = states1[k];
            for (int i = 0; i < kStateCount; i++) {
                // Row i of child 2's (padded) transition matrix.
                const REALTYPE* matrices2Ptr = matrices2 + matrixOffset + i * matrixIncr;
                // Child 1 contributes a single matrix entry (known state).
                REALTYPE tmp = matrices1[w + state1];
                REALTYPE sumA = 0.0;
                REALTYPE sumB = 0.0;
                int j = 0;
                // 4-way unrolled dot product with two accumulators.
                for (; j < stateCountModFour; j += 4) {
                    sumA += matrices2Ptr[j + 0] * partials2Ptr[j + 0];
                    sumB += matrices2Ptr[j + 1] * partials2Ptr[j + 1];
                    sumA += matrices2Ptr[j + 2] * partials2Ptr[j + 2];
                    sumB += matrices2Ptr[j + 3] * partials2Ptr[j + 3];
                }
                // Remainder when kStateCount is not a multiple of 4.
                for (; j < kStateCount; j++) {
                    sumA += matrices2Ptr[j] * partials2Ptr[j];
                }
                w += matrixIncr;
                *(destPtr++) = tmp * (sumA + sumB);
            }
            if (P_PAD) {
                // Zero the per-pattern padding slots.
                for (int pad = 0; pad < P_PAD; pad++) {
                    *(destPtr++) = 0.0;
                }
            }
            partials2Ptr += kPartialsPaddedStateCount;
        }
    }
}
subq $0x18, %rsp movl 0x28(%rsp), %eax movl 0x20(%rsp), %eax movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq %rdx, (%rsp) movq %rcx, -0x8(%rsp) movq %r8, -0x10(%rsp) movq %r9, -0x18(%rsp) movq 0x10(%rsp), %rax movq %rax, -0x80(%rsp) movl 0x24(%rax), %ecx movl %ecx, -0x1c(%rsp) movl -0x1c(%rsp), %ecx addl $0x1, %ecx movl %ecx, -0x1c(%rsp) movl 0x24(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, -0x20(%rsp) movl $0x0, -0x24(%rsp) movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0xabe61 movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax imull 0x2c(%rcx), %eax imull 0x14(%rcx), %eax movl 0x2c(%rcx), %edx imull 0x20(%rsp), %edx addl %edx, %eax movl %eax, -0x28(%rsp) movl -0x24(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x2c(%rsp) movq -0x10(%rsp), %rax movslq -0x28(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x38(%rsp) movq 0x8(%rsp), %rax movslq -0x28(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x40(%rsp) movl 0x20(%rsp), %eax movl %eax, -0x44(%rsp) movl -0x44(%rsp), %eax cmpl 0x28(%rsp), %eax jge 0xabe4f movq -0x80(%rsp), %rcx movl -0x24(%rsp), %eax imull 0x40(%rcx), %eax movl %eax, -0x48(%rsp) movq (%rsp), %rax movslq -0x44(%rsp), %rcx movl (%rax,%rcx,4), %eax movl %eax, -0x4c(%rsp) movl $0x0, -0x50(%rsp) movq -0x80(%rsp), %rcx movl -0x50(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xabdee movq -0x18(%rsp), %rax movslq -0x2c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movl -0x50(%rsp), %ecx imull -0x1c(%rsp), %ecx movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x58(%rsp) movq -0x8(%rsp), %rax movl -0x48(%rsp), %ecx addl -0x4c(%rsp), %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, -0x60(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x68(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x70(%rsp) movl $0x0, -0x74(%rsp) movl -0x74(%rsp), %eax cmpl -0x20(%rsp), %eax jge 0xabd5b movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), 
%xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x0, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x1, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x2, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movq -0x58(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movl -0x74(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x70(%rsp) movl -0x74(%rsp), %eax addl $0x4, %eax movl %eax, -0x74(%rsp) jmp 0xabc59 jmp 0xabd5d movq -0x80(%rsp), %rcx movl -0x74(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xabda7 movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq -0x38(%rsp), %rax movslq -0x74(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x68(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x68(%rsp) movl -0x74(%rsp), %eax addl $0x1, %eax movl %eax, -0x74(%rsp) jmp 0xabd5d movl -0x1c(%rsp), %eax addl -0x48(%rsp), %eax movl %eax, -0x48(%rsp) vmovsd -0x60(%rsp), %xmm0 vmovsd -0x68(%rsp), %xmm1 vaddsd -0x70(%rsp), %xmm1, %xmm1 vmulsd %xmm1, %xmm0, %xmm0 movq -0x40(%rsp), %rax movq %rax, %rcx addq $0x8, %rcx movq %rcx, -0x40(%rsp) vmovsd %xmm0, 
(%rax) movl -0x50(%rsp), %eax addl $0x1, %eax movl %eax, -0x50(%rsp) jmp 0xabbe7 movl $0x0, -0x78(%rsp) cmpl $0x1, -0x78(%rsp) jge 0xabe23 movq -0x40(%rsp), %rax movq %rax, %rcx addq $0x8, %rcx movq %rcx, -0x40(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax) movl -0x78(%rsp), %eax addl $0x1, %eax movl %eax, -0x78(%rsp) jmp 0xabdf6 movq -0x80(%rsp), %rax movl 0x2c(%rax), %ecx movq -0x38(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x38(%rsp) movl -0x44(%rsp), %eax addl $0x1, %eax movl %eax, -0x44(%rsp) jmp 0xabbb0 jmp 0xabe51 movl -0x24(%rsp), %eax addl $0x1, %eax movl %eax, -0x24(%rsp) jmp 0xabb3f addq $0x18, %rsp retq nopw %cs:(%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::EigenDecompositionSquare<double, 1>::~EigenDecompositionSquare()
// Releases every buffer this decomposition owns: the per-decomposition
// eigenvector, inverse-eigenvector and eigenvalue arrays, then the pointer
// tables themselves, and finally the scratch matrix.
BEAGLE_CPU_EIGEN_TEMPLATE
EigenDecompositionSquare<BEAGLE_CPU_EIGEN_GENERIC>::~EigenDecompositionSquare() {
    // Each decomposition slot owns three separately malloc'ed arrays.
    for (int idx = 0; idx < kEigenDecompCount; ++idx) {
        free(gEMatrices[idx]);
        free(gIMatrices[idx]);
        free(gEigenValues[idx]);
    }

    // Now the pointer tables and the temporary workspace.
    free(gEMatrices);
    free(gIMatrices);
    free(gEigenValues);
    free(matrixTmp);
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) callq 0x61070 movq 0x8(%rsp), %rdi callq 0x61b60 addq $0x18, %rsp retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/EigenDecompositionSquare.hpp
beagle::cpu::EigenDecompositionCube<double, 1>::~EigenDecompositionCube()
// Frees all heap storage owned by the cube decomposition: per-decomposition
// C-matrices and eigenvalues, their pointer tables, and the three scratch
// buffers used while building transition matrices.
BEAGLE_CPU_EIGEN_TEMPLATE
EigenDecompositionCube<BEAGLE_CPU_EIGEN_GENERIC>::~EigenDecompositionCube() {
    // Per-decomposition arrays first.
    for (int idx = 0; idx < kEigenDecompCount; ++idx) {
        free(gCMatrices[idx]);
        free(gEigenValues[idx]);
    }

    // Pointer tables.
    free(gCMatrices);
    free(gEigenValues);

    // Scratch workspace shared across updateTransitionMatrices calls.
    free(matrixTmp);
    free(firstDerivTmp);
    free(secondDerivTmp);
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rax movq %rax, (%rsp) movq 0x1f987(%rip), %rcx # 0xcdf40 addq $0x10, %rcx movq %rcx, (%rax) movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xae60e movq (%rsp), %rax movq 0x40(%rax), %rax movslq 0xc(%rsp), %rcx movq (%rax,%rcx,8), %rdi callq 0x62700 movq (%rsp), %rax movq 0x8(%rax), %rax movslq 0xc(%rsp), %rcx movq (%rax,%rcx,8), %rdi callq 0x62700 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0xae5c8 movq (%rsp), %rax movq 0x40(%rax), %rdi callq 0x62700 movq (%rsp), %rax movq 0x8(%rax), %rdi callq 0x62700 movq (%rsp), %rax movq 0x28(%rax), %rdi callq 0x62700 movq (%rsp), %rax movq 0x30(%rax), %rdi callq 0x62700 movq (%rsp), %rax movq 0x38(%rax), %rdi callq 0x62700 movq (%rsp), %rdi callq 0x62e60 addq $0x18, %rsp retq nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/EigenDecompositionCube.hpp
beagle::cpu::EigenDecompositionCube<double, 1>::updateTransitionMatrices(int, int const*, int const*, int const*, double const*, double const*, double**, int)
// Builds finite-time transition probability matrices P(t) = V exp(D*r*t) V^-1
// for `count` edges, and optionally their first and second derivatives with
// respect to edge length.  gCMatrices[eigenIndex] holds the outer products of
// eigenvectors and inverse eigenvectors, so each matrix entry is a dot product
// of a C-matrix row with the vector of exponentiated scaled eigenvalues.
//
// Which outputs are produced is selected by which index arrays are non-NULL:
//   - only probabilityIndices        -> P(t) only
//   - + firstDerivativeIndices       -> P(t) and dP/dt
//   - + secondDerivativeIndices      -> P(t), dP/dt and d2P/dt2
BEAGLE_CPU_EIGEN_TEMPLATE
void EigenDecompositionCube<BEAGLE_CPU_EIGEN_GENERIC>::updateTransitionMatrices(int eigenIndex,
                                                                                const int* probabilityIndices,
                                                                                const int* firstDerivativeIndices,
                                                                                const int* secondDerivativeIndices,
                                                                                const double* edgeLengths,
                                                                                const double* categoryRates,
                                                                                REALTYPE** transitionMatrices,
                                                                                int count) {
#ifdef UNROLL
    // Largest multiple of 4 <= kStateCount, for the 4-way unrolled dot product.
    int stateCountModFour = (kStateCount / 4) * 4;
#endif

    // NOTE(review): a large block of dead commented-out code was removed here.
    // It handled a probabilityIndices == NULL mode that stored only derivative
    // matrices (for derivatives through pre-order traversals); recover it from
    // version control if that mode is ever resurrected.

    if (firstDerivativeIndices == NULL && secondDerivativeIndices == NULL) {
        // --- Probabilities only ---
        for (int u = 0; u < count; u++) {
            REALTYPE* transitionMat = transitionMatrices[probabilityIndices[u]];
            int n = 0; // write cursor into the padded output matrix
            for (int l = 0; l < kCategoryCount; l++) {
                // exp(lambda_i * r_l * t_u) for every eigenvalue.
                for (int i = 0; i < kStateCount; i++) {
                    matrixTmp[i] = exp(gEigenValues[eigenIndex][i] * ((REALTYPE)edgeLengths[u] * categoryRates[l]));
                }
                REALTYPE* tmpCMatrices = gCMatrices[eigenIndex];
                for (int i = 0; i < kStateCount; i++) {
                    for (int j = 0; j < kStateCount; j++) {
                        REALTYPE sum = 0.0;
#ifdef UNROLL
                        int k = 0;
                        for (; k < stateCountModFour; k += 4) {
                            sum += tmpCMatrices[k + 0] * matrixTmp[k + 0];
                            sum += tmpCMatrices[k + 1] * matrixTmp[k + 1];
                            sum += tmpCMatrices[k + 2] * matrixTmp[k + 2];
                            sum += tmpCMatrices[k + 3] * matrixTmp[k + 3];
                        }
                        for (; k < kStateCount; k++) {
                            sum += tmpCMatrices[k] * matrixTmp[k];
                        }
                        tmpCMatrices += kStateCount;
#else
                        for (int k = 0; k < kStateCount; k++) {
                            sum += *tmpCMatrices++ * matrixTmp[k];
                        }
#endif
                        // Clamp tiny negative round-off to zero: probabilities
                        // must be non-negative.
                        if (sum > 0)
                            transitionMat[n] = sum;
                        else
                            transitionMat[n] = 0;
                        n++;
                    }
                    // Padding column gets 1.0 (harmless under matrix-vector use).
                    if (T_PAD != 0) {
                        transitionMat[n] = 1.0;
                        n += T_PAD;
                    }
                }
            }
            if (DEBUGGING_OUTPUT) {
                int kMatrixSize = kStateCount * kStateCount;
                fprintf(stderr,"transitionMat index=%d brlen=%.5f\n", probabilityIndices[u], edgeLengths[u]);
                for ( int w = 0; w < (20 > kMatrixSize ? 20 : kMatrixSize); ++w)
                    fprintf(stderr,"transitionMat[%d] = %.5f\n", w, transitionMat[w]);
            }
        }
    } else if (secondDerivativeIndices == NULL) {
        // --- Probabilities + first derivatives ---
        for (int u = 0; u < count; u++) {
            REALTYPE* transitionMat = transitionMatrices[probabilityIndices[u]];
            REALTYPE* firstDerivMat = transitionMatrices[firstDerivativeIndices[u]];
            int n = 0;
            for (int l = 0; l < kCategoryCount; l++) {
                for (int i = 0; i < kStateCount; i++) {
                    // d/dt exp(lambda*r*t) = (lambda*r) * exp(lambda*r*t)
                    REALTYPE scaledEigenValue = gEigenValues[eigenIndex][i] * ((REALTYPE)categoryRates[l]);
                    matrixTmp[i] = exp(scaledEigenValue * ((REALTYPE)edgeLengths[u]));
                    firstDerivTmp[i] = scaledEigenValue * matrixTmp[i];
                }
                int m = 0; // linear cursor through gCMatrices[eigenIndex]
                for (int i = 0; i < kStateCount; i++) {
                    for (int j = 0; j < kStateCount; j++) {
                        REALTYPE sum = 0.0;
                        REALTYPE sumD1 = 0.0;
                        for (int k = 0; k < kStateCount; k++) {
                            sum += gCMatrices[eigenIndex][m] * matrixTmp[k]; /// gCMatrices[eigenIndex] = eigenVectors \otimes InveigenVectors
                            sumD1 += gCMatrices[eigenIndex][m] * firstDerivTmp[k];
                            m++;
                        }
                        if (sum > 0)
                            transitionMat[n] = sum;
                        else
                            transitionMat[n] = 0;
                        firstDerivMat[n] = sumD1;
                        n++;
                    }
                    if (T_PAD != 0) {
                        transitionMat[n] = 1.0;
                        firstDerivMat[n] = 0.0;
                        n += T_PAD;
                    }
                }
            }
        }
    } else {
        // --- Probabilities + first and second derivatives ---
        for (int u = 0; u < count; u++) {
            REALTYPE* transitionMat = transitionMatrices[probabilityIndices[u]];
            REALTYPE* firstDerivMat = transitionMatrices[firstDerivativeIndices[u]];
            REALTYPE* secondDerivMat = transitionMatrices[secondDerivativeIndices[u]];
            int n = 0;
            for (int l = 0; l < kCategoryCount; l++) {
                for (int i = 0; i < kStateCount; i++) {
                    // Successive powers of (lambda*r) times exp(lambda*r*t).
                    REALTYPE scaledEigenValue = gEigenValues[eigenIndex][i] * ((REALTYPE)categoryRates[l]);
                    matrixTmp[i] = exp(scaledEigenValue * ((REALTYPE)edgeLengths[u]));
                    firstDerivTmp[i] = scaledEigenValue * matrixTmp[i];
                    secondDerivTmp[i] = scaledEigenValue * firstDerivTmp[i];
                }
                int m = 0;
                for (int i = 0; i < kStateCount; i++) {
                    for (int j = 0; j < kStateCount; j++) {
                        REALTYPE sum = 0.0;
                        REALTYPE sumD1 = 0.0;
                        REALTYPE sumD2 = 0.0;
                        for (int k = 0; k < kStateCount; k++) {
                            sum += gCMatrices[eigenIndex][m] * matrixTmp[k];
                            sumD1 += gCMatrices[eigenIndex][m] * firstDerivTmp[k];
                            sumD2 += gCMatrices[eigenIndex][m] * secondDerivTmp[k];
                            m++;
                        }
                        if (sum > 0)
                            transitionMat[n] = sum;
                        else
                            transitionMat[n] = 0;
                        firstDerivMat[n] = sumD1;
                        secondDerivMat[n] = sumD2;
                        n++;
                    }
                    if (T_PAD != 0) {
                        transitionMat[n] = 1.0;
                        firstDerivMat[n] = 0.0;
                        secondDerivMat[n] = 0.0;
                        n += T_PAD;
                    }
                }
            }
        }
    }
}
subq $0x128, %rsp # imm = 0x128 movl 0x140(%rsp), %eax movq 0x138(%rsp), %rax movq 0x130(%rsp), %rax movq %rdi, 0x120(%rsp) movl %esi, 0x11c(%rsp) movq %rdx, 0x110(%rsp) movq %rcx, 0x108(%rsp) movq %r8, 0x100(%rsp) movq %r9, 0xf8(%rsp) movq 0x120(%rsp), %rax movq %rax, (%rsp) movl 0x10(%rax), %eax movl $0x4, %ecx cltd idivl %ecx shll $0x2, %eax movl %eax, 0xf4(%rsp) cmpq $0x0, 0x108(%rsp) jne 0xaeda6 cmpq $0x0, 0x100(%rsp) jne 0xaeda6 movl $0x0, 0xf0(%rsp) movl 0xf0(%rsp), %eax cmpl 0x140(%rsp), %eax jge 0xaeda1 movq 0x138(%rsp), %rax movq 0x110(%rsp), %rcx movslq 0xf0(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0xe8(%rsp) movl $0x0, 0xe4(%rsp) movl $0x0, 0xe0(%rsp) movq (%rsp), %rcx movl 0xe0(%rsp), %eax cmpl 0x18(%rcx), %eax jge 0xaed89 movl $0x0, 0xdc(%rsp) movq (%rsp), %rcx movl 0xdc(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaea77 movq (%rsp), %rax movq 0x8(%rax), %rax movslq 0x11c(%rsp), %rcx movq (%rax,%rcx,8), %rax movslq 0xdc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0xf8(%rsp), %rax movslq 0xf0(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm1 movq 0x130(%rsp), %rax movslq 0xe0(%rsp), %rcx vmulsd (%rax,%rcx,8), %xmm1, %xmm1 vmulsd %xmm1, %xmm0, %xmm0 callq 0x645e0 movq (%rsp), %rax movq 0x28(%rax), %rax movslq 0xdc(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0xdc(%rsp), %eax addl $0x1, %eax movl %eax, 0xdc(%rsp) jmp 0xae9e8 movq (%rsp), %rax movq 0x40(%rax), %rax movslq 0x11c(%rsp), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0xd0(%rsp) movl $0x0, 0xcc(%rsp) movq (%rsp), %rcx movl 0xcc(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaed71 movl $0x0, 0xc8(%rsp) movq (%rsp), %rcx movl 0xc8(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaed2d vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0xc0(%rsp) movl $0x0, 0xbc(%rsp) movl 0xbc(%rsp), %eax cmpl 0xf4(%rsp), %eax jge 0xaec33 movq (%rsp), %rax movq 0xd0(%rsp), %rcx movl 0xbc(%rsp), %edx addl $0x0, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rcx movl 0xbc(%rsp), %edx addl $0x0, %edx 
movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0xc0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xc0(%rsp) movq 0xd0(%rsp), %rcx movl 0xbc(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rcx movl 0xbc(%rsp), %edx addl $0x1, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0xc0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xc0(%rsp) movq 0xd0(%rsp), %rcx movl 0xbc(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rcx movl 0xbc(%rsp), %edx addl $0x2, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0xc0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xc0(%rsp) movq 0xd0(%rsp), %rcx movl 0xbc(%rsp), %edx addl $0x3, %edx movslq %edx, %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rax movl 0xbc(%rsp), %ecx addl $0x3, %ecx movslq %ecx, %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xc0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xc0(%rsp) movl 0xbc(%rsp), %eax addl $0x4, %eax movl %eax, 0xbc(%rsp) jmp 0xaeae9 jmp 0xaec35 movq (%rsp), %rcx movl 0xbc(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaec99 movq (%rsp), %rax movq 0xd0(%rsp), %rcx movslq 0xbc(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rax movslq 0xbc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0xc0(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0xc0(%rsp) movl 0xbc(%rsp), %eax addl $0x1, %eax movl %eax, 0xbc(%rsp) jmp 0xaec35 movq (%rsp), %rax movl 0x10(%rax), %ecx movq 0xd0(%rsp), %rax movslq %ecx, %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0xd0(%rsp) vmovsd 0xc0(%rsp), %xmm0 vpxor %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jbe 0xaeced vmovsd 0xc0(%rsp), %xmm0 movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) jmp 0xaed06 movq 0xe8(%rsp), %rax movslq 
0xe4(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0xe4(%rsp), %eax addl $0x1, %eax movl %eax, 0xe4(%rsp) movl 0xc8(%rsp), %eax addl $0x1, %eax movl %eax, 0xc8(%rsp) jmp 0xaeabd movq 0xe8(%rsp), %rax movslq 0xe4(%rsp), %rcx vmovsd 0xa2c3(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, (%rax,%rcx,8) movl 0xe4(%rsp), %eax addl $0x1, %eax movl %eax, 0xe4(%rsp) movl 0xcc(%rsp), %eax addl $0x1, %eax movl %eax, 0xcc(%rsp) jmp 0xaea9e jmp 0xaed73 movl 0xe0(%rsp), %eax addl $0x1, %eax movl %eax, 0xe0(%rsp) jmp 0xae9c9 jmp 0xaed8b movl 0xf0(%rsp), %eax addl $0x1, %eax movl %eax, 0xf0(%rsp) jmp 0xae977 jmp 0xaf4fe cmpq $0x0, 0x100(%rsp) jne 0xaf152 movl $0x0, 0xb8(%rsp) movl 0xb8(%rsp), %eax cmpl 0x140(%rsp), %eax jge 0xaf14d movq 0x138(%rsp), %rax movq 0x110(%rsp), %rcx movslq 0xb8(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0xb0(%rsp) movq 0x138(%rsp), %rax movq 0x108(%rsp), %rcx movslq 0xb8(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0xa8(%rsp) movl $0x0, 0xa4(%rsp) movl $0x0, 0xa0(%rsp) movq (%rsp), %rcx movl 0xa0(%rsp), %eax cmpl 0x18(%rcx), %eax jge 0xaf135 movl $0x0, 0x9c(%rsp) movq (%rsp), %rcx movl 0x9c(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaef25 movq (%rsp), %rax movq 0x8(%rax), %rax movslq 0x11c(%rsp), %rcx movq (%rax,%rcx,8), %rax movslq 0x9c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x130(%rsp), %rax movslq 0xa0(%rsp), %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, 0x90(%rsp) vmovsd 0x90(%rsp), %xmm0 movq 0xf8(%rsp), %rax movslq 0xb8(%rsp), %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 callq 0x645e0 movq (%rsp), %rax movq 0x28(%rax), %rcx movslq 0x9c(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x90(%rsp), %xmm0 movq 0x28(%rax), %rcx movslq 0x9c(%rsp), %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm0 movq 0x30(%rax), %rax movslq 0x9c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x9c(%rsp), %eax addl $0x1, %eax movl %eax, 0x9c(%rsp) jmp 0xaee59 movl $0x0, 0x8c(%rsp) movl $0x0, 0x88(%rsp) 
movq (%rsp), %rcx movl 0x88(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf11d movl $0x0, 0x84(%rsp) movq (%rsp), %rcx movl 0x84(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf0c0 vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x78(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x70(%rsp) movl $0x0, 0x6c(%rsp) movq (%rsp), %rcx movl 0x6c(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf038 movq (%rsp), %rax movq 0x40(%rax), %rcx movslq 0x11c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movslq 0x8c(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rcx movslq 0x6c(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0x78(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x78(%rsp) movq 0x40(%rax), %rcx movslq 0x11c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movslq 0x8c(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x30(%rax), %rax movslq 0x6c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x70(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x70(%rsp) movl 0x8c(%rsp), %eax addl $0x1, %eax movl %eax, 0x8c(%rsp) movl 0x6c(%rsp), %eax addl $0x1, %eax movl %eax, 0x6c(%rsp) jmp 0xaef8a vmovsd 0x78(%rsp), %xmm0 vpxor %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jbe 0xaf065 vmovsd 0x78(%rsp), %xmm0 movq 0xb0(%rsp), %rax movslq 0xa4(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) jmp 0xaf07e movq 0xb0(%rsp), %rax movslq 0xa4(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) vmovsd 0x70(%rsp), %xmm0 movq 0xa8(%rsp), %rax movslq 0xa4(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0xa4(%rsp), %eax addl $0x1, %eax movl %eax, 0xa4(%rsp) movl 0x84(%rsp), %eax addl $0x1, %eax movl %eax, 0x84(%rsp) jmp 0xaef5a movq 0xb0(%rsp), %rax movslq 0xa4(%rsp), %rcx vmovsd 0x9f30(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, (%rax,%rcx,8) movq 0xa8(%rsp), %rax movslq 0xa4(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0xa4(%rsp), %eax addl $0x1, %eax movl %eax, 0xa4(%rsp) movl 0x88(%rsp), %eax addl $0x1, %eax movl %eax, 0x88(%rsp) jmp 
0xaef3b jmp 0xaf11f movl 0xa0(%rsp), %eax addl $0x1, %eax movl %eax, 0xa0(%rsp) jmp 0xaee3a jmp 0xaf137 movl 0xb8(%rsp), %eax addl $0x1, %eax movl %eax, 0xb8(%rsp) jmp 0xaedc0 jmp 0xaf4fc movl $0x0, 0x68(%rsp) movl 0x68(%rsp), %eax cmpl 0x140(%rsp), %eax jge 0xaf4fa movq 0x138(%rsp), %rax movq 0x110(%rsp), %rcx movslq 0x68(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x60(%rsp) movq 0x138(%rsp), %rax movq 0x108(%rsp), %rcx movslq 0x68(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x58(%rsp) movq 0x138(%rsp), %rax movq 0x100(%rsp), %rcx movslq 0x68(%rsp), %rdx movslq (%rcx,%rdx,4), %rcx movq (%rax,%rcx,8), %rax movq %rax, 0x50(%rsp) movl $0x0, 0x4c(%rsp) movl $0x0, 0x48(%rsp) movq (%rsp), %rcx movl 0x48(%rsp), %eax cmpl 0x18(%rcx), %eax jge 0xaf4e8 movl $0x0, 0x44(%rsp) movq (%rsp), %rcx movl 0x44(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf2c4 movq (%rsp), %rax movq 0x8(%rax), %rax movslq 0x11c(%rsp), %rcx movq (%rax,%rcx,8), %rax movslq 0x44(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 movq 0x130(%rsp), %rax movslq 0x48(%rsp), %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, 0x38(%rsp) vmovsd 0x38(%rsp), %xmm0 movq 0xf8(%rsp), %rax movslq 0x68(%rsp), %rcx vmulsd (%rax,%rcx,8), %xmm0, %xmm0 callq 0x645e0 movq (%rsp), %rax movq 0x28(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x38(%rsp), %xmm0 movq 0x28(%rax), %rcx movslq 0x44(%rsp), %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm0 movq 0x30(%rax), %rcx movslq 0x44(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd 0x38(%rsp), %xmm0 movq 0x30(%rax), %rcx movslq 0x44(%rsp), %rdx vmulsd (%rcx,%rdx,8), %xmm0, %xmm0 movq 0x38(%rax), %rax movslq 0x44(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x44(%rsp), %eax addl $0x1, %eax movl %eax, 0x44(%rsp) jmp 0xaf1fa movl $0x0, 0x34(%rsp) movl $0x0, 0x30(%rsp) movq (%rsp), %rcx movl 0x30(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf4d6 movl $0x0, 0x2c(%rsp) movq (%rsp), %rcx movl 0x2c(%rsp), %eax cmpl 
0x10(%rcx), %eax jge 0xaf47e vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x20(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x18(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, 0x10(%rsp) movl $0x0, 0xc(%rsp) movq (%rsp), %rcx movl 0xc(%rsp), %eax cmpl 0x10(%rcx), %eax jge 0xaf3ff movq (%rsp), %rax movq 0x40(%rax), %rcx movslq 0x11c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movslq 0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x28(%rax), %rcx movslq 0xc(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0x20(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x20(%rsp) movq 0x40(%rax), %rcx movslq 0x11c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movslq 0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x30(%rax), %rcx movslq 0xc(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd 0x18(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x18(%rsp) movq 0x40(%rax), %rcx movslq 0x11c(%rsp), %rdx movq (%rcx,%rdx,8), %rcx movslq 0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm1 movq 0x38(%rax), %rax movslq 0xc(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd 0x10(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, 0x10(%rsp) movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0xaf324 vmovsd 0x20(%rsp), %xmm0 vpxor %xmm1, %xmm1, %xmm1 vucomisd %xmm1, %xmm0 jbe 0xaf426 vmovsd 0x20(%rsp), %xmm0 movq 0x60(%rsp), %rax movslq 0x4c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) jmp 0xaf439 movq 0x60(%rsp), %rax movslq 0x4c(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) vmovsd 0x18(%rsp), %xmm0 movq 0x58(%rsp), %rax movslq 0x4c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) vmovsd 0x10(%rsp), %xmm0 movq 0x50(%rsp), %rax movslq 0x4c(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) movl 0x2c(%rsp), %eax addl $0x1, %eax movl %eax, 0x2c(%rsp) jmp 0xaf2ed movq 0x60(%rsp), %rax 
movslq 0x4c(%rsp), %rcx vmovsd 0x9b78(%rip), %xmm0 # 0xb9008 vmovsd %xmm0, (%rax,%rcx,8) movq 0x58(%rsp), %rax movslq 0x4c(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movq 0x50(%rsp), %rax movslq 0x4c(%rsp), %rcx vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, (%rax,%rcx,8) movl 0x4c(%rsp), %eax addl $0x1, %eax movl %eax, 0x4c(%rsp) movl 0x30(%rsp), %eax addl $0x1, %eax movl %eax, 0x30(%rsp) jmp 0xaf2d4 jmp 0xaf4d8 movl 0x48(%rsp), %eax addl $0x1, %eax movl %eax, 0x48(%rsp) jmp 0xaf1e1 jmp 0xaf4ea movl 0x68(%rsp), %eax addl $0x1, %eax movl %eax, 0x68(%rsp) jmp 0xaf15a jmp 0xaf4fc jmp 0xaf4fe addq $0x128, %rsp # imm = 0x128 retq nopw %cs:(%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/EigenDecompositionCube.hpp
beagle::cpu::BeagleCPUImpl<double, 1, 1>::threadWaiting(beagle::cpu::BeagleCPUImpl<double, 1, 1>::threadData*)
// Worker-thread main loop: sleeps on the thread's condition variable, executes
// queued packaged_tasks one at a time, and exits when tData->stop is set.
BEAGLE_CPU_TEMPLATE void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::threadWaiting(threadData* tData) {
    // defer_lock: construct unlocked so lock()/unlock() can bracket each
    // iteration explicitly; the destructor still releases a held lock.
    std::unique_lock<std::mutex> l(tData->m, std::defer_lock);
    while (true) {
        l.lock();
        // Wait until the queue won't be empty or stop is signaled.
        // The predicate form re-checks after every wakeup, so spurious
        // wakeups are handled.
        tData->cv.wait(l, [tData] () { return (tData->stop || !tData->jobs.empty()); });
        // Stop was signaled, let's exit the thread.  The unique_lock
        // destructor releases the mutex on return.
        if (tData->stop) {
            return;
        }
        // Pop one task from the queue...
        std::packaged_task<void()> j = std::move(tData->jobs.front());
        tData->jobs.pop();
        // Run the task outside the lock so producers can enqueue meanwhile.
        l.unlock();
        // Execute the task!
        j();
    }
}
subq $0x58, %rsp movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movq 0x48(%rsp), %rsi addq $0x88, %rsi leaq 0x38(%rsp), %rdi callq 0x64bd0 jmp 0xb01f6 leaq 0x38(%rsp), %rdi callq 0x62550 jmp 0xb0202 movq 0x48(%rsp), %rax movq %rax, %rdi addq $0x58, %rdi movq %rax, 0x18(%rsp) movq 0x18(%rsp), %rdx leaq 0x38(%rsp), %rsi callq 0x62820 jmp 0xb0224 movq 0x48(%rsp), %rax testb $0x1, 0xb0(%rax) je 0xb0251 leaq 0x38(%rsp), %rdi callq 0x5fd10 addq $0x58, %rsp retq movq %rax, %rcx movl %edx, %eax movq %rcx, 0x28(%rsp) movl %eax, 0x24(%rsp) jmp 0xb02b9 movq 0x48(%rsp), %rdi addq $0x8, %rdi callq 0x5fc80 movq %rax, %rsi leaq 0x8(%rsp), %rdi callq 0x5f7b0 movq 0x48(%rsp), %rdi addq $0x8, %rdi callq 0x61e70 leaq 0x38(%rsp), %rdi callq 0x5f090 jmp 0xb0286 leaq 0x8(%rsp), %rdi callq 0x63b50 jmp 0xb0292 leaq 0x8(%rsp), %rdi callq 0x60300 jmp 0xb01f4 movq %rax, %rcx movl %edx, %eax movq %rcx, 0x28(%rsp) movl %eax, 0x24(%rsp) leaq 0x8(%rsp), %rdi callq 0x60300 leaq 0x38(%rsp), %rdi callq 0x5fd10 movq 0x28(%rsp), %rdi callq 0x644b0 nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesDispatch1<false>(double*, double*, double*)
// First stage of the derivative-accumulation dispatch chain: converts the
// runtime question "did the caller ask for a summed derivative?" (non-NULL
// outSumDerivatives) into the compile-time DoSum flag of Dispatch2, so the
// innermost loop is instantiated without per-pattern branching.
BEAGLE_CPU_TEMPLATE
template <bool DoDerivatives>
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch1(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {
    const bool wantSum = (outSumDerivatives != NULL);
    if (wantSum) {
        accumulateDerivativesDispatch2<DoDerivatives, true>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        accumulateDerivativesDispatch2<DoDerivatives, false>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x10(%rsp) jne 0xb0923 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x63670 jmp 0xb093b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x63f00 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesDispatch1<true>(double*, double*, double*)
// First stage of the derivative-accumulation dispatch chain: maps the runtime
// nullability of outSumDerivatives onto the compile-time DoSum template flag
// of Dispatch2 so the hot loop carries no per-pattern branch for it.
BEAGLE_CPU_TEMPLATE
template <bool DoDerivatives>
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch1(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {
    if (outSumDerivatives == NULL) {
        // No sum requested -> DoSum = false instantiation.
        accumulateDerivativesDispatch2<DoDerivatives, false>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        // Sum requested -> DoSum = true instantiation.
        accumulateDerivativesDispatch2<DoDerivatives, true>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x10(%rsp) jne 0xb0983 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x627a0 jmp 0xb099b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x64360 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesDispatch2<false, false>(double*, double*, double*)
// Second stage of the dispatch chain: maps the runtime nullability of
// outSumSquaredDerivatives onto the compile-time DoSumSquared flag of the
// final Impl.  (DoDerivatives and DoSum are template parameters supplied by
// the enclosing template header, outside this snippet.)
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch2(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {
    if (outSumSquaredDerivatives == NULL) {
        // No squared sum requested -> DoSumSquared = false instantiation.
        accumulateDerivativesImpl<DoDerivatives, DoSum, false>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        // Squared sum requested -> DoSumSquared = true instantiation.
        accumulateDerivativesImpl<DoDerivatives, DoSum, true>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x8(%rsp) jne 0xb09e3 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x60a90 jmp 0xb09fb movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x64560 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesDispatch2<false, true>(double*, double*, double*)
// Second stage of the dispatch chain: promotes the runtime question "did the
// caller ask for a sum of squared derivatives?" (non-NULL pointer) to the
// compile-time DoSumSquared flag of the final Impl, keeping the inner loop
// branch-free.  DoDerivatives/DoSum come from the enclosing template header.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch2(
        double* outDerivatives,
        double* outSumDerivatives,
        double* outSumSquaredDerivatives) {
    if (outSumSquaredDerivatives != NULL) {
        accumulateDerivativesImpl<DoDerivatives, DoSum, true>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        accumulateDerivativesImpl<DoDerivatives, DoSum, false>(
                outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x8(%rsp) jne 0xb0a43 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x632d0 jmp 0xb0a5b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x622f0 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<false, false, false>(double*, double*, double*)
// Core derivative-accumulation kernel, instantiated once per combination of
// the DoDerivatives / DoSum / DoSumSquared template flags; the if(flag)
// tests below are on template bools and should be resolved at compile time.
// For each site pattern k the derivative is the ratio of the numerator and
// denominator scratch buffers filled by an earlier pass; it is optionally
// written per-pattern and/or folded into pattern-weighted first and second
// moments.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl( double* outDerivatives,
                                                                   double* outSumDerivatives,
                                                                   double* outSumSquaredDerivatives) {
    REALTYPE sum = 0.0;
    REALTYPE sumSquared = 0.0;
    for (int k = 0; k < kPatternCount; k++) {
        REALTYPE derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            // Per-pattern derivative output.
            outDerivatives[k] = derivative;
        }
        if (DoSum) { // TODO Confirm that these are compile-time
            // Pattern-weighted first moment.
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            // Pattern-weighted second moment.
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }
    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb0ae2 movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rax movslq -0x34(%rsp), %rcx vdivsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb0a9a retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<false, false, true>(double*, double*, double*)
// Per-pattern derivative accumulation; the Do* flags are template booleans,
// so disabled branches are removed at instantiation time.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl(double* outDerivatives,
                                                                  double* outSumDerivatives,
                                                                  double* outSumSquaredDerivatives) {
    REALTYPE weightedSum = 0.0;
    REALTYPE weightedSumSq = 0.0;
    for (int pattern = 0; pattern < kPatternCount; ++pattern) {
        // Ratio of the numerator/denominator scratch buffers for this pattern.
        const REALTYPE ratio = grandNumeratorDerivTmp[pattern] / grandDenominatorDerivTmp[pattern];
        if (DoDerivatives) {
            outDerivatives[pattern] = ratio;
        }
        if (DoSum) {
            // Pattern-weighted first moment.
            weightedSum += ratio * gPatternWeights[pattern];
        }
        if (DoSumSquared) {
            // Pattern-weighted second moment.
            weightedSumSq += ratio * ratio * gPatternWeights[pattern];
        }
    }
    if (DoSum) {
        *outSumDerivatives = weightedSum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = weightedSumSq;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb0ba0 movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x34(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) vmovsd -0x40(%rsp), %xmm0 vmulsd -0x40(%rsp), %xmm0, %xmm1 movq 0x80(%rax), %rax movslq -0x34(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x30(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x30(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb0b2a vmovsd -0x30(%rsp), %xmm0 movq -0x20(%rsp), %rax vmovsd %xmm0, (%rax) retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<false, true, false>(double*, double*, double*)
// Derivative accumulation kernel (one instantiation per flag combination).
// Each if(flag) below tests a compile-time template bool, selecting which
// outputs are produced: the raw per-pattern derivatives, their
// pattern-weighted sum, and/or their pattern-weighted sum of squares.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl( double* outDerivatives,
                                                                   double* outSumDerivatives,
                                                                   double* outSumSquaredDerivatives) {
    REALTYPE sum = 0.0;
    REALTYPE sumSquared = 0.0;
    for (int k = 0; k < kPatternCount; k++) {
        // Derivative for pattern k from the numerator/denominator scratch buffers.
        REALTYPE derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            outDerivatives[k] = derivative;
        }
        if (DoSum) { // TODO Confirm that these are compile-time
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }
    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb0c5a movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x34(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) vmovsd -0x40(%rsp), %xmm1 movq 0x80(%rax), %rax movslq -0x34(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x28(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb0bea vmovsd -0x28(%rsp), %xmm0 movq -0x18(%rsp), %rax vmovsd %xmm0, (%rax) retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesDispatch2<true, true>(double*, double*, double*)
// Dispatch layer: picks the DoSumSquared=true/false instantiation of
// accumulateDerivativesImpl based on whether the squared-sum output buffer
// was supplied, so the choice is made once rather than per pattern.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesDispatch2( double* outDerivatives,
                                                                        double* outSumDerivatives,
                                                                        double* outSumSquaredDerivatives) {
    if (outSumSquaredDerivatives == NULL) {
        accumulateDerivativesImpl<DoDerivatives, DoSum, false>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    } else {
        accumulateDerivativesImpl<DoDerivatives, DoSum, true>( outDerivatives, outSumDerivatives, outSumSquaredDerivatives);
    }
}
subq $0x28, %rsp movq %rdi, 0x20(%rsp) movq %rsi, 0x18(%rsp) movq %rdx, 0x10(%rsp) movq %rcx, 0x8(%rsp) movq 0x20(%rsp), %rax movq %rax, (%rsp) cmpq $0x0, 0x8(%rsp) jne 0xb0e13 movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x61b90 jmp 0xb0e2b movq (%rsp), %rdi movq 0x18(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x8(%rsp), %rcx callq 0x648d0 addq $0x28, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<true, false, false>(double*, double*, double*)
// Accumulates derivatives over all site patterns.  DoDerivatives, DoSum and
// DoSumSquared are template booleans, so unused accumulation paths are
// eliminated at compile time for each instantiation.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl(double* outDerivatives,
                                                                  double* outSumDerivatives,
                                                                  double* outSumSquaredDerivatives) {
    REALTYPE firstMoment = 0.0;
    REALTYPE secondMoment = 0.0;
    for (int p = 0; p < kPatternCount; ++p) {
        // Per-pattern derivative from the precomputed scratch buffers.
        const REALTYPE d = grandNumeratorDerivTmp[p] / grandDenominatorDerivTmp[p];
        if (DoDerivatives) {
            outDerivatives[p] = d;
        }
        if (DoSum) {
            firstMoment += d * gPatternWeights[p];
        }
        if (DoSumSquared) {
            secondMoment += d * d * gPatternWeights[p];
        }
    }
    if (DoSum) {
        *outSumDerivatives = firstMoment;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = secondMoment;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb0ec7 movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rax movslq -0x34(%rsp), %rcx vdivsd (%rax,%rcx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) vmovsd -0x40(%rsp), %xmm0 movq -0x10(%rsp), %rax movslq -0x34(%rsp), %rcx vmovsd %xmm0, (%rax,%rcx,8) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb0e6a retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<true, true, false>(double*, double*, double*)
// Flag-parameterized derivative accumulation over site patterns.
// The template bools DoDerivatives/DoSum/DoSumSquared select which of the
// three outputs are written; branches on them are compile-time.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl( double* outDerivatives,
                                                                   double* outSumDerivatives,
                                                                   double* outSumSquaredDerivatives) {
    REALTYPE sum = 0.0;
    REALTYPE sumSquared = 0.0;
    for (int k = 0; k < kPatternCount; k++) {
        REALTYPE derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            // Store the raw per-pattern derivative.
            outDerivatives[k] = derivative;
        }
        if (DoSum) { // TODO Confirm that these are compile-time
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }
    if (DoSum) {
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        *outSumSquaredDerivatives = sumSquared;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb1072 movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x34(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) vmovsd -0x40(%rsp), %xmm0 movq -0x10(%rsp), %rcx movslq -0x34(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd -0x40(%rsp), %xmm1 movq 0x80(%rax), %rax movslq -0x34(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x28(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb0fea vmovsd -0x28(%rsp), %xmm0 movq -0x18(%rsp), %rax vmovsd %xmm0, (%rax) retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
void beagle::cpu::BeagleCPUImpl<double, 1, 1>::accumulateDerivativesImpl<true, true, true>(double*, double*, double*)
// Derivative accumulation kernel.  This template produces one specialized
// function per (DoDerivatives, DoSum, DoSumSquared) combination; the tests
// on those flags below are compile-time, so each instantiation's loop does
// only the work its flags request.
void BeagleCPUImpl<BEAGLE_CPU_GENERIC>::accumulateDerivativesImpl( double* outDerivatives,
                                                                   double* outSumDerivatives,
                                                                   double* outSumSquaredDerivatives) {
    REALTYPE sum = 0.0;
    REALTYPE sumSquared = 0.0;
    for (int k = 0; k < kPatternCount; k++) {
        // Derivative for pattern k, as numerator/denominator of the scratch buffers.
        REALTYPE derivative = grandNumeratorDerivTmp[k] / grandDenominatorDerivTmp[k];
        if (DoDerivatives) {
            outDerivatives[k] = derivative;
        }
        if (DoSum) { // TODO Confirm that these are compile-time
            sum += derivative * gPatternWeights[k];
        }
        if (DoSumSquared) {
            sumSquared += derivative * derivative * gPatternWeights[k];
        }
    }
    if (DoSum) {
        // Weighted sum of derivatives over all patterns.
        *outSumDerivatives = sum;
    }
    if (DoSumSquared) {
        // Weighted sum of squared derivatives over all patterns.
        *outSumSquaredDerivatives = sumSquared;
    }
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq %rdx, -0x18(%rsp) movq %rcx, -0x20(%rsp) movq -0x8(%rsp), %rax movq %rax, -0x48(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x28(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovsd %xmm0, -0x30(%rsp) movl $0x0, -0x34(%rsp) movq -0x48(%rsp), %rcx movl -0x34(%rsp), %eax cmpl 0x14(%rcx), %eax jge 0xb1184 movq -0x48(%rsp), %rax movq 0x100(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 movq 0xf8(%rax), %rcx movslq -0x34(%rsp), %rdx vdivsd (%rcx,%rdx,8), %xmm0, %xmm0 vmovsd %xmm0, -0x40(%rsp) vmovsd -0x40(%rsp), %xmm0 movq -0x10(%rsp), %rcx movslq -0x34(%rsp), %rdx vmovsd %xmm0, (%rcx,%rdx,8) vmovsd -0x40(%rsp), %xmm1 movq 0x80(%rax), %rcx movslq -0x34(%rsp), %rdx vmovsd (%rcx,%rdx,8), %xmm0 vmovsd -0x28(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x28(%rsp) vmovsd -0x40(%rsp), %xmm0 vmulsd -0x40(%rsp), %xmm0, %xmm1 movq 0x80(%rax), %rax movslq -0x34(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd -0x30(%rsp), %xmm2 vfmadd213sd %xmm2, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm2 vmovsd %xmm0, -0x30(%rsp) movl -0x34(%rsp), %eax addl $0x1, %eax movl %eax, -0x34(%rsp) jmp 0xb10ca vmovsd -0x28(%rsp), %xmm0 movq -0x18(%rsp), %rax vmovsd %xmm0, (%rax) vmovsd -0x30(%rsp), %xmm0 movq -0x20(%rsp), %rax vmovsd %xmm0, (%rax) retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUImpl.hpp
beagle::cpu::BeagleCPUSSEImpl<double, 2, 0>::calcStatesStates(double*, int const*, double const*, int const*, double const*, int, int)
// SSE-double specialization of the states/states partials kernel.
// No vectorized body is provided here; it simply delegates to the generic
// scalar implementation in the CPU base class with identical arguments.
BEAGLE_CPU_SSE_TEMPLATE
void BeagleCPUSSEImpl<BEAGLE_CPU_SSE_DOUBLE>::calcStatesStates(double* destP,
                                                               const int* states_q,
                                                               const double* matrices_q,
                                                               const int* states_r,
                                                               const double* matrices_r,
                                                               int startPattern,
                                                               int endPattern) {
    BeagleCPUImpl<BEAGLE_CPU_SSE_DOUBLE>::calcStatesStates(destP,
                                                           states_q,
                                                           matrices_q,
                                                           states_r,
                                                           matrices_r,
                                                           startPattern,
                                                           endPattern);
}
subq $0x48, %rsp movl 0x58(%rsp), %eax movl 0x50(%rsp), %eax movq %rdi, 0x40(%rsp) movq %rsi, 0x38(%rsp) movq %rdx, 0x30(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x20(%rsp) movq %r9, 0x18(%rsp) movq 0x40(%rsp), %rdi movq 0x38(%rsp), %rsi movq 0x30(%rsp), %rdx movq 0x28(%rsp), %rcx movq 0x20(%rsp), %r8 movq 0x18(%rsp), %r9 movl 0x50(%rsp), %r10d movl 0x58(%rsp), %eax movl %r10d, (%rsp) movl %eax, 0x8(%rsp) callq 0x62610 addq $0x48, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUSSEImpl.hpp
beagle::cpu::BeagleCPUSSEImpl<double, 2, 0>::calcEdgeLogLikelihoods(int, int, int, int, int, int, double*)
// SSE-double specialization of edge log-likelihood calculation.
// Forwards all indices unchanged to the generic base-class implementation
// and returns its status code.
BEAGLE_CPU_SSE_TEMPLATE
int BeagleCPUSSEImpl<BEAGLE_CPU_SSE_DOUBLE>::calcEdgeLogLikelihoods(const int parIndex,
                                                                    const int childIndex,
                                                                    const int probIndex,
                                                                    const int categoryWeightsIndex,
                                                                    const int stateFrequenciesIndex,
                                                                    const int scalingFactorsIndex,
                                                                    double* outSumLogLikelihood) {
    return BeagleCPUImpl<BEAGLE_CPU_SSE_DOUBLE>::calcEdgeLogLikelihoods(
            parIndex,
            childIndex,
            probIndex,
            categoryWeightsIndex,
            stateFrequenciesIndex,
            scalingFactorsIndex,
            outSumLogLikelihood);
}
subq $0x38, %rsp movq 0x48(%rsp), %rax movl 0x40(%rsp), %eax movq %rdi, 0x30(%rsp) movl %esi, 0x2c(%rsp) movl %edx, 0x28(%rsp) movl %ecx, 0x24(%rsp) movl %r8d, 0x20(%rsp) movl %r9d, 0x1c(%rsp) movq 0x30(%rsp), %rdi movl 0x2c(%rsp), %esi movl 0x28(%rsp), %edx movl 0x24(%rsp), %ecx movl 0x20(%rsp), %r8d movl 0x1c(%rsp), %r9d movl 0x40(%rsp), %r10d movq 0x48(%rsp), %rax movl %r10d, (%rsp) movq %rax, 0x8(%rsp) callq 0x5f610 addq $0x38, %rsp retq
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUSSEImpl.hpp
beagle::cpu::BeagleCPUSSEImpl<double, 2, 0>::calcPartialsPartialsFixedScaling(double*, double const*, double const*, double const*, double const*, double const*, int, int)
// SSE (128-bit, two doubles per vector) partials*partials kernel with fixed
// per-pattern scaling.  For each category l and pattern k it computes, per
// state i, two dot products (one against each child's transition matrix),
// multiplies them, divides by the pattern's scale factor, and stores one
// scalar result.  States are consumed two at a time via VEC_* macros; the
// horizontal add is done with VEC_SWAP + VEC_ADD before the final store.
// NOTE(review): parallelized with one OpenMP thread per category —
// num_threads(kCategoryCount); confirm this matches the library's threading
// policy elsewhere.
BEAGLE_CPU_SSE_TEMPLATE
void BeagleCPUSSEImpl<BEAGLE_CPU_SSE_DOUBLE>::calcPartialsPartialsFixedScaling(double* __restrict destP,
                                                                               const double* __restrict partials1,
                                                                               const double* __restrict matrices1,
                                                                               const double* __restrict partials2,
                                                                               const double* __restrict matrices2,
                                                                               const double* __restrict scaleFactors,
                                                                               int startPattern,
                                                                               int endPattern) {
    // Loop bound for the 2-wide vector stride over states.
    int stateCountMinusOne = kPartialsPaddedStateCount - 1;
#pragma omp parallel for num_threads(kCategoryCount)
    for (int l = 0; l < kCategoryCount; l++) {
        // v: offset of this category's partials, advanced to startPattern.
        int v = l*kPartialsPaddedStateCount*kPatternCount + kPartialsPaddedStateCount*startPattern;
        double* destPu = destP + v;
        for (int k = startPattern; k < endPattern; k++) {
            // w: offset of this category's transition matrix.
            int w = l * kMatrixSize;
            // Broadcast the pattern's scale factor into both vector lanes.
            const V_Real scalar = VEC_SPLAT(scaleFactors[k]);
            for (int i = 0; i < kStateCount; i++) {
                V_Real sum1_vec;
                V_Real sum2_vec;
                int j = 0;
                sum1_vec = VEC_SETZERO();
                sum2_vec = VEC_SETZERO();
                // Accumulate matrix-row * partials dot products, 2 states/step.
                for ( ; j < stateCountMinusOne; j += 2) {
                    sum1_vec = VEC_MADD(
                            VEC_LOAD(matrices1 + w + j), // TODO This only works if w is even
                            VEC_LOAD(partials1 + v + j), // TODO This only works if v is even
                            sum1_vec);
                    sum2_vec = VEC_MADD(
                            VEC_LOAD(matrices2 + w + j),
                            VEC_LOAD(partials2 + v + j),
                            sum2_vec);
                }
                // Horizontal-add each accumulator, multiply the two dot
                // products, divide by the scale factor, store one double.
                VEC_STORE_SCALAR(destPu, VEC_DIV(VEC_MULT(
                        VEC_ADD(sum1_vec, VEC_SWAP(sum1_vec)),
                        VEC_ADD(sum2_vec, VEC_SWAP(sum2_vec))
                        ), scalar));
                // increment for the extra column at the end
                w += kStateCount + T_PAD;
                destPu++;
            }
            if (P_PAD) {
                // Zero-fill the padding states appended to each pattern.
                for (int pad = 0; pad < P_PAD; pad++) {
                    *(destPu++) = 0.0;
                }
            }
            v += kPartialsPaddedStateCount;
        }
    }
}
subq $0x198, %rsp # imm = 0x198 movl 0x1b0(%rsp), %eax movl 0x1a8(%rsp), %eax movq 0x1a0(%rsp), %rax movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rdx, 0x8(%rsp) movq %rcx, (%rsp) movq %r8, -0x8(%rsp) movq %r9, -0x10(%rsp) movq 0x18(%rsp), %rax movq %rax, -0x80(%rsp) movl 0x2c(%rax), %eax subl $0x1, %eax movl %eax, -0x14(%rsp) movl $0x0, -0x18(%rsp) movq -0x80(%rsp), %rcx movl -0x18(%rsp), %eax cmpl 0x34(%rcx), %eax jge 0xb7a52 movq -0x80(%rsp), %rcx movl -0x18(%rsp), %eax imull 0x2c(%rcx), %eax imull 0x14(%rcx), %eax movl 0x2c(%rcx), %ecx imull 0x1a8(%rsp), %ecx addl %ecx, %eax movl %eax, -0x1c(%rsp) movq 0x10(%rsp), %rax movslq -0x1c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, -0x28(%rsp) movl 0x1a8(%rsp), %eax movl %eax, -0x2c(%rsp) movl -0x2c(%rsp), %eax cmpl 0x1b0(%rsp), %eax jge 0xb7a40 movq -0x80(%rsp), %rcx movl -0x18(%rsp), %eax movl 0x40(%rcx), %ecx imull %ecx, %eax movl %eax, -0x30(%rsp) movq 0x1a0(%rsp), %rax movslq -0x2c(%rsp), %rcx vmovsd (%rax,%rcx,8), %xmm0 vmovsd %xmm0, 0x38(%rsp) vmovddup 0x38(%rsp), %xmm0 # xmm0 = mem[0,0] vmovapd %xmm0, 0x20(%rsp) vmovapd 0x20(%rsp), %xmm0 vmovapd %xmm0, -0x40(%rsp) movl $0x0, -0x44(%rsp) movq -0x80(%rsp), %rcx movl -0x44(%rsp), %eax cmpl 0x24(%rcx), %eax jge 0xb7a20 movl $0x0, -0x74(%rsp) vpxor %xmm0, %xmm0, %xmm0 vmovapd %xmm0, 0x150(%rsp) vmovapd 0x150(%rsp), %xmm1 vmovapd %xmm1, -0x60(%rsp) vmovapd %xmm0, 0x140(%rsp) vmovapd 0x140(%rsp), %xmm0 vmovapd %xmm0, -0x70(%rsp) movl -0x74(%rsp), %eax cmpl -0x14(%rsp), %eax jge 0xb7912 movq (%rsp), %rax movslq -0x30(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movslq -0x74(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x138(%rsp) movq 0x138(%rsp), %rax vmovapd (%rax), %xmm1 movq 0x8(%rsp), %rax movslq -0x1c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movslq -0x74(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x130(%rsp) movq 0x130(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x90(%rsp) vmovapd %xmm0, 0x80(%rsp) vmovapd 0x90(%rsp), 
%xmm0 vmulpd 0x80(%rsp), %xmm0, %xmm1 vmovapd -0x60(%rsp), %xmm0 vmovapd %xmm1, 0x110(%rsp) vmovapd %xmm0, 0x100(%rsp) vmovapd 0x110(%rsp), %xmm0 vaddpd 0x100(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x60(%rsp) movq -0x10(%rsp), %rax movslq -0x30(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movslq -0x74(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x128(%rsp) movq 0x128(%rsp), %rax vmovapd (%rax), %xmm1 movq -0x8(%rsp), %rax movslq -0x1c(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movslq -0x74(%rsp), %rcx shlq $0x3, %rcx addq %rcx, %rax movq %rax, 0x120(%rsp) movq 0x120(%rsp), %rax vmovapd (%rax), %xmm0 vmovapd %xmm1, 0x70(%rsp) vmovapd %xmm0, 0x60(%rsp) vmovapd 0x70(%rsp), %xmm0 vmulpd 0x60(%rsp), %xmm0, %xmm1 vmovapd -0x70(%rsp), %xmm0 vmovapd %xmm1, 0xf0(%rsp) vmovapd %xmm0, 0xe0(%rsp) vmovapd 0xf0(%rsp), %xmm0 vaddpd 0xe0(%rsp), %xmm0, %xmm0 vmovapd %xmm0, -0x70(%rsp) movl -0x74(%rsp), %eax addl $0x2, %eax movl %eax, -0x74(%rsp) jmp 0xb7795 movq -0x80(%rsp), %rax movq -0x28(%rsp), %rcx vmovapd -0x60(%rsp), %xmm1 vpermilpd $0x1, %xmm1, %xmm0 # xmm0 = xmm1[1,0] vmovapd %xmm1, 0xd0(%rsp) vmovapd %xmm0, 0xc0(%rsp) vmovapd 0xd0(%rsp), %xmm0 vmovapd 0xc0(%rsp), %xmm1 vaddpd %xmm1, %xmm0, %xmm1 vmovapd -0x70(%rsp), %xmm2 vpermilpd $0x1, %xmm2, %xmm0 # xmm0 = xmm2[1,0] vmovapd %xmm2, 0xb0(%rsp) vmovapd %xmm0, 0xa0(%rsp) vmovapd 0xb0(%rsp), %xmm0 vmovapd 0xa0(%rsp), %xmm2 vaddpd %xmm2, %xmm0, %xmm0 vmovapd %xmm1, 0x50(%rsp) vmovapd %xmm0, 0x40(%rsp) vmovapd 0x50(%rsp), %xmm0 vmovapd 0x40(%rsp), %xmm1 vmulpd %xmm1, %xmm0, %xmm1 vmovapd -0x40(%rsp), %xmm0 vmovapd %xmm1, 0x170(%rsp) vmovapd %xmm0, 0x160(%rsp) vmovapd 0x170(%rsp), %xmm0 vmovapd 0x160(%rsp), %xmm1 vdivpd %xmm1, %xmm0, %xmm0 movq %rcx, 0x190(%rsp) vmovapd %xmm0, 0x180(%rsp) vmovsd 0x180(%rsp), %xmm0 movq 0x190(%rsp), %rcx vmovsd %xmm0, (%rcx) movl 0x24(%rax), %eax addl $0x2, %eax addl -0x30(%rsp), %eax movl %eax, -0x30(%rsp) movq -0x28(%rsp), %rax addq $0x8, %rax movq %rax, -0x28(%rsp) movl -0x44(%rsp), 
%eax addl $0x1, %eax movl %eax, -0x44(%rsp) jmp 0xb7747 movq -0x80(%rsp), %rax movl 0x2c(%rax), %eax addl -0x1c(%rsp), %eax movl %eax, -0x1c(%rsp) movl -0x2c(%rsp), %eax addl $0x1, %eax movl %eax, -0x2c(%rsp) jmp 0xb76eb jmp 0xb7a42 movl -0x18(%rsp), %eax addl $0x1, %eax movl %eax, -0x18(%rsp) jmp 0xb7696 addq $0x198, %rsp # imm = 0x198 retq nopw (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUSSEImpl.hpp
beagle::cpu::BeagleCPUSSEImpl<double, 2, 0>::calcPartialsPartialsAutoScaling(double*, double const*, double const*, double const*, double const*, int*)
// SSE-double specialization of the auto-scaling partials*partials kernel.
// No vectorized body is provided; delegates directly to the generic
// base-class implementation.
BEAGLE_CPU_SSE_TEMPLATE
void BeagleCPUSSEImpl<BEAGLE_CPU_SSE_DOUBLE>::calcPartialsPartialsAutoScaling(double* destP,
                                                                              const double* partials_q,
                                                                              const double* matrices_q,
                                                                              const double* partials_r,
                                                                              const double* matrices_r,
                                                                              int* activateScaling) {
    BeagleCPUImpl<BEAGLE_CPU_SSE_DOUBLE>::calcPartialsPartialsAutoScaling(destP,
                                                                          partials_q,
                                                                          matrices_q,
                                                                          partials_r,
                                                                          matrices_r,
                                                                          activateScaling);
}
subq $0x38, %rsp movq 0x40(%rsp), %rax movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rdx, 0x20(%rsp) movq %rcx, 0x18(%rsp) movq %r8, 0x10(%rsp) movq %r9, 0x8(%rsp) movq 0x30(%rsp), %rdi movq 0x28(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x18(%rsp), %rcx movq 0x10(%rsp), %r8 movq 0x8(%rsp), %r9 movq 0x40(%rsp), %rax movq %rax, (%rsp) callq 0x628e0 addq $0x38, %rsp retq nopl (%rax,%rax)
/beagle-dev[P]beagle-lib/libhmsbeagle/CPU/BeagleCPUSSEImpl.hpp
Sequence::process_itemset(std::shared_ptr<Itemset>, unsigned int, int)
// Recursively extends a frequent itemset/sequence by one item.
// Two passes over the equivalence-graph neighbors of the itemset's first
// item: first equality joins (EJOIN), then sequence joins (LJOIN, which set
// a template bit via SETBIT).  Each surviving join is counted in
// numLargeItemset, optionally printed, and recursed on with iter+1.
// NOTE(review): `iter > numLargeItemset->size()` compares signed int to an
// unsigned size — confirm iter is never negative at the call sites.
void Sequence::process_itemset(Itemset_S iset, unsigned int templ, int iter) {
    // Recursion depth limit: stop once iter exceeds the bookkeeping vector.
    if (iter > numLargeItemset->size()) return;
    ostringstream &logger = env.logger;
    ostringstream &seqstrm = env.seqstrm;
    int i, it2;
    int lsup, esup;       // local/equality support, filled by get_tmpnewf_intersect
    unsigned int ntpl;    // per-join copy of the template bits
    Itemset_S iset2, ejoin, ljoin;
    int it = (*iset)[0];  // head item of the current itemset
    if (args->maxiter < iter) args->maxiter = iter;   // track deepest level reached
    if (invdb->get_eqgraph_item(it)) {
        // Pass 1: equality joins with each graph element of `it`.
        for (i = 0; i < invdb->get_eqgraph_item(it)->num_elements(); i++) {
            it2 = invdb->get_eqgraph_item(it)->get_element(i);
            ntpl = templ;
            iset2 = invdb->get_item(it2);
            ljoin = nullptr;
            ejoin = prune_decision(iset2, iset, ntpl, EJOIN);
            if (args->pruning_type > 1) pre_pruning(ejoin, ntpl, iset, iset2, 0);
            if (ejoin) get_tmpnewf_intersect(ljoin, ejoin, ljoin, lsup, esup, lsup, iset2, iset, iter);
            if (ejoin) fill_join(ejoin, iset, iset2);
            if (args->pruning_type > 1) post_pruning(ejoin, ntpl);
            if (ejoin) {
                // Join survived all pruning: record it and recurse.
                numLargeItemset->at(iter - 1)++;
                //fill_join(ejoin, iset, iset2);
                //if (iter > 3) ejoin->print_seq(seqstrm, ntpl);
                if (args->outputfreq) ejoin->print_seq(seqstrm, ntpl);
                process_itemset(ejoin, ntpl, iter + 1);
                ejoin = nullptr;
            }
        }
        // Pass 2: sequence joins; mark bit (iter-2) of the template.
        for (i = 0; i < invdb->get_eqgraph_item(it)->seqnum_elements(); i++) {
            it2 = invdb->get_eqgraph_item(it)->seqget_element(i);
            ntpl = SETBIT(templ, 1, iter - 2);
            iset2 = invdb->get_item(it2);
            ejoin = nullptr;
            ljoin = prune_decision(iset2, iset, ntpl, LJOIN);
            if (args->pruning_type > 1) pre_pruning(ljoin, ntpl, iset, iset2, 1);
            if (ljoin) get_tmpnewf_intersect(ljoin, ejoin, ejoin, lsup, esup, esup, iset2, iset, iter);
            if (ljoin) fill_join(ljoin, iset, iset2);
            if (args->pruning_type > 1) post_pruning(ljoin, ntpl);
            if (ljoin) {
                numLargeItemset->at(iter - 1)++;
                //fill_join(ljoin, iset, iset2);
                //if (iter > 3) ljoin->print_seq(seqstrm, ntpl);
                if (args->outputfreq) ljoin->print_seq(seqstrm, ntpl);
                process_itemset(ljoin, ntpl, iter + 1);
                ljoin = nullptr;
            }
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x98, %rsp movl %edx, %ebp movq %rcx, 0x28(%rsp) movslq %ecx, %rax movq 0x98(%rdi), %rcx movq 0x8(%rcx), %rdx subq (%rcx), %rdx sarq $0x2, %rdx cmpq %rax, %rdx jb 0x12946 movq %rsi, %r14 movq %rdi, %r15 movq (%rdi), %rax movq %rax, 0x68(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x50(%rsp) movaps %xmm0, 0x10(%rsp) movaps %xmm0, (%rsp) movq (%rsi), %rdi xorl %esi, %esi callq 0x1353a movq 0x8(%r15), %rcx movq 0x28(%rsp), %rdx cmpl %edx, 0xd8(%rcx) jge 0x124d5 movq 0x28(%rsp), %rdx movl %edx, 0xd8(%rcx) movq 0x38(%r15), %rcx movslq %eax, %rdx movq 0xa0(%rcx), %rax shlq $0x4, %rdx movq %rdx, 0x70(%rsp) movq (%rax,%rdx), %rdi testq %rdi, %rdi je 0x12919 movq 0x28(%rsp), %rcx leal -0x1(%rcx), %eax movslq %eax, %r13 leal 0x1(%rcx), %eax movl %eax, 0x4c(%rsp) xorl %ebx, %ebx leaq 0x50(%rsp), %r12 movq (%rdi), %rcx movl $0x0, %eax testq %rcx, %rcx je 0x12522 movl 0x10(%rcx), %eax cmpl %eax, %ebx jge 0x12717 movl %ebx, %esi callq 0x13774 movq 0x38(%r15), %rsi leaq 0x30(%rsp), %rdi movl %eax, %edx callq 0x139b4 movaps 0x30(%rsp), %xmm0 xorps %xmm1, %xmm1 movaps %xmm1, 0x30(%rsp) movq 0x58(%rsp), %rdi movaps %xmm0, 0x50(%rsp) testq %rdi, %rdi je 0x12571 callq 0x13bcc movq 0x38(%rsp), %rdi testq %rdi, %rdi je 0x12571 callq 0x13bcc movq 0x8(%rsp), %rdi xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) testq %rdi, %rdi je 0x12587 callq 0x13bcc leaq 0x30(%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movq %r14, %rcx movl %ebp, %r8d movl $0x1, %r9d callq 0xf640 movq 0x18(%rsp), %rdi movaps 0x30(%rsp), %xmm0 movaps %xmm0, 0x10(%rsp) testq %rdi, %rdi je 0x125bc callq 0x13bcc movq 0x8(%r15), %rax cmpl $0x2, (%rax) jl 0x125dd movq %r15, %rdi leaq 0x10(%rsp), %rsi movl %ebp, %edx movq %r14, %rcx movq %r12, %r8 xorl %r9d, %r9d callq 0xe334 cmpq $0x0, 0x10(%rsp) je 0x12628 movq %r15, %rdi movq %rsp, %rcx movq %rcx, %rsi leaq 0x10(%rsp), %rdx leaq 0x64(%rsp), %r8 leaq 0x60(%rsp), %r9 pushq 0x28(%rsp) pushq %r14 pushq %r12 pushq %r8 
callq 0xd99c addq $0x20, %rsp cmpq $0x0, 0x10(%rsp) je 0x12628 leaq 0x10(%rsp), %rsi movq %r14, %rdx movq %r12, %rcx callq 0xf5e2 movq 0x8(%r15), %rax cmpl $0x2, (%rax) jl 0x12640 movq %r15, %rdi leaq 0x10(%rsp), %rsi movl %ebp, %edx callq 0xe598 movq 0x10(%rsp), %rdi testq %rdi, %rdi je 0x126fc movq 0x98(%r15), %rcx movq (%rcx), %rax movq 0x8(%rcx), %rdx subq %rax, %rdx sarq $0x2, %rdx cmpq %r13, %rdx jbe 0x12958 incl (%rax,%r13,4) movq 0x8(%r15), %rax cmpb $0x0, 0xed(%rax) je 0x1268e movq 0x68(%rsp), %rsi movl %ebp, %edx callq 0x17348 movq 0x10(%rsp), %rdi movq %rdi, 0x88(%rsp) movq 0x18(%rsp), %rax movq %rax, 0x90(%rsp) testq %rax, %rax je 0x126bd movq 0x1a911(%rip), %rcx # 0x2cfc0 cmpb $0x0, (%rcx) je 0x126b9 incl 0x8(%rax) jmp 0x126bd lock incl 0x8(%rax) movq %r15, %rdi leaq 0x88(%rsp), %rsi movl %ebp, %edx movl 0x4c(%rsp), %ecx callq 0x1245a movq 0x90(%rsp), %rdi testq %rdi, %rdi je 0x126e5 callq 0x13bcc movq 0x18(%rsp), %rdi xorps %xmm0, %xmm0 movaps %xmm0, 0x10(%rsp) testq %rdi, %rdi je 0x126fc callq 0x13bcc incl %ebx movq 0x38(%r15), %rax movq 0xa0(%rax), %rax movq 0x70(%rsp), %rcx movq (%rax,%rcx), %rdi jmp 0x12512 movq 0x28(%rsp), %rax addl $-0x2, %eax btsl %eax, %ebp xorl %ebx, %ebx movq 0x10(%rdi), %rcx movl $0x0, %eax testq %rcx, %rcx je 0x12735 movl 0x10(%rcx), %eax cmpl %eax, %ebx jge 0x12919 movl %ebx, %esi callq 0x136c0 movq 0x38(%r15), %rsi leaq 0x30(%rsp), %rdi movl %eax, %edx callq 0x139b4 movaps 0x30(%rsp), %xmm0 xorps %xmm1, %xmm1 movaps %xmm1, 0x30(%rsp) movq 0x58(%rsp), %rdi movaps %xmm0, 0x50(%rsp) testq %rdi, %rdi je 0x12784 callq 0x13bcc movq 0x38(%rsp), %rdi testq %rdi, %rdi je 0x12784 callq 0x13bcc movq 0x18(%rsp), %rdi xorps %xmm0, %xmm0 movaps %xmm0, 0x10(%rsp) testq %rdi, %rdi je 0x1279b callq 0x13bcc leaq 0x30(%rsp), %rdi movq %r15, %rsi movq %r12, %rdx movq %r14, %rcx movl %ebp, %r8d xorl %r9d, %r9d callq 0xf640 movq 0x8(%rsp), %rdi movaps 0x30(%rsp), %xmm0 movaps %xmm0, (%rsp) testq %rdi, %rdi je 0x127cc callq 0x13bcc movq 
0x8(%r15), %rax cmpl $0x2, (%rax) jl 0x127ee movq %r15, %rdi movq %rsp, %rsi movl %ebp, %edx movq %r14, %rcx movq %r12, %r8 movl $0x1, %r9d callq 0xe334 cmpq $0x0, (%rsp) je 0x12835 movq %r15, %rdi movq %rsp, %rsi leaq 0x10(%rsp), %rcx movq %rcx, %rdx leaq 0x64(%rsp), %r8 leaq 0x60(%rsp), %r9 pushq 0x28(%rsp) pushq %r14 pushq %r12 pushq %r9 callq 0xd99c addq $0x20, %rsp cmpq $0x0, (%rsp) je 0x12835 movq %rsp, %rsi movq %r14, %rdx movq %r12, %rcx callq 0xf5e2 movq 0x8(%r15), %rax cmpl $0x2, (%rax) jl 0x1284b movq %r15, %rdi movq %rsp, %rsi movl %ebp, %edx callq 0xe598 movq (%rsp), %rdi testq %rdi, %rdi je 0x128fe movq 0x98(%r15), %rcx movq (%rcx), %rax movq 0x8(%rcx), %rdx subq %rax, %rdx sarq $0x2, %rdx cmpq %r13, %rdx jbe 0x12958 incl (%rax,%r13,4) movq 0x8(%r15), %rax cmpb $0x0, 0xed(%rax) je 0x12897 movq 0x68(%rsp), %rsi movl %ebp, %edx callq 0x17348 movq (%rsp), %rdi movq %rdi, 0x78(%rsp) movq 0x8(%rsp), %rax movq %rax, 0x80(%rsp) testq %rax, %rax je 0x128c3 movq 0x1a70b(%rip), %rcx # 0x2cfc0 cmpb $0x0, (%rcx) je 0x128bf incl 0x8(%rax) jmp 0x128c3 lock incl 0x8(%rax) movq %r15, %rdi leaq 0x78(%rsp), %rsi movl %ebp, %edx movl 0x4c(%rsp), %ecx callq 0x1245a movq 0x80(%rsp), %rdi testq %rdi, %rdi je 0x128e8 callq 0x13bcc movq 0x8(%rsp), %rdi xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) testq %rdi, %rdi je 0x128fe callq 0x13bcc incl %ebx movq 0x38(%r15), %rax movq 0xa0(%rax), %rax movq 0x70(%rsp), %rcx movq (%rax,%rcx), %rdi jmp 0x12724 movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x12928 callq 0x13bcc movq 0x18(%rsp), %rdi testq %rdi, %rdi je 0x12937 callq 0x13bcc movq 0x58(%rsp), %rdi testq %rdi, %rdi je 0x12946 callq 0x13bcc addq $0x98, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq leaq 0xec55(%rip), %rdi # 0x215b4 movq %r13, %rsi xorl %eax, %eax callq 0x55a0 movq %rax, %rbx movq 0x80(%rsp), %rdi testq %rdi, %rdi je 0x129a1 jmp 0x1298b movq %rax, %rbx movq 0x90(%rsp), %rdi testq %rdi, %rdi je 0x129a1 callq 0x13bcc jmp 0x129a1 jmp 0x1299e jmp 
0x1299e jmp 0x1299e jmp 0x1299e jmp 0x1299e jmp 0x1299e movq %rax, %rbx movq 0x8(%rsp), %rdi testq %rdi, %rdi je 0x129b0 callq 0x13bcc movq 0x18(%rsp), %rdi testq %rdi, %rdi je 0x129bf callq 0x13bcc movq 0x58(%rsp), %rdi testq %rdi, %rdi je 0x129ce callq 0x13bcc movq %rbx, %rdi callq 0x5700
/fzyukio[P]cspade-full/Sequence.cc
ncnn::fastMalloc(unsigned long)
// Allocates MALLOC_ALIGN-aligned memory, selecting the platform mechanism
// at preprocessing time.  Returns 0 (null) on failure.  Must be released
// with fastFree, which mirrors these same branches.
static inline void* fastMalloc(size_t size)
{
#if _MSC_VER
    // MSVC: CRT aligned allocator (paired with _aligned_free in fastFree).
    return _aligned_malloc(size, MALLOC_ALIGN);
#elif _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
    // POSIX / Android API >= 17: posix_memalign; result is free()-able.
    void* ptr = 0;
    if (posix_memalign(&ptr, MALLOC_ALIGN, size))
        ptr = 0;   // non-zero return means failure; ptr may be left unset
    return ptr;
#elif __ANDROID__ && __ANDROID_API__ < 17
    // Older Android: memalign from the C library.
    return memalign(MALLOC_ALIGN, size);
#else
    // Generic fallback: over-allocate, align manually, and stash the raw
    // malloc pointer in the slot just before the aligned block so that
    // fastFree can recover it via ((unsigned char**)ptr)[-1].
    unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + MALLOC_ALIGN);
    if (!udata)
        return 0;
    unsigned char** adata = alignPtr((unsigned char**)udata + 1, MALLOC_ALIGN);
    adata[-1] = udata;
    return adata;
#endif
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq $0x0, 0x8(%rsp) movq 0x10(%rsp), %rdx leaq 0x8(%rsp), %rdi movl $0x10, %esi callq 0x140d0 cmpl $0x0, %eax je 0x156d4 movq $0x0, 0x8(%rsp) movq 0x8(%rsp), %rax addq $0x18, %rsp retq nop
/tongxiaobin[P]ncnn/benchmark/../src/allocator.h
ncnn::fastFree(void*)
// Releases memory obtained from fastMalloc; a null pointer is a no-op.
// The branches mirror fastMalloc's allocation strategy exactly.
static inline void fastFree(void* ptr)
{
    if (ptr)
    {
#if _MSC_VER
        // MSVC: counterpart of _aligned_malloc.
        _aligned_free(ptr);
#elif _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
        // posix_memalign memory is released with plain free().
        free(ptr);
#elif __ANDROID__ && __ANDROID_API__ < 17
        // memalign memory is released with plain free() here.
        free(ptr);
#else
        // Generic fallback: recover the raw malloc pointer that fastMalloc
        // stored immediately before the aligned block, and free that.
        unsigned char* udata = ((unsigned char**)ptr)[-1];
        free(udata);
#endif
    }
}
pushq %rax movq %rdi, (%rsp) cmpq $0x0, (%rsp) je 0x156f5 movq (%rsp), %rdi callq 0x142b0 popq %rax retq nopw (%rax,%rax)
/tongxiaobin[P]ncnn/benchmark/../src/allocator.h
ncnn::BenchNet::~BenchNet()
int load_model() { // load file int ret = 0; Option opt; opt.vulkan_compute = use_vulkan_compute; ModelBinFromEmpty mb; for (size_t i=0; i<layers.size(); i++) { Layer* layer = layers[i]; int lret = layer->load_model(mb); if (lret != 0) { fprintf(stderr, "layer load_model %d failed\n", (int)i); ret = -1; break; } int cret = layer->create_pipeline(opt); if (cret != 0) { fprintf(stderr, "layer create_pipeline %d failed\n", (int)i); ret = -1; break; } } #if NCNN_VULKAN if (use_vulkan_compute) { upload_model(); create_pipeline(); } #endif // NCNN_VULKAN return ret; }
pushq %rax movq %rdi, (%rsp) movq (%rsp), %rdi callq 0x284f0 popq %rax retq
/tongxiaobin[P]ncnn/benchmark/benchncnn.cpp
ncnn::Mat::create(int, unsigned long, ncnn::Allocator*)
inline void Mat::create(int _w, size_t _elemsize, Allocator* _allocator) { if (dims == 1 && w == _w && elemsize == _elemsize && packing == 1 && allocator == _allocator) return; release(); elemsize = _elemsize; packing = 1; allocator = _allocator; dims = 1; w = _w; h = 1; c = 1; cstep = w; if (total() > 0) { size_t totalsize = alignSize(total() * elemsize, 4); if (allocator) data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount)); else data = fastMalloc(totalsize + (int)sizeof(*refcount)); refcount = (int*)(((unsigned char*)data) + totalsize); *refcount = 1; } }
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movl %esi, 0x2c(%rsp) movq %rdx, 0x20(%rsp) movq %rcx, 0x18(%rsp) movq 0x30(%rsp), %rax movq %rax, 0x8(%rsp) cmpl $0x1, 0x28(%rax) jne 0x15bc5 movq 0x8(%rsp), %rax movl 0x2c(%rax), %eax cmpl 0x2c(%rsp), %eax jne 0x15bc5 movq 0x8(%rsp), %rax movq 0x10(%rax), %rax cmpq 0x20(%rsp), %rax jne 0x15bc5 movq 0x8(%rsp), %rax cmpl $0x1, 0x18(%rax) jne 0x15bc5 movq 0x8(%rsp), %rax movq 0x20(%rax), %rax cmpq 0x18(%rsp), %rax jne 0x15bc5 jmp 0x15cae movq 0x8(%rsp), %rdi callq 0x15cc0 movq 0x8(%rsp), %rdi movq 0x20(%rsp), %rax movq %rax, 0x10(%rdi) movl $0x1, 0x18(%rdi) movq 0x18(%rsp), %rax movq %rax, 0x20(%rdi) movl $0x1, 0x28(%rdi) movl 0x2c(%rsp), %eax movl %eax, 0x2c(%rdi) movl $0x1, 0x30(%rdi) movl $0x1, 0x34(%rdi) movslq 0x2c(%rdi), %rax movq %rax, 0x38(%rdi) callq 0x15d80 cmpq $0x0, %rax jbe 0x15cae movq 0x8(%rsp), %rdi callq 0x15d80 movq %rax, %rdi movq 0x8(%rsp), %rax imulq 0x10(%rax), %rdi movl $0x4, %esi callq 0x15670 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, 0x10(%rsp) cmpq $0x0, 0x20(%rax) je 0x15c7a movq 0x8(%rsp), %rax movq 0x20(%rax), %rdi movq 0x10(%rsp), %rsi addq $0x4, %rsi movq (%rdi), %rax callq *0x10(%rax) movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, (%rax) jmp 0x15c93 movq 0x10(%rsp), %rdi addq $0x4, %rdi callq 0x156a0 movq %rax, %rcx movq 0x8(%rsp), %rax movq %rcx, (%rax) movq 0x8(%rsp), %rax movq (%rax), %rcx addq 0x10(%rsp), %rcx movq %rcx, 0x8(%rax) movq 0x8(%rax), %rax movl $0x1, (%rax) addq $0x38, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax)
/tongxiaobin[P]ncnn/benchmark/../src/mat.h
ncnn::Mat::create(int, int, int, unsigned long, ncnn::Allocator*)
inline void Mat::create(int _w, int _h, int _c, size_t _elemsize, Allocator* _allocator) { if (dims == 3 && w == _w && h == _h && c == _c && elemsize == _elemsize && packing == 1 && allocator == _allocator) return; release(); elemsize = _elemsize; packing = 1; allocator = _allocator; dims = 3; w = _w; h = _h; c = _c; cstep = alignSize(w * h * elemsize, 16) / elemsize; if (total() > 0) { size_t totalsize = alignSize(total() * elemsize, 4); if (allocator) data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount)); else data = fastMalloc(totalsize + (int)sizeof(*refcount)); refcount = (int*)(((unsigned char*)data) + totalsize); *refcount = 1; } }
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movl %esi, 0x2c(%rsp) movl %edx, 0x28(%rsp) movl %ecx, 0x24(%rsp) movq %r8, 0x18(%rsp) movq %r9, 0x10(%rsp) movq 0x30(%rsp), %rax movq %rax, (%rsp) cmpl $0x3, 0x28(%rax) jne 0x16062 movq (%rsp), %rax movl 0x2c(%rax), %eax cmpl 0x2c(%rsp), %eax jne 0x16062 movq (%rsp), %rax movl 0x30(%rax), %eax cmpl 0x28(%rsp), %eax jne 0x16062 movq (%rsp), %rax movl 0x34(%rax), %eax cmpl 0x24(%rsp), %eax jne 0x16062 movq (%rsp), %rax movq 0x10(%rax), %rax cmpq 0x18(%rsp), %rax jne 0x16062 movq (%rsp), %rax cmpl $0x1, 0x18(%rax) jne 0x16062 movq (%rsp), %rax movq 0x20(%rax), %rax cmpq 0x10(%rsp), %rax jne 0x16062 jmp 0x16163 movq (%rsp), %rdi callq 0x15cc0 movq (%rsp), %rax movq 0x18(%rsp), %rcx movq %rcx, 0x10(%rax) movl $0x1, 0x18(%rax) movq 0x10(%rsp), %rcx movq %rcx, 0x20(%rax) movl $0x3, 0x28(%rax) movl 0x2c(%rsp), %ecx movl %ecx, 0x2c(%rax) movl 0x28(%rsp), %ecx movl %ecx, 0x30(%rax) movl 0x24(%rsp), %ecx movl %ecx, 0x34(%rax) movl 0x2c(%rax), %ecx imull 0x30(%rax), %ecx movslq %ecx, %rdi imulq 0x10(%rax), %rdi movl $0x10, %esi callq 0x15670 movq (%rsp), %rdi xorl %ecx, %ecx movl %ecx, %edx divq 0x10(%rdi) movq %rax, 0x38(%rdi) callq 0x15d80 cmpq $0x0, %rax jbe 0x16163 movq (%rsp), %rdi callq 0x15d80 movq %rax, %rdi movq (%rsp), %rax imulq 0x10(%rax), %rdi movl $0x4, %esi callq 0x15670 movq %rax, %rcx movq (%rsp), %rax movq %rcx, 0x8(%rsp) cmpq $0x0, 0x20(%rax) je 0x16131 movq (%rsp), %rax movq 0x20(%rax), %rdi movq 0x8(%rsp), %rsi addq $0x4, %rsi movq (%rdi), %rax callq *0x10(%rax) movq %rax, %rcx movq (%rsp), %rax movq %rcx, (%rax) jmp 0x16149 movq 0x8(%rsp), %rdi addq $0x4, %rdi callq 0x156a0 movq %rax, %rcx movq (%rsp), %rax movq %rcx, (%rax) movq (%rsp), %rax movq (%rax), %rcx addq 0x8(%rsp), %rcx movq %rcx, 0x8(%rax) movq 0x8(%rax), %rax movl $0x1, (%rax) addq $0x38, %rsp retq nopl (%rax,%rax)
/tongxiaobin[P]ncnn/benchmark/../src/mat.h
ncnn::PoolAllocator::~PoolAllocator()
PoolAllocator::~PoolAllocator() { clear(); if (!payouts.empty()) { fprintf(stderr, "FATAL ERROR! pool allocator destroyed too early\n"); std::list< std::pair<size_t, void*> >::iterator it = payouts.begin(); for (; it != payouts.end(); it++) { void* ptr = it->second; fprintf(stderr, "%p still in use\n", ptr); } } }
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movq 0x30(%rsp), %rdi movq %rdi, 0x8(%rsp) leaq 0xa3026(%rip), %rax # 0xb9280 movq %rax, (%rdi) callq 0x16370 jmp 0x16264 movq 0x8(%rsp), %rdi addq $0x78, %rdi callq 0x16ec0 testb $0x1, %al jne 0x16312 movq 0xa9d5f(%rip), %rax # 0xbffe0 movq (%rax), %rdi leaq 0x87fa1(%rip), %rsi # 0x9e22c movb $0x0, %al callq 0x14310 movq 0x8(%rsp), %rdi addq $0x78, %rdi callq 0x16ee0 movq %rax, 0x28(%rsp) movq 0x8(%rsp), %rdi addq $0x78, %rdi callq 0x16f30 movq %rax, 0x20(%rsp) leaq 0x28(%rsp), %rdi leaq 0x20(%rsp), %rsi callq 0x16f10 testb $0x1, %al jne 0x162cd jmp 0x16310 leaq 0x28(%rsp), %rdi callq 0x16f60 movq 0x8(%rax), %rax movq %rax, 0x18(%rsp) movq 0xa9cf9(%rip), %rax # 0xbffe0 movq (%rax), %rdi movq 0x18(%rsp), %rdx leaq 0x87f67(%rip), %rsi # 0x9e25d movb $0x0, %al callq 0x14310 leaq 0x28(%rsp), %rdi xorl %esi, %esi callq 0x16f80 movq %rax, 0x10(%rsp) jmp 0x162a5 jmp 0x16312 movq 0x8(%rsp), %rdi addq $0x78, %rdi callq 0x16fb0 movq 0x8(%rsp), %rdi addq $0x60, %rdi callq 0x16fb0 movq 0x8(%rsp), %rdi addq $0x30, %rdi callq 0x16eb0 movq 0x8(%rsp), %rdi addq $0x8, %rdi callq 0x16eb0 movq 0x8(%rsp), %rdi callq 0x16170 addq $0x38, %rsp retq movq %rax, %rdi callq 0x15e40 nopw %cs:(%rax,%rax)
/tongxiaobin[P]ncnn/src/allocator.cpp
ncnn::PoolAllocator::clear()
void PoolAllocator::clear() { budgets_lock.lock(); std::list< std::pair<size_t, void*> >::iterator it = budgets.begin(); for (; it != budgets.end(); it++) { void* ptr = it->second; ncnn::fastFree(ptr); } budgets.clear(); budgets_lock.unlock(); }
subq $0x38, %rsp movq %rdi, 0x30(%rsp) movq 0x30(%rsp), %rdi movq %rdi, 0x8(%rsp) addq $0x8, %rdi callq 0x16fc0 movq 0x8(%rsp), %rdi addq $0x60, %rdi callq 0x16ee0 movq %rax, 0x28(%rsp) movq 0x8(%rsp), %rdi addq $0x60, %rdi callq 0x16f30 movq %rax, 0x20(%rsp) leaq 0x28(%rsp), %rdi leaq 0x20(%rsp), %rsi callq 0x16f10 testb $0x1, %al jne 0x163c7 jmp 0x163f7 leaq 0x28(%rsp), %rdi callq 0x16f60 movq 0x8(%rax), %rax movq %rax, 0x18(%rsp) movq 0x18(%rsp), %rdi callq 0x16450 leaq 0x28(%rsp), %rdi xorl %esi, %esi callq 0x16f80 movq %rax, 0x10(%rsp) jmp 0x1639f movq 0x8(%rsp), %rdi addq $0x60, %rdi callq 0x16fd0 movq 0x8(%rsp), %rdi addq $0x8, %rdi callq 0x17000 addq $0x38, %rsp retq nopl (%rax,%rax)
/tongxiaobin[P]ncnn/src/allocator.cpp
ncnn::PoolAllocator::~PoolAllocator()
PoolAllocator::~PoolAllocator() { clear(); if (!payouts.empty()) { fprintf(stderr, "FATAL ERROR! pool allocator destroyed too early\n"); std::list< std::pair<size_t, void*> >::iterator it = payouts.begin(); for (; it != payouts.end(); it++) { void* ptr = it->second; fprintf(stderr, "%p still in use\n", ptr); } } }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) callq 0x16240 movq 0x8(%rsp), %rdi movl $0x90, %esi callq 0x14220 addq $0x18, %rsp retq nopl (%rax)
/tongxiaobin[P]ncnn/src/allocator.cpp
ncnn::fastFree(void*)
static inline void fastFree(void* ptr) { if (ptr) { #if _MSC_VER _aligned_free(ptr); #elif _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) free(ptr); #elif __ANDROID__ && __ANDROID_API__ < 17 free(ptr); #else unsigned char* udata = ((unsigned char**)ptr)[-1]; free(udata); #endif } }
pushq %rax movq %rdi, (%rsp) cmpq $0x0, (%rsp) je 0x16465 movq (%rsp), %rdi callq 0x142b0 popq %rax retq nopw (%rax,%rax)
/tongxiaobin[P]ncnn/src/allocator.h
ncnn::PoolAllocator::fastMalloc(unsigned long)
void* PoolAllocator::fastMalloc(size_t size) { budgets_lock.lock(); // find free budget std::list< std::pair<size_t, void*> >::iterator it = budgets.begin(); for (; it != budgets.end(); it++) { size_t bs = it->first; // size_compare_ratio ~ 100% if (bs >= size && ((bs * size_compare_ratio) >> 8) <= size) { void* ptr = it->second; budgets.erase(it); budgets_lock.unlock(); payouts_lock.lock(); payouts.push_back(std::make_pair(bs, ptr)); payouts_lock.unlock(); return ptr; } } budgets_lock.unlock(); // new void* ptr = ncnn::fastMalloc(size); payouts_lock.lock(); payouts.push_back(std::make_pair(size, ptr)); payouts_lock.unlock(); return ptr; }
subq $0x98, %rsp movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq 0x88(%rsp), %rdi movq %rdi, 0x18(%rsp) addq $0x8, %rdi callq 0x16fc0 movq 0x18(%rsp), %rdi addq $0x60, %rdi callq 0x16ee0 movq %rax, 0x78(%rsp) movq 0x18(%rsp), %rdi addq $0x60, %rdi callq 0x16f30 movq %rax, 0x70(%rsp) leaq 0x78(%rsp), %rdi leaq 0x70(%rsp), %rsi callq 0x16f10 testb $0x1, %al jne 0x1655b jmp 0x16674 leaq 0x78(%rsp), %rdi callq 0x16f60 movq (%rax), %rax movq %rax, 0x68(%rsp) movq 0x68(%rsp), %rax cmpq 0x80(%rsp), %rax jb 0x1665c movq 0x18(%rsp), %rcx movq 0x68(%rsp), %rax movl 0x58(%rcx), %ecx imulq %rcx, %rax shrq $0x8, %rax cmpq 0x80(%rsp), %rax ja 0x1665c leaq 0x78(%rsp), %rdi callq 0x16f60 movq %rax, %rcx movq 0x18(%rsp), %rax movq 0x8(%rcx), %rcx movq %rcx, 0x60(%rsp) addq $0x60, %rax movq %rax, 0x8(%rsp) leaq 0x58(%rsp), %rdi leaq 0x78(%rsp), %rsi callq 0x17060 movq 0x8(%rsp), %rdi movq 0x58(%rsp), %rsi callq 0x17010 movq 0x18(%rsp), %rdi movq %rax, 0x50(%rsp) addq $0x8, %rdi callq 0x17000 movq 0x18(%rsp), %rdi addq $0x30, %rdi callq 0x16fc0 movq 0x18(%rsp), %rax addq $0x78, %rax movq %rax, 0x10(%rsp) leaq 0x68(%rsp), %rdi leaq 0x60(%rsp), %rsi callq 0x170c0 movq 0x10(%rsp), %rdi movq %rax, 0x40(%rsp) movq %rdx, 0x48(%rsp) leaq 0x40(%rsp), %rsi callq 0x17080 movq 0x18(%rsp), %rdi addq $0x30, %rdi callq 0x17000 movq 0x60(%rsp), %rax movq %rax, 0x90(%rsp) jmp 0x166f4 jmp 0x1665e leaq 0x78(%rsp), %rdi xorl %esi, %esi callq 0x16f80 movq %rax, 0x38(%rsp) jmp 0x16530 movq 0x18(%rsp), %rdi addq $0x8, %rdi callq 0x17000 movq 0x80(%rsp), %rdi callq 0x16710 movq 0x18(%rsp), %rdi movq %rax, 0x30(%rsp) addq $0x30, %rdi callq 0x16fc0 movq 0x18(%rsp), %rax addq $0x78, %rax movq %rax, (%rsp) leaq 0x80(%rsp), %rdi leaq 0x30(%rsp), %rsi callq 0x170c0 movq (%rsp), %rdi movq %rax, 0x20(%rsp) movq %rdx, 0x28(%rsp) leaq 0x20(%rsp), %rsi callq 0x17080 movq 0x18(%rsp), %rdi addq $0x30, %rdi callq 0x17000 movq 0x30(%rsp), %rax movq %rax, 0x90(%rsp) movq 0x90(%rsp), %rax addq $0x98, %rsp retq nopw 
%cs:(%rax,%rax)
/tongxiaobin[P]ncnn/src/allocator.cpp
ncnn::fastMalloc(unsigned long)
static inline void* fastMalloc(size_t size) { #if _MSC_VER return _aligned_malloc(size, MALLOC_ALIGN); #elif _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) void* ptr = 0; if (posix_memalign(&ptr, MALLOC_ALIGN, size)) ptr = 0; return ptr; #elif __ANDROID__ && __ANDROID_API__ < 17 return memalign(MALLOC_ALIGN, size); #else unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + MALLOC_ALIGN); if (!udata) return 0; unsigned char** adata = alignPtr((unsigned char**)udata + 1, MALLOC_ALIGN); adata[-1] = udata; return adata; #endif }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq $0x0, 0x8(%rsp) movq 0x10(%rsp), %rdx leaq 0x8(%rsp), %rdi movl $0x10, %esi callq 0x140d0 cmpl $0x0, %eax je 0x16744 movq $0x0, 0x8(%rsp) movq 0x8(%rsp), %rax addq $0x18, %rsp retq nop
/tongxiaobin[P]ncnn/src/allocator.h
ncnn::PoolAllocator::fastFree(void*)
void PoolAllocator::fastFree(void* ptr) { payouts_lock.lock(); // return to budgets std::list< std::pair<size_t, void*> >::iterator it = payouts.begin(); for (; it != payouts.end(); it++) { if (it->second == ptr) { size_t size = it->first; payouts.erase(it); payouts_lock.unlock(); budgets_lock.lock(); budgets.push_back(std::make_pair(size, ptr)); budgets_lock.unlock(); return; } } payouts_lock.unlock(); fprintf(stderr, "FATAL ERROR! pool allocator get wild %p\n", ptr); ncnn::fastFree(ptr); }
subq $0x68, %rsp movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movq 0x60(%rsp), %rdi movq %rdi, 0x10(%rsp) addq $0x30, %rdi callq 0x16fc0 movq 0x10(%rsp), %rdi addq $0x78, %rdi callq 0x16ee0 movq %rax, 0x50(%rsp) movq 0x10(%rsp), %rdi addq $0x78, %rdi callq 0x16f30 movq %rax, 0x48(%rsp) leaq 0x50(%rsp), %rdi leaq 0x48(%rsp), %rsi callq 0x16f10 testb $0x1, %al jne 0x167af jmp 0x16886 leaq 0x50(%rsp), %rdi callq 0x16f60 movq 0x8(%rax), %rax cmpq 0x58(%rsp), %rax jne 0x1686e leaq 0x50(%rsp), %rdi callq 0x16f60 movq %rax, %rcx movq 0x10(%rsp), %rax movq (%rcx), %rcx movq %rcx, 0x40(%rsp) addq $0x78, %rax movq %rax, (%rsp) leaq 0x38(%rsp), %rdi leaq 0x50(%rsp), %rsi callq 0x17060 movq (%rsp), %rdi movq 0x38(%rsp), %rsi callq 0x17010 movq 0x10(%rsp), %rdi movq %rax, 0x30(%rsp) addq $0x30, %rdi callq 0x17000 movq 0x10(%rsp), %rdi addq $0x8, %rdi callq 0x16fc0 movq 0x10(%rsp), %rax addq $0x60, %rax movq %rax, 0x8(%rsp) leaq 0x40(%rsp), %rdi leaq 0x58(%rsp), %rsi callq 0x170c0 movq 0x8(%rsp), %rdi movq %rax, 0x20(%rsp) movq %rdx, 0x28(%rsp) leaq 0x20(%rsp), %rsi callq 0x17080 movq 0x10(%rsp), %rdi addq $0x8, %rdi callq 0x17000 jmp 0x168bb jmp 0x16870 leaq 0x50(%rsp), %rdi xorl %esi, %esi callq 0x16f80 movq %rax, 0x18(%rsp) jmp 0x16784 movq 0x10(%rsp), %rdi addq $0x30, %rdi callq 0x17000 movq 0xa9745(%rip), %rax # 0xbffe0 movq (%rax), %rdi movq 0x58(%rsp), %rdx leaq 0x879e3(%rip), %rsi # 0x9e28d movb $0x0, %al callq 0x14310 movq 0x58(%rsp), %rdi callq 0x16450 addq $0x68, %rsp retq
/tongxiaobin[P]ncnn/src/allocator.cpp
ncnn::cpu_support_arm_neon()
int cpu_support_arm_neon() { #ifdef __ANDROID__ #if __aarch64__ return g_hwcaps & HWCAP_ASIMD; #else return g_hwcaps & HWCAP_NEON; #endif #elif __IOS__ #if __aarch64__ return g_hw_cputype == CPU_TYPE_ARM64; #else return g_hw_cputype == CPU_TYPE_ARM && g_hw_cpusubtype > CPU_SUBTYPE_ARM_V7; #endif #else return 0; #endif }
xorl %eax, %eax retq nopw %cs:(%rax,%rax)
/tongxiaobin[P]ncnn/src/cpu.cpp
ncnn::set_cpu_powersave(int)
int set_cpu_powersave(int powersave) { #ifdef __ANDROID__ static std::vector<int> sorted_cpuids; static int little_cluster_offset = 0; if (sorted_cpuids.empty()) { // 0 ~ g_cpucount sorted_cpuids.resize(g_cpucount); for (int i=0; i<g_cpucount; i++) { sorted_cpuids[i] = i; } // descent sort by max frequency sort_cpuid_by_max_frequency(sorted_cpuids, &little_cluster_offset); } if (little_cluster_offset == 0 && powersave != 0) { powersave = 0; fprintf(stderr, "SMP cpu powersave not supported\n"); } // prepare affinity cpuid std::vector<int> cpuids; if (powersave == 0) { cpuids = sorted_cpuids; } else if (powersave == 1) { cpuids = std::vector<int>(sorted_cpuids.begin() + little_cluster_offset, sorted_cpuids.end()); } else if (powersave == 2) { cpuids = std::vector<int>(sorted_cpuids.begin(), sorted_cpuids.begin() + little_cluster_offset); } else { fprintf(stderr, "powersave %d not supported\n", powersave); return -1; } #ifdef _OPENMP // set affinity for each thread int num_threads = cpuids.size(); omp_set_num_threads(num_threads); std::vector<int> ssarets(num_threads, 0); #pragma omp parallel for for (int i=0; i<num_threads; i++) { ssarets[i] = set_sched_affinity(cpuids); } for (int i=0; i<num_threads; i++) { if (ssarets[i] != 0) { return -1; } } #else int ssaret = set_sched_affinity(cpuids); if (ssaret != 0) { return -1; } #endif g_powersave = powersave; return 0; #elif __IOS__ // thread affinity not supported on ios return -1; #else // TODO (void) powersave; // Avoid unused parameter warning. return -1; #endif }
movl %edi, -0x4(%rsp) movl $0xffffffff, %eax # imm = 0xFFFFFFFF retq nopw (%rax,%rax)
/tongxiaobin[P]ncnn/src/cpu.cpp