name
string
code
string
asm
string
file
string
SocketInternals::Close()
bool SocketInternals::Close() { if (SocketFD != INVALID_SOCKET) { #ifdef _MSC_VER if (closesocket(SocketFD) != 0) { outStr << "Close: failed to close socket, Error: " << WSAGetLastError() <<std::endl; return false; } WSACleanup(); #else if (close(SocketFD) != 0) { outStr << "Close: failed to close socket, Error: " << errno << std::endl; return false; } #endif SocketFD = INVALID_SOCKET; } return true; }
pushq %rbp movq %rsp, %rbp subq $0x20, %rsp movq %rdi, -0x10(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x18(%rbp) cmpl $-0x1, 0x8(%rax) je 0x25cd2 movq -0x18(%rbp), %rax movl 0x8(%rax), %edi callq 0x68d0 cmpl $0x0, %eax je 0x25cc7 movq -0x18(%rbp), %rax movq (%rax), %rdi leaq 0x5ad5(%rip), %rsi # 0x2b76e callq 0x6420 movq %rax, -0x20(%rbp) callq 0x6060 movq -0x20(%rbp), %rdi movl (%rax), %esi callq 0x67d0 movq %rax, %rdi movq 0x102f4(%rip), %rsi # 0x35fb0 callq 0x6490 movb $0x0, -0x1(%rbp) jmp 0x25cd6 movq -0x18(%rbp), %rax movl $0xffffffff, 0x8(%rax) # imm = 0xFFFFFFFF movb $0x1, -0x1(%rbp) movb -0x1(%rbp), %al andb $0x1, %al addq $0x20, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
SocketInternals::Open(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, unsigned short)
bool SocketInternals::Open(const std::string &host, const std::string &multicast, unsigned short port) { #ifdef _MSC_VER WSADATA wsaData; int retval = WSAStartup(WINSOCKVERSION, &wsaData); if (retval != 0) { outStr << "WSAStartup failed with error code " << retval << std::endl; return false; } #endif ServerAddr.sin_family = AF_INET; ServerAddr.sin_port = htons(port); ServerAddr.sin_addr.s_addr = EthUdpPort::IP_ULong(host); // Open UDP socket SocketFD = socket(PF_INET, SOCK_DGRAM, 0); if (SocketFD == INVALID_SOCKET) { outStr << "Failed to open UDP socket" << std::endl; return false; } // Enable broadcasts #ifdef _MSC_VER BOOL broadcastEnable = TRUE; DWORD packetInfo = 1; if (setsockopt(SocketFD, SOL_SOCKET, SO_BROADCAST, reinterpret_cast<const char *>(&broadcastEnable), sizeof(broadcastEnable)) != 0) { outStr << "Open: Failed to set SOCKET broadcast option" << std::endl; return false; } if (setsockopt(SocketFD, IPPROTO_IP, IP_PKTINFO, reinterpret_cast<const char *>(&packetInfo), sizeof(packetInfo)) != 0) { outStr << "Open: Failed to set SOCKET packet info option" << std::endl; } #else int broadcastEnable = 1; int packetInfo = 1; if (setsockopt(SocketFD, SOL_SOCKET, SO_BROADCAST, &broadcastEnable, sizeof(broadcastEnable)) != 0) { outStr << "Open: Failed to set SOCKET broadcast option" << std::endl; return false; } if (setsockopt(SocketFD, IPPROTO_IP, IP_PKTINFO, &packetInfo, sizeof(packetInfo)) != 0) { outStr << "Open: Failed to set SOCKET packet info option" << std::endl; } #endif // Determine the broadcast address. 
For simplicity, we assume that this is a Class A, B, or C network, // as defined by the InterNIC: // Class A: first byte is 0-127, default subnet mask is 255.0.0.0 // Class B: first byte is 128-191, default subnet mask is 255.255.0.0 // Class C: first byte is 192-223, default subnet mask is 255.255.255.0 // If the first byte is greater than 223, we use the global broadcast address, 255.255.255.255 ServerAddrBroadcast.sin_family = AF_INET; ServerAddrBroadcast.sin_port = htons(port); unsigned char firstByte = static_cast<unsigned char>(ServerAddr.sin_addr.s_addr&0x000000ff); if (firstByte < 128) ServerAddrBroadcast.sin_addr.s_addr = ServerAddr.sin_addr.s_addr|0xffffff00; else if (firstByte < 192) ServerAddrBroadcast.sin_addr.s_addr = ServerAddr.sin_addr.s_addr|0xffff0000; else if (firstByte < 224) ServerAddrBroadcast.sin_addr.s_addr = ServerAddr.sin_addr.s_addr|0xff000000; else ServerAddrBroadcast.sin_addr.s_addr = 0xffffffff; // UDP Multicast address ServerAddrMulticast.sin_family = AF_INET; ServerAddrMulticast.sin_port = htons(port); ServerAddrMulticast.sin_addr.s_addr = EthUdpPort::IP_ULong(multicast); outStr << "Server IP: " << host << ", Port: " << std::dec << port << std::endl; outStr << "Broadcast IP: " << EthUdpPort::IP_String(ServerAddrBroadcast.sin_addr.s_addr) << std::endl; outStr << "Multicast IP: " << EthUdpPort::IP_String(ServerAddrMulticast.sin_addr.s_addr) << std::endl; return true; }
pushq %rbp movq %rsp, %rbp subq $0xc0, %rsp movw %cx, %ax movq %rdi, -0x10(%rbp) movq %rsi, -0x18(%rbp) movq %rdx, -0x20(%rbp) movw %ax, -0x22(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x88(%rbp) movw $0x2, 0x34(%rax) movzwl -0x22(%rbp), %edi callq 0x6080 movw %ax, %cx movq -0x88(%rbp), %rax movw %cx, 0x36(%rax) movq -0x18(%rbp), %rdi callq 0x260b0 movl %eax, %ecx movq -0x88(%rbp), %rax movl %ecx, 0x38(%rax) movl $0x2, %esi xorl %edx, %edx movl %esi, %edi callq 0x6130 movl %eax, %ecx movq -0x88(%rbp), %rax movl %ecx, 0x8(%rax) cmpl $-0x1, 0x8(%rax) jne 0x25d99 movq -0x88(%rbp), %rax movq (%rax), %rdi leaq 0x594b(%rip), %rsi # 0x2b6c7 callq 0x6420 movq %rax, %rdi movq 0x10225(%rip), %rsi # 0x35fb0 callq 0x6490 movb $0x0, -0x1(%rbp) jmp 0x26091 movq -0x88(%rbp), %rax movl $0x1, -0x28(%rbp) movl $0x1, -0x2c(%rbp) movl 0x8(%rax), %edi movl $0x1, %esi movl $0x6, %edx leaq -0x28(%rbp), %rcx movl $0x4, %r8d callq 0x6810 cmpl $0x0, %eax je 0x25dfd movq -0x88(%rbp), %rax movq (%rax), %rdi leaq 0x5901(%rip), %rsi # 0x2b6e1 callq 0x6420 movq %rax, %rdi movq 0x101c1(%rip), %rsi # 0x35fb0 callq 0x6490 movb $0x0, -0x1(%rbp) jmp 0x26091 movq -0x88(%rbp), %rax movl 0x8(%rax), %edi xorl %esi, %esi movl $0x8, %edx leaq -0x2c(%rbp), %rcx movl $0x4, %r8d callq 0x6810 cmpl $0x0, %eax je 0x25e47 movq -0x88(%rbp), %rax movq (%rax), %rdi leaq 0x58da(%rip), %rsi # 0x2b70d callq 0x6420 movq %rax, %rdi movq 0x1016e(%rip), %rsi # 0x35fb0 callq 0x6490 movq -0x88(%rbp), %rax movw $0x2, 0x44(%rax) movzwl -0x22(%rbp), %edi callq 0x6080 movw %ax, %cx movq -0x88(%rbp), %rax movw %cx, 0x46(%rax) movl 0x38(%rax), %eax andl $0xff, %eax movb %al, -0x2d(%rbp) movzbl -0x2d(%rbp), %eax cmpl $0x80, %eax jge 0x25e96 movq -0x88(%rbp), %rax movl 0x38(%rax), %ecx orl $0xffffff00, %ecx # imm = 0xFFFFFF00 movl %ecx, 0x48(%rax) jmp 0x25ee8 movzbl -0x2d(%rbp), %eax cmpl $0xc0, %eax jge 0x25eb6 movq -0x88(%rbp), %rax movl 0x38(%rax), %ecx orl $0xffff0000, %ecx # imm = 0xFFFF0000 movl %ecx, 0x48(%rax) jmp 0x25ee6 
movzbl -0x2d(%rbp), %eax cmpl $0xe0, %eax jge 0x25ed6 movq -0x88(%rbp), %rax movl 0x38(%rax), %ecx orl $0xff000000, %ecx # imm = 0xFF000000 movl %ecx, 0x48(%rax) jmp 0x25ee4 movq -0x88(%rbp), %rax movl $0xffffffff, 0x48(%rax) # imm = 0xFFFFFFFF jmp 0x25ee6 jmp 0x25ee8 movq -0x88(%rbp), %rax movw $0x2, 0x54(%rax) movzwl -0x22(%rbp), %edi callq 0x6080 movw %ax, %cx movq -0x88(%rbp), %rax movw %cx, 0x56(%rax) movq -0x20(%rbp), %rdi callq 0x260b0 movl %eax, %ecx movq -0x88(%rbp), %rax movl %ecx, 0x58(%rax) movq (%rax), %rdi leaq 0x5810(%rip), %rsi # 0x2b73b callq 0x6420 movq %rax, %rdi movq -0x18(%rbp), %rsi callq 0x63b0 movq %rax, %rdi leaq 0x5801(%rip), %rsi # 0x2b747 callq 0x6420 movq %rax, %rdi leaq -0x1a315(%rip), %rsi # 0xbc40 callq 0x61f0 movq %rax, %rdi movzwl -0x22(%rbp), %esi callq 0x6770 movq %rax, %rdi movq 0x10040(%rip), %rsi # 0x35fb0 callq 0x6490 movq -0x88(%rbp), %rax movq (%rax), %rdi leaq 0x57ca(%rip), %rsi # 0x2b750 callq 0x6420 movq %rax, %rcx movq -0x88(%rbp), %rax movq %rcx, -0xa0(%rbp) movl 0x48(%rax), %esi leaq -0x50(%rbp), %rdi movq %rdi, -0x98(%rbp) callq 0x260e0 movq -0xa0(%rbp), %rdi movq -0x98(%rbp), %rsi callq 0x63b0 movq %rax, -0x90(%rbp) jmp 0x25fcb movq -0x90(%rbp), %rdi movq 0xffd7(%rip), %rsi # 0x35fb0 callq 0x6490 jmp 0x25fe0 leaq -0x50(%rbp), %rdi callq 0x62d0 movq -0x88(%rbp), %rax movq (%rax), %rdi leaq 0x5765(%rip), %rsi # 0x2b75f callq 0x6420 movq %rax, %rcx movq -0x88(%rbp), %rax movq %rcx, -0xb8(%rbp) movl 0x58(%rax), %esi leaq -0x80(%rbp), %rdi movq %rdi, -0xb0(%rbp) callq 0x260e0 movq -0xb8(%rbp), %rdi movq -0xb0(%rbp), %rsi callq 0x63b0 movq %rax, -0xa8(%rbp) jmp 0x2603f movq -0xa8(%rbp), %rdi movq 0xff63(%rip), %rsi # 0x35fb0 callq 0x6490 jmp 0x26054 leaq -0x80(%rbp), %rdi callq 0x62d0 movb $0x1, -0x1(%rbp) jmp 0x26091 movq %rax, %rcx movl %edx, %eax movq %rcx, -0x58(%rbp) movl %eax, -0x5c(%rbp) leaq -0x50(%rbp), %rdi callq 0x62d0 jmp 0x2609f movq %rax, %rcx movl %edx, %eax movq %rcx, -0x58(%rbp) movl %eax, -0x5c(%rbp) 
leaq -0x80(%rbp), %rdi callq 0x62d0 jmp 0x2609f movb -0x1(%rbp), %al andb $0x1, %al addq $0xc0, %rsp popq %rbp retq movq -0x58(%rbp), %rdi callq 0x67e0 nopl (%rax,%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
EthUdpPort::IP_String[abi:cxx11](unsigned int)
// Convert a 32-bit IPv4 address (network byte order, as stored in
// sockaddr_in.sin_addr.s_addr) to dotted-decimal string form.
// Returns an empty string if the conversion fails.
std::string EthUdpPort::IP_String(uint32_t IPaddr)
{
    // fix: start zeroed so a failed conversion cannot hand an uninitialized
    // buffer to std::string (undefined behavior in the original).
    char IPstr[INET_ADDRSTRLEN] = "";
#ifdef _MSC_VER
    // Windows does not provide inet_ntop prior to Vista, so we use inet_ntoa.
    strncpy(IPstr, inet_ntoa(*reinterpret_cast<const struct in_addr *>(&IPaddr)), INET_ADDRSTRLEN);
    IPstr[INET_ADDRSTRLEN-1] = '\0';   // fix: strncpy does not guarantee termination
#else
    // inet_ntop returns NULL on failure; leave IPstr empty in that case.
    if (!inet_ntop(AF_INET, reinterpret_cast<const struct in_addr *>(&IPaddr), IPstr, INET_ADDRSTRLEN))
        IPstr[0] = '\0';
#endif
    return std::string(IPstr);
}
pushq %rbp movq %rsp, %rbp subq $0x60, %rsp movq %rdi, -0x50(%rbp) movq %rdi, %rax movq %rax, -0x58(%rbp) movq %rdi, -0x8(%rbp) movl %esi, -0xc(%rbp) movl $0x2, %edi leaq -0xc(%rbp), %rsi leaq -0x20(%rbp), %rdx movq %rdx, -0x48(%rbp) movl $0x10, %ecx callq 0x64b0 leaq -0x21(%rbp), %rdi movq %rdi, -0x40(%rbp) callq 0x67f0 movq -0x50(%rbp), %rdi movq -0x48(%rbp), %rsi movq -0x40(%rbp), %rdx callq 0xbb90 jmp 0x26135 leaq -0x21(%rbp), %rdi callq 0x64c0 movq -0x58(%rbp), %rax addq $0x60, %rsp popq %rbp retq movq %rax, %rcx movl %edx, %eax movq %rcx, -0x30(%rbp) movl %eax, -0x34(%rbp) leaq -0x21(%rbp), %rdi callq 0x64c0 movq -0x30(%rbp), %rdi callq 0x67e0 nopw %cs:(%rax,%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
SocketInternals::SetMulticastIP(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
bool SocketInternals::SetMulticastIP(const std::string &multicast) { unsigned long s_addr_new = EthUdpPort::IP_ULong(multicast); unsigned long first_byte = s_addr_new & 0x000000ff; if ((first_byte >= 224) && (first_byte <= 239)) { ServerAddrBroadcast.sin_addr.s_addr = s_addr_new; return true; } return false; }
pushq %rbp movq %rsp, %rbp subq $0x30, %rsp movq %rdi, -0x10(%rbp) movq %rsi, -0x18(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x30(%rbp) movq -0x18(%rbp), %rdi callq 0x260b0 movl %eax, %eax movq %rax, -0x20(%rbp) movq -0x20(%rbp), %rax andq $0xff, %rax movq %rax, -0x28(%rbp) cmpq $0xe0, -0x28(%rbp) jb 0x261ca cmpq $0xef, -0x28(%rbp) ja 0x261ca movq -0x30(%rbp), %rax movq -0x20(%rbp), %rcx movl %ecx, 0x48(%rax) movb $0x1, -0x1(%rbp) jmp 0x261ce movb $0x0, -0x1(%rbp) movb -0x1(%rbp), %al andb $0x1, %al addq $0x30, %rsp popq %rbp retq nopl (%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
SocketInternals::Send(unsigned char const*, unsigned long, SocketInternals::EthDestType, unsigned char)
int SocketInternals::Send(const unsigned char *bufsend, size_t msglen, EthDestType destType, unsigned char ip_offset) { int retval; if (destType == DEST_BROADCAST) { retval = sendto(SocketFD, reinterpret_cast<const char *>(bufsend), msglen, 0, reinterpret_cast<struct sockaddr *>(&ServerAddrBroadcast), sizeof(ServerAddrBroadcast)); } else if (destType == DEST_MULTICAST) { retval = sendto(SocketFD, reinterpret_cast<const char *>(bufsend), msglen, 0, reinterpret_cast<struct sockaddr *>(&ServerAddrMulticast), sizeof(ServerAddrMulticast)); } else { // Save base address (e.g., 169.254.0.100) uint32_t s_addr_saved = ServerAddr.sin_addr.s_addr; // Update IP address if needed (Firmware Rev 9+) if (ip_offset > 0) ServerAddr.sin_addr.s_addr += (ip_offset<<24); retval = sendto(SocketFD, reinterpret_cast<const char *>(bufsend), msglen, 0, reinterpret_cast<struct sockaddr *>(&ServerAddr), sizeof(ServerAddr)); // Restore base address ServerAddr.sin_addr.s_addr = s_addr_saved; } if (retval == SOCKET_ERROR) { #ifdef _MSC_VER outStr << "Send: failed to send: " << WSAGetLastError() << std::endl; #else outStr << "Send: failed to send: " << strerror(errno) << std::endl; #endif return -1; } else if (retval != static_cast<int>(msglen)) { outStr << "Send: failed to send the whole message" << std::endl; } return retval; }
pushq %rbp movq %rsp, %rbp subq $0x40, %rsp movb %r8b, %al movq %rdi, -0x10(%rbp) movq %rsi, -0x18(%rbp) movq %rdx, -0x20(%rbp) movl %ecx, -0x24(%rbp) movb %al, -0x25(%rbp) movq -0x10(%rbp), %rax movq %rax, -0x38(%rbp) cmpl $0x2, -0x24(%rbp) jne 0x26234 movq -0x38(%rbp), %r8 movl 0x8(%r8), %edi movq -0x18(%rbp), %rsi movq -0x20(%rbp), %rdx addq $0x44, %r8 xorl %ecx, %ecx movl $0x10, %r9d callq 0x6090 movl %eax, -0x2c(%rbp) jmp 0x262b7 cmpl $0x1, -0x24(%rbp) jne 0x26260 movq -0x38(%rbp), %r8 movl 0x8(%r8), %edi movq -0x18(%rbp), %rsi movq -0x20(%rbp), %rdx addq $0x54, %r8 xorl %ecx, %ecx movl $0x10, %r9d callq 0x6090 movl %eax, -0x2c(%rbp) jmp 0x262b5 movq -0x38(%rbp), %rax movl 0x38(%rax), %eax movl %eax, -0x30(%rbp) movzbl -0x25(%rbp), %eax cmpl $0x0, %eax jle 0x26284 movq -0x38(%rbp), %rax movzbl -0x25(%rbp), %ecx shll $0x18, %ecx addl 0x38(%rax), %ecx movl %ecx, 0x38(%rax) movq -0x38(%rbp), %r8 movl 0x8(%r8), %edi movq -0x18(%rbp), %rsi movq -0x20(%rbp), %rdx addq $0x34, %r8 xorl %ecx, %ecx movl $0x10, %r9d callq 0x6090 movq %rax, %rcx movq -0x38(%rbp), %rax movl %ecx, -0x2c(%rbp) movl -0x30(%rbp), %ecx movl %ecx, 0x38(%rax) jmp 0x262b7 cmpl $-0x1, -0x2c(%rbp) jne 0x26304 movq -0x38(%rbp), %rax movq (%rax), %rdi leaq 0x54ca(%rip), %rsi # 0x2b795 callq 0x6420 movq %rax, -0x40(%rbp) callq 0x6060 movl (%rax), %edi callq 0x61e0 movq -0x40(%rbp), %rdi movq %rax, %rsi callq 0x6420 movq %rax, %rdi movq 0xfcba(%rip), %rsi # 0x35fb0 callq 0x6490 movl $0xffffffff, -0x4(%rbp) # imm = 0xFFFFFFFF jmp 0x26339 movl -0x2c(%rbp), %eax movq -0x20(%rbp), %rcx cmpl %ecx, %eax je 0x26331 movq -0x38(%rbp), %rax movq (%rax), %rdi leaq 0x548f(%rip), %rsi # 0x2b7ac callq 0x6420 movq %rax, %rdi movq 0xfc84(%rip), %rsi # 0x35fb0 callq 0x6490 jmp 0x26333 movl -0x2c(%rbp), %eax movl %eax, -0x4(%rbp) movl -0x4(%rbp), %eax addq $0x40, %rsp popq %rbp retq nopw %cs:(%rax,%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
SocketInternals::FlushRecv()
int SocketInternals::FlushRecv(void) { unsigned char buffer[FW_QRESPONSE_SIZE]; int numFlushed = 0; // If the packet is larger than FW_QRESPONSE_SIZE, the excess bytes will be discarded. while (Recv(buffer, FW_QRESPONSE_SIZE, 0.0) > 0) numFlushed++; return numFlushed; }
pushq %rbp movq %rsp, %rbp subq $0x30, %rsp movq %rdi, -0x8(%rbp) movq -0x8(%rbp), %rax movq %rax, -0x30(%rbp) movl $0x0, -0x24(%rbp) movq -0x30(%rbp), %rdi leaq -0x20(%rbp), %rsi movl $0x14, %edx xorps %xmm0, %xmm0 callq 0x26350 cmpl $0x0, %eax jle 0x268b0 movl -0x24(%rbp), %eax addl $0x1, %eax movl %eax, -0x24(%rbp) jmp 0x2688b movl -0x24(%rbp), %eax addq $0x30, %rsp popq %rbp retq nopl (%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
EthUdpPort::EthUdpPort(int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, bool, std::ostream&, bool (*)(EthBasePort&, unsigned char, std::ostream&))
// Construct a UDP-based Ethernet port.
// portNum, forceFwBridge, debugStream, and cb are forwarded to EthBasePort;
// ServerIP records the target host, MulticastIP starts at the compile-time
// default, and UDP_port defaults to 1394 (presumably chosen to echo the
// IEEE-1394 heritage of the boards -- not established by this file).
// A heap-allocated SocketInternals is stored in sockPtr, then Init() runs
// immediately and its outcome is reported to outStr.
EthUdpPort::EthUdpPort(int portNum, const std::string &serverIP, bool forceFwBridge,
                       std::ostream &debugStream, EthCallbackType cb) :
    EthBasePort(portNum, forceFwBridge, debugStream, cb),
    ServerIP(serverIP),
    MulticastIP(ETH_UDP_MULTICAST_DEFAULT_IP),
    UDP_port(1394)
{
    sockPtr = new SocketInternals(debugStream);
    const bool initialized = Init();
    if (initialized)
        outStr << "Initialization done" << std::endl;
    else
        outStr << "Initialization failed" << std::endl;
}
pushq %rbp movq %rsp, %rbp subq $0x80, %rsp movb %cl, %al movq %rdi, -0x8(%rbp) movl %esi, -0xc(%rbp) movq %rdx, -0x18(%rbp) andb $0x1, %al movb %al, -0x19(%rbp) movq %r8, -0x28(%rbp) movq %r9, -0x30(%rbp) movq -0x8(%rbp), %rdi movq %rdi, -0x50(%rbp) movl -0xc(%rbp), %esi movq -0x28(%rbp), %rcx movq -0x30(%rbp), %r8 movzbl -0x19(%rbp), %edx andl $0x1, %edx callq 0x1b120 movq -0x50(%rbp), %rdi leaq 0xf279(%rip), %rax # 0x35b88 addq $0x10, %rax movq %rax, (%rdi) addq $0x528, %rdi # imm = 0x528 movq %rdi, -0x48(%rbp) movq -0x18(%rbp), %rsi callq 0x6260 jmp 0x2692c movq -0x50(%rbp), %rax addq $0x548, %rax # imm = 0x548 movq %rax, -0x60(%rbp) leaq -0x3d(%rbp), %rdi movq %rdi, -0x58(%rbp) callq 0x67f0 movq -0x60(%rbp), %rdi movq -0x58(%rbp), %rdx leaq 0x4ee5(%rip), %rsi # 0x2b83b callq 0xbb90 jmp 0x2695d leaq -0x3d(%rbp), %rdi callq 0x64c0 movq -0x50(%rbp), %rax movw $0x572, 0x568(%rax) # imm = 0x572 movl $0x68, %edi callq 0x6430 movq %rax, -0x68(%rbp) jmp 0x26983 movq -0x68(%rbp), %rdi movq -0x28(%rbp), %rsi callq 0x25b50 jmp 0x26992 movq -0x50(%rbp), %rdi movq -0x68(%rbp), %rax movq %rax, 0x520(%rdi) movq (%rdi), %rax movq (%rax), %rax callq *%rax movb %al, -0x69(%rbp) jmp 0x269ae movb -0x69(%rbp), %al testb $0x1, %al jne 0x269ba jmp 0x26a3d movq -0x50(%rbp), %rax movq 0x8(%rax), %rdi leaq 0x4e7e(%rip), %rsi # 0x2b847 callq 0x6420 movq %rax, -0x78(%rbp) jmp 0x269d4 movq -0x78(%rbp), %rdi movq 0xf5d1(%rip), %rsi # 0x35fb0 callq 0x6490 jmp 0x269e6 jmp 0x26a6b movq %rax, %rcx movl %edx, %eax movq %rcx, -0x38(%rbp) movl %eax, -0x3c(%rbp) jmp 0x26a86 movq %rax, %rcx movl %edx, %eax movq %rcx, -0x38(%rbp) movl %eax, -0x3c(%rbp) leaq -0x3d(%rbp), %rdi callq 0x64c0 jmp 0x26a7d movq %rax, %rcx movl %edx, %eax movq %rcx, -0x38(%rbp) movl %eax, -0x3c(%rbp) jmp 0x26a74 movq -0x68(%rbp), %rdi movq %rax, %rcx movl %edx, %eax movq %rcx, -0x38(%rbp) movl %eax, -0x3c(%rbp) movl $0x68, %esi callq 0x6440 jmp 0x26a74 movq -0x50(%rbp), %rax movq 0x8(%rax), %rdi leaq 0x4e0f(%rip), %rsi # 
0x2b85b callq 0x6420 movq %rax, -0x80(%rbp) jmp 0x26a57 movq -0x80(%rbp), %rdi movq 0xf54e(%rip), %rsi # 0x35fb0 callq 0x6490 jmp 0x26a69 jmp 0x26a6b addq $0x80, %rsp popq %rbp retq movq -0x60(%rbp), %rdi callq 0x62d0 movq -0x48(%rbp), %rdi callq 0x62d0 movq -0x50(%rbp), %rdi callq 0x1b1d0 movq -0x38(%rbp), %rdi callq 0x67e0 nopl (%rax,%rax)
/jhu-cisst[P]mechatronics-software/lib/code/EthUdpPort.cpp
roaring_bitmap_aligned_free
/* Release a block obtained from the library's matching aligned-allocation
 * routine, dispatching to the correct platform-specific free function.
 * Passing NULL is safe on the free() path (guaranteed by the C standard);
 * presumably also safe for the Windows variants -- confirm against the
 * MSVC/MinGW documentation. */
static void roaring_bitmap_aligned_free(void* memblock) {
#ifdef _MSC_VER
    /* MSVC: memory from _aligned_malloc must be freed with _aligned_free. */
    _aligned_free(memblock);
#elif defined(__MINGW32__) || defined(__MINGW64__)
    /* MinGW: pairs with __mingw_aligned_malloc. */
    __mingw_aligned_free(memblock);
#else
    /* Elsewhere, the aligned allocation is compatible with plain free(). */
    free(memblock);
#endif
}
jmp 0x1030
/lucaderi[P]CRoaring/src/memory.c
roaring_bitmap_lazy_or_inplace
/* In-place lazy union: x1 |= x2.
 * Merges the sorted container arrays of the two bitmaps, key by key.
 * If bitsetconversion is true, a non-bitset container in x1 is first
 * converted to a bitset before the union.
 * NOTE(review): the "lazy" ior presumably defers cardinality maintenance,
 * so callers are expected to repair the bitmap afterwards -- confirm
 * against the CRoaring public API documentation. */
void roaring_bitmap_lazy_or_inplace(roaring_bitmap_t *x1,
                                    const roaring_bitmap_t *x2,
                                    const bool bitsetconversion) {
    uint8_t result_type = 0;
    int length1 = x1->high_low_container.size;
    const int length2 = x2->high_low_container.size;
    /* Trivial cases: empty RHS is a no-op; empty LHS becomes a copy of x2. */
    if (0 == length2) return;
    if (0 == length1) {
        roaring_bitmap_overwrite(x1, x2);
        return;
    }
    int pos1 = 0, pos2 = 0;
    uint8_t type1, type2;
    uint16_t s1 = ra_get_key_at_index(&x1->high_low_container, (uint16_t)pos1);
    uint16_t s2 = ra_get_key_at_index(&x2->high_low_container, (uint16_t)pos2);
    while (true) {
        if (s1 == s2) {
            /* Same key on both sides: union the two containers in place. */
            container_t *c1 = ra_get_container_at_index(&x1->high_low_container,
                                                        (uint16_t)pos1, &type1);
            /* A full container absorbs anything; skip the work entirely. */
            if (!container_is_full(c1, type1)) {
                if ((bitsetconversion == false) ||
                    (get_container_type(c1, type1) == BITSET_CONTAINER_TYPE)) {
                    /* Copy-on-write: make sure c1 is safe to mutate. */
                    c1 = get_writable_copy_if_shared(c1, &type1);
                } else {
                    /* convert to bitset */
                    container_t *old_c1 = c1;
                    uint8_t old_type1 = type1;
                    c1 = container_mutable_unwrap_shared(c1, &type1);
                    c1 = container_to_bitset(c1, type1);
                    container_free(old_c1, old_type1);
                    type1 = BITSET_CONTAINER_TYPE;
                }
                container_t *c2 = ra_get_container_at_index(
                    &x2->high_low_container, (uint16_t)pos2, &type2);
                container_t *c =
                    container_lazy_ior(c1, type1, c2, type2, &result_type);
                if (c != c1) {
                    /* in this instance a new container was created,
                     * and we need to free the old one */
                    container_free(c1, type1);
                }
                ra_set_container_at_index(&x1->high_low_container, pos1, c,
                                          result_type);
            }
            ++pos1;
            ++pos2;
            if (pos1 == length1) break;
            if (pos2 == length2) break;
            s1 = ra_get_key_at_index(&x1->high_low_container, (uint16_t)pos1);
            s2 = ra_get_key_at_index(&x2->high_low_container, (uint16_t)pos2);
        } else if (s1 < s2) { /* s1 < s2 : key exists only in x1, keep as-is */
            pos1++;
            if (pos1 == length1) break;
            s1 = ra_get_key_at_index(&x1->high_low_container, (uint16_t)pos1);
        } else { /* s1 > s2 : key exists only in x2, insert a copy into x1 */
            container_t *c2 = ra_get_container_at_index(&x2->high_low_container,
                                                        (uint16_t)pos2, &type2);
            /* container_t *c2_clone = container_clone(c2, type2); */
            c2 = get_copy_of_container(c2, &type2, is_cow(x2));
            if (is_cow(x2)) {
                /* COW source: record the (possibly shared-wrapped) container
                 * back into x2 so its refcounting stays consistent. */
                ra_set_container_at_index(&x2->high_low_container, pos2, c2,
                                          type2);
            }
            ra_insert_new_key_value_at(&x1->high_low_container, pos1, s2, c2,
                                       type2);
            pos1++;
            length1++; /* x1 grew by one container */
            pos2++;
            if (pos2 == length2) break;
            s2 = ra_get_key_at_index(&x2->high_low_container, (uint16_t)pos2);
        }
    }
    /* x1 exhausted first: bulk-append the remaining containers of x2. */
    if (pos1 == length1) {
        ra_append_copy_range(&x1->high_low_container, &x2->high_low_container,
                             pos2, length2, is_cow(x2));
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x58, %rsp movb $0x0, 0xb(%rsp) movl (%rsi), %ecx testl %ecx, %ecx je 0x5ea3 movq %rsi, %rbx movl (%rdi), %eax testl %eax, %eax je 0x5eb2 movl %edx, 0x34(%rsp) movq %rax, 0x28(%rsp) movq %rdi, 0x20(%rsp) movq 0x10(%rdi), %rax movzwl (%rax), %eax movw %ax, 0xe(%rsp) movq 0x10(%rbx), %rax movzwl (%rax), %eax movw %ax, 0xc(%rsp) movl %ecx, 0x30(%rsp) movl %ecx, %r8d negl %r8d xorl %r11d, %r11d xorl %eax, %eax movq %r8, 0x38(%rsp) movl %r11d, %ecx movq %rcx, 0x10(%rsp) leal (%r8,%r11), %r14d movq %r11, 0x48(%rsp) movslq %r11d, %rcx movq %rcx, 0x18(%rsp) leaq (,%rcx,8), %rcx movq %rcx, 0x50(%rsp) movl %eax, %r10d xorl %ebp, %ebp movq 0x10(%rsp), %rax leaq (%rax,%rbp), %r12 leaq (%r10,%rbp), %r15 movzwl 0xe(%rsp), %eax cmpw 0xc(%rsp), %ax je 0x5c4e jb 0x5ce3 movq %r10, %r13 movq 0x18(%rbx), %rax movzwl %r12w, %ecx movb (%rax,%rcx), %al movb %al, 0xa(%rsp) movq 0x8(%rbx), %rax movl %ecx, %ecx movq (%rax,%rcx,8), %rdi movzbl 0x20(%rbx), %edx andl $0x1, %edx leaq 0xa(%rsp), %rsi callq 0x198df testb $0x1, 0x20(%rbx) je 0x5bf9 movb 0xa(%rsp), %cl movq 0x8(%rbx), %rdx addq 0x50(%rsp), %rdx movq %rax, (%rdx,%rbp,8) movq 0x18(%rbx), %rdx addq 0x18(%rsp), %rdx movb %cl, (%rbp,%rdx) movzbl 0xa(%rsp), %r8d movzwl 0xc(%rsp), %edx movq 0x20(%rsp), %rdi movl %r15d, %esi movq %rax, %rcx callq 0xcbe3 leal (%r14,%rbp), %eax cmpl $-0x1, %eax je 0x5c38 movq 0x10(%rsp), %rax addq %rbp, %rax incq %rax movq 0x10(%rbx), %rcx movzwl %ax, %eax movzwl (%rcx,%rax,2), %eax movw %ax, 0xc(%rsp) incq %rbp movl %ebp, %eax addl %r14d, %eax movq %r13, %r10 jne 0x5b85 jmp 0x5e70 movq 0x20(%rsp), %r14 movq 0x18(%r14), %rax leal (%r10,%rbp), %ecx movzwl %cx, %ecx movb (%rax,%rcx), %r15b movb %r15b, 0x9(%rsp) movq 0x8(%r14), %rax movq (%rax,%rcx,8), %r13 movl %r15d, %ecx movq %r13, %rax cmpb $0x4, %r15b jne 0x5c83 movb 0x8(%r13), %cl movq (%r13), %rax cmpb $0x3, %cl je 0x5d24 movzbl %cl, %ecx cmpl $0x2, %ecx cmpl $0x10000, (%rax) # imm = 
0x10000 sete %al testb %al, %al jne 0x5dfc cmpb $0x0, 0x34(%rsp) movq %r10, 0x40(%rsp) je 0x5cc4 movl %r15d, %eax cmpb $0x4, %r15b jne 0x5cbc movb 0x8(%r13), %al cmpb $0x1, %al jne 0x5d45 cmpb $0x4, %r15b jne 0x5d97 movq %r13, %rdi leaq 0x9(%rsp), %rsi callq 0x19973 movq %rax, %r13 jmp 0x5d97 leaq (%r10,%rbp), %rax incq %rax movq 0x28(%rsp), %rdi leal (%rdi,%rbp), %ecx cmpl %eax, %ecx je 0x5eda movq 0x10(%rsp), %rsi addq %rbp, %rsi movq 0x20(%rsp), %rcx movq 0x10(%rcx), %rcx movzwl %ax, %edx movzwl (%rcx,%rdx,2), %ecx movw %cx, 0xe(%rsp) movl %esi, %r11d movq 0x38(%rsp), %r8 jmp 0x5e64 cmpl $0x1, (%rax) jne 0x5ca3 movq 0x8(%rax), %rax cmpw $0x0, (%rax) jne 0x5ca3 cmpw $-0x1, 0x2(%rax) jmp 0x5c98 movq %r13, %rdi cmpb $0x4, %r15b jne 0x5d5a movb 0x8(%r13), %al movb %al, 0x9(%rsp) movq (%r13), %rdi movzbl 0x9(%rsp), %eax cmpl $0x1, %eax je 0x5d70 cmpl $0x3, %eax jne 0x5d77 callq 0x1a382 jmp 0x5d7c movq %rdi, 0x18(%rsp) jmp 0x5d81 callq 0x1a334 movq %rax, 0x18(%rsp) movzbl %r15b, %esi movq %r13, %rdi callq 0x197ac movb $0x1, 0x9(%rsp) movq 0x18(%rsp), %r13 movq 0x18(%rbx), %rax movzwl %r12w, %edx movzbl (%rax,%rdx), %ecx movb %cl, 0xa(%rsp) movq 0x8(%rbx), %rax movl %edx, %edx movq (%rax,%rdx,8), %rdx movzbl 0x9(%rsp), %esi movq %r13, %rdi leaq 0xb(%rsp), %r8 callq 0xa33b movq %rax, %r15 cmpq %r13, %rax je 0x5dd8 movzbl 0x9(%rsp), %esi movq %r13, %rdi callq 0x197ac movb 0xb(%rsp), %al movq 0x40(%rsp), %r10 movslq %r10d, %rcx leaq (,%rcx,8), %rdx addq 0x8(%r14), %rdx movq %r15, (%rdx,%rbp,8) addq 0x18(%r14), %rcx movb %al, (%rbp,%rcx) movq 0x28(%rsp), %rdi movl %edi, %ecx addq %rbp, %rcx leaq (%r10,%rbp), %rax incq %rax movq 0x10(%rsp), %rdx leaq 0x1(%rdx,%rbp), %r11 movq 0x38(%rsp), %r8 addl %r8d, %edx addl %ebp, %edx cmpl %eax, %ecx setne %sil cmpl $-0x1, %edx setne %cl andb %sil, %cl cmpb $0x1, %cl jne 0x5e60 movq 0x10(%r14), %rdx movl %eax, %esi movl $0xffff, %r9d # imm = 0xFFFF andl %r9d, %esi movzwl (%rdx,%rsi,2), %edx movw %dx, 0xe(%rsp) movq 0x10(%rbx), %rdx 
movl %r11d, %esi andl %r9d, %esi movzwl (%rdx,%rsi,2), %edx movw %dx, 0xc(%rsp) testb %cl, %cl je 0x5ec8 addl %ebp, %edi movq %rdi, 0x28(%rsp) jmp 0x5b5a movq 0x48(%rsp), %rdx addl %ebp, %edx addl %ebp, %r10d movq 0x28(%rsp), %rdi addl %ebp, %edi movq 0x20(%rsp), %r14 cmpl %edi, %r10d jne 0x5ea3 movzbl 0x20(%rbx), %r8d andl $0x1, %r8d movq %r14, %rdi movq %rbx, %rsi movl 0x30(%rsp), %ecx callq 0xc82e addq $0x58, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rbx, %rsi addq $0x58, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x4561 movq 0x10(%rsp), %rax leal (%rax,%rbp), %edx incl %edx addl %ebp, %r10d incl %r10d jmp 0x5eed movq 0x10(%rsp), %rax leal (%rax,%rbp), %edx addl %ebp, %r10d incl %r10d movq 0x20(%rsp), %r14 addl %ebp, %edi jmp 0x5e86
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_rank
/* Rank query for a single value x: accumulates the full cardinality of every
 * container whose 16-bit key lies below x's high half-word, then finishes
 * with container_rank() inside the matching container (if any).
 * NOTE(review): the exact convention (count of values <= x vs < x) depends
 * on container_rank(), which is not visible here -- confirm against the
 * container implementation. The early return in the final branch relies on
 * the keys array being sorted ascending. */
uint64_t roaring_bitmap_rank(const roaring_bitmap_t *bm, uint32_t x) {
    const uint32_t target_high = x >> 16;  /* key of the container holding x */
    uint64_t total = 0;
    for (int idx = 0; idx < bm->high_low_container.size; idx++) {
        const uint32_t key = bm->high_low_container.keys[idx];
        if (key < target_high) {
            /* Whole container lies below x: count every element in it. */
            total += container_get_cardinality(
                bm->high_low_container.containers[idx],
                bm->high_low_container.typecodes[idx]);
        } else if (key == target_high) {
            /* x falls inside this container: add its partial rank and stop. */
            return total + container_rank(bm->high_low_container.containers[idx],
                                          bm->high_low_container.typecodes[idx],
                                          x & 0xFFFF);
        } else {
            /* key > target_high: no further container can contribute. */
            return total;
        }
    }
    return total;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax cmpl $0x0, (%rdi) jle 0xa752 movq %rdi, %r14 movl %esi, 0x4(%rsp) movl %esi, %ebp shrl $0x10, %ebp xorl %r12d, %r12d xorl %ebx, %ebx movq 0x10(%r14), %rax movzwl (%rax,%r12,2), %r13d cmpl %r13d, %ebp jbe 0xa68e movq 0x8(%r14), %rax movq 0x18(%r14), %rcx movq (%rax,%r12,8), %rdi movb (%rcx,%r12), %al cmpb $0x4, %al jne 0xa67c movb 0x8(%rdi), %al movq (%rdi), %rdi cmpb $0x3, %al je 0xa706 movzbl %al, %eax cmpl $0x2, %eax movl (%rdi), %eax jmp 0xa70b jne 0xa701 movq 0x8(%r14), %rax movq 0x18(%r14), %rcx movq (%rax,%r12,8), %rdi movb (%rcx,%r12), %al cmpb $0x4, %al jne 0xa6aa movb 0x8(%rdi), %al movq (%rdi), %rdi movl 0x4(%rsp), %r9d cmpb $0x3, %al je 0xa712 movzbl %al, %eax cmpl $0x2, %eax jne 0xa71d movq 0x8(%rdi), %rcx movl (%rdi), %edx decl %edx xorl %esi, %esi cmpl %edx, %esi jg 0xa728 leal (%rsi,%rdx), %r8d movl %r8d, %edi shrl %edi andl $-0x2, %r8d movzwl (%rcx,%r8), %r8d cmpw %r9w, %r8w jae 0xa6ea incl %edi movb $0x1, %r8b movl %edi, %esi jmp 0xa6fa jbe 0xa6f5 decl %edi movb $0x1, %r8b movl %edi, %edx jmp 0xa6fa xorl %r8d, %r8d movl %edi, %eax testb %r8b, %r8b jne 0xa6c5 jmp 0xa72c movq %rbx, %r15 jmp 0xa73c callq 0x1fa90 cltq addq %rax, %rbx jmp 0xa73c movzwl %r9w, %esi callq 0x20228 jmp 0xa726 movzwl %r9w, %esi callq 0x193e6 jmp 0xa736 notl %esi movl %esi, %eax leal 0x1(%rax), %ecx testl %eax, %eax notl %eax cmovnsl %ecx, %eax movslq %eax, %r15 addq %rbx, %r15 cmpl %r13d, %ebp jbe 0xa756 incq %r12 movslq (%r14), %rax cmpq %rax, %r12 jl 0xa654 jmp 0xa759 xorl %ebx, %ebx jmp 0xa759 movq %r15, %rbx movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_rank_many
/* Computes ranks for every query in [begin, end), writing one uint64_t per
   query into ans. NOTE(review): presumably the queries must be sorted
   ascending for the single forward pass to be correct — confirm with caller. */
void roaring_bitmap_rank_many(const roaring_bitmap_t *bm, const uint32_t *begin,
                              const uint32_t *end, uint64_t *ans) {
    const roaring_array_t *ra = &bm->high_low_container;
    uint64_t running = 0;
    int idx = 0;
    const uint32_t *cursor = begin;
    while (idx < ra->size && cursor != end) {
        const uint32_t value = *cursor;
        const uint32_t value_high = value >> 16;
        const uint32_t current_key = ra->keys[idx];
        if (current_key < value_high) {
            /* Container entirely below the query: fold it into the rank. */
            running += container_get_cardinality(ra->containers[idx],
                                                 ra->typecodes[idx]);
            idx++;
        } else if (current_key == value_high) {
            /* Let the container answer every consecutive query that shares
               this high key; it reports how many it consumed. */
            const uint32_t consumed =
                container_rank_many(ra->containers[idx], ra->typecodes[idx],
                                    running, cursor, end, ans);
            cursor += consumed;
            ans += consumed;
        } else {
            /* No container holds this high key: rank is what we have so far. */
            *ans = running;
            ans++;
            cursor++;
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x38, %rsp movq %rcx, %rbx cmpl $0x0, (%rdi) setle %al cmpq %rdx, %rsi sete %cl orb %al, %cl jne 0xa990 movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %r8 xorl %r12d, %r12d xorl %r9d, %r9d movq %rdi, 0x8(%rsp) movq %rdx, 0x10(%rsp) movl (%r15), %ecx movl %ecx, %edx shrl $0x10, %edx movq 0x10(%r8), %rsi movslq %r9d, %rax movzwl (%rsi,%rax,2), %esi cmpl %esi, %edx jbe 0xa7ee movq 0x8(%r8), %rcx movq 0x18(%r8), %rdx movq (%rcx,%rax,8), %rdi movb (%rdx,%rax), %al cmpb $0x4, %al jne 0xa7d9 movb 0x8(%rdi), %al movq (%rdi), %rdi cmpb $0x3, %al je 0xa930 movzbl %al, %eax cmpl $0x2, %eax movl (%rdi), %eax jmp 0xa940 jne 0xa923 movq 0x8(%r8), %rdx movq 0x18(%r8), %rsi movq (%rdx,%rax,8), %rdi movb (%rsi,%rax), %al cmpb $0x4, %al movl %r9d, 0x4(%rsp) jne 0xa812 movb 0x8(%rdi), %al movq (%rdi), %rdi cmpb $0x3, %al je 0xa94a movzbl %al, %eax cmpl $0x2, %eax jne 0xa95d movq %r15, %rdx cmpq %r14, %r15 je 0xa918 xorl %esi, %esi movq %rbx, %r8 movq %r15, %rdx movq %rbx, 0x28(%rsp) movq %r12, 0x20(%rsp) movq %rdi, 0x18(%rsp) movl (%rdx), %r11d movl %r11d, %r9d xorl %ecx, %r9d cmpl $0x10000, %r9d # imm = 0x10000 jae 0xa8b6 movq %r8, 0x30(%rsp) movl %esi, %r10d leaq (%r10,%r10), %r13 addq 0x8(%rdi), %r13 movl %esi, %r14d notl %r14d addl (%rdi), %r14d xorl %ebx, %ebx cmpl %r14d, %ebx jg 0xa8c2 leal (%rbx,%r14), %r12d movl %r12d, %ebp shrl %ebp andl $-0x2, %r12d movzwl (%r13,%r12), %r12d cmpw %r11w, %r12w jae 0xa89d incl %ebp movb $0x1, %r12b movl %ebp, %ebx jmp 0xa8af jbe 0xa8a9 decl %ebp movb $0x1, %r12b movl %ebp, %r14d jmp 0xa8af xorl %r12d, %r12d movl %ebp, %r8d testb %r12b, %r12b jne 0xa876 jmp 0xa8c7 movq %rdx, %rax subq %r15, %rax shrq $0x2, %rax jmp 0xa902 notl %ebx movl %ebx, %r8d movq 0x20(%rsp), %r12 addq %r12, %r10 leal 0x1(%r8), %r11d testl %r8d, %r8d notl %r8d cmovnsl %r11d, %r8d cmovnsl %r11d, %esi addq %r10, %r8 movq 0x30(%rsp), %rdi movq %r8, (%rdi) movq %rdi, %r8 addq $0x8, %r8 movq 0x28(%rsp), %rbx 
movq 0x10(%rsp), %r14 movq 0x18(%rsp), %rdi cmpl $0xffff, %r9d # imm = 0xFFFF ja 0xa96e addq $0x4, %rdx cmpq %r14, %rdx jne 0xa849 subq %r15, %rdx shrq $0x2, %rdx movl %edx, %eax jmp 0xa96e movq %r12, (%rbx) addq $0x8, %rbx addq $0x4, %r15 jmp 0xa982 movl %r9d, %ebp callq 0x1fa90 movl %ebp, %r9d movq 0x8(%rsp), %r8 cltq addq %rax, %r12 incl %r9d jmp 0xa982 movq %r12, %rsi movq %r15, %rdx movq %r14, %rcx movq %rbx, %r8 callq 0x20292 jmp 0xa96e movq %r12, %rsi movq %r15, %rdx movq %r14, %rcx movq %rbx, %r8 callq 0x194c1 movl %eax, %eax leaq (%r15,%rax,4), %r15 leaq (%rbx,%rax,8), %rbx movq 0x8(%rsp), %r8 movl 0x4(%rsp), %r9d cmpl (%r8), %r9d jge 0xa990 cmpq %r14, %r15 jne 0xa7a9 addq $0x38, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_get_index
/* Returns the 0-based index of x within bm's sorted value sequence,
   or -1 when x is absent. */
int64_t roaring_bitmap_get_index(const roaring_bitmap_t *bm, uint32_t x) {
    const roaring_array_t *ra = &bm->high_low_container;
    const uint16_t target_high = x >> 16;
    const int32_t high_idx = ra_get_index(ra, target_high);
    if (high_idx < 0) return -1;  /* no container for x's high 16 bits */
    int64_t acc = 0;
    for (int idx = 0; idx < ra->size; idx++) {
        const uint32_t current_key = ra->keys[idx];
        if (current_key < target_high) {
            /* Everything in this container precedes x. */
            acc += container_get_cardinality(ra->containers[idx],
                                             ra->typecodes[idx]);
        } else if (current_key == target_high) {
            /* Locate the low 16 bits inside x's container. */
            const int32_t low_idx = container_get_index(
                ra->containers[high_idx], ra->typecodes[high_idx], x & 0xFFFF);
            return (low_idx < 0) ? -1 : acc + low_idx;
        } else {
            return -1;
        }
    }
    return acc;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdi, %r14 movl %esi, %ebp shrl $0x10, %ebp movslq (%rdi), %rax testq %rax, %rax je 0xa9cc movq 0x10(%r14), %rcx cmpw %bp, -0x2(%rcx,%rax,2) jne 0xab11 decl %eax testl %eax, %eax js 0xab08 cmpl $0x0, (%r14) jle 0xab59 movl %esi, 0xc(%rsp) movl %eax, %eax movq %rax, 0x10(%rsp) xorl %r13d, %r13d xorl %ebx, %ebx movq 0x10(%r14), %rax movzwl (%rax,%r13,2), %r12d cmpl %r12d, %ebp jbe 0xaa2d movq 0x8(%r14), %rax movq 0x18(%r14), %rcx movq (%rax,%r13,8), %rdi movb (%rcx,%r13), %al cmpb $0x4, %al jne 0xaa18 movb 0x8(%rdi), %al movq (%rdi), %rdi cmpb $0x3, %al je 0xaaaf movzbl %al, %eax cmpl $0x2, %eax movl (%rdi), %eax jmp 0xaab4 movq $-0x1, %r15 jne 0xaaf2 movq 0x8(%r14), %rax movq 0x18(%r14), %rcx movq 0x10(%rsp), %rdx movq (%rax,%rdx,8), %rdi movb (%rcx,%rdx), %al cmpb $0x4, %al jne 0xaa58 movb 0x8(%rdi), %al movq (%rdi), %rdi movl 0xc(%rsp), %r9d cmpb $0x3, %al je 0xaabb movzbl %al, %eax cmpl $0x2, %eax jne 0xaac6 movq 0x8(%rdi), %rcx movl (%rdi), %edx decl %edx xorl %esi, %esi cmpl %edx, %esi jg 0xaad1 leal (%rsi,%rdx), %r8d movl %r8d, %edi shrl %edi andl $-0x2, %r8d movzwl (%rcx,%r8), %r8d cmpw %r9w, %r8w jae 0xaa98 incl %edi movb $0x1, %r8b movl %edi, %esi jmp 0xaaa8 jbe 0xaaa3 decl %edi movb $0x1, %r8b movl %edi, %edx jmp 0xaaa8 xorl %r8d, %r8d movl %edi, %eax testb %r8b, %r8b jne 0xaa73 jmp 0xaad5 callq 0x1fa90 cltq addq %rax, %rbx jmp 0xaaf2 movzwl %r9w, %esi callq 0x20395 jmp 0xaadf movzwl %r9w, %esi callq 0x19673 jmp 0xaadf notl %esi movl %esi, %eax testl %eax, %eax movl $0xffffffff, %ecx # imm = 0xFFFFFFFF cmovsl %ecx, %eax movl %eax, %r15d addq %rbx, %r15 testl %eax, %eax movq $-0x1, %rax cmovsq %rax, %r15 cmpl %r12d, %ebp jbe 0xab5d incq %r13 movslq (%r14), %rax cmpq %rax, %r13 jl 0xa9f0 jmp 0xab60 movq $-0x1, %rbx jmp 0xab60 decl %eax xorl %edx, %edx movl %eax, %r9d cmpl %r9d, %edx jg 0xab72 leal (%rdx,%r9), %r8d movl %r8d, %edi shrl %edi andl $-0x2, %r8d movzwl 
(%rcx,%r8), %r8d cmpw %bp, %r8w jae 0xab3e incl %edi movb $0x1, %r8b movl %edi, %edx jmp 0xab4f jbe 0xab4a decl %edi movb $0x1, %r8b movl %edi, %r9d jmp 0xab4f xorl %r8d, %r8d movl %edi, %eax testb %r8b, %r8b jne 0xab18 jmp 0xa9ce xorl %ebx, %ebx jmp 0xab60 movq %r15, %rbx movq %rbx, %rax addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq notl %edx movl %edx, %eax jmp 0xa9ce
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_minimum
/* Returns the smallest value in bm, or UINT32_MAX when bm is empty. */
uint32_t roaring_bitmap_minimum(const roaring_bitmap_t *bm) {
    if (bm->high_low_container.size <= 0) {
        return UINT32_MAX;  /* empty bitmap sentinel */
    }
    /* Keys are sorted, so the first container holds the minimum. */
    container_t *first = bm->high_low_container.containers[0];
    const uint8_t first_type = bm->high_low_container.typecodes[0];
    const uint32_t first_key = bm->high_low_container.keys[0];
    const uint32_t low = container_minimum(first, first_type);
    return low | (first_key << 16);
}
pushq %rbx cmpl $0x0, (%rdi) jle 0xabbc movq %rdi, %rax movq 0x8(%rdi), %rdx movq 0x10(%rdi), %rcx movq (%rdx), %rdi movq 0x18(%rax), %rax movb (%rax), %al cmpb $0x4, %al jne 0xab9f movb 0x8(%rdi), %al movq (%rdi), %rdi movzwl (%rcx), %ebx cmpb $0x3, %al je 0xabae movzbl %al, %eax cmpl $0x2, %eax jne 0xabc3 cmpl $0x0, (%rdi) je 0xabca movq 0x8(%rdi), %rax movzwl (%rax), %eax jmp 0xabcc movl $0xffffffff, %ebx # imm = 0xFFFFFFFF jmp 0xabd4 callq 0x19384 jmp 0xabcc xorl %eax, %eax movzwl %ax, %eax shll $0x10, %ebx orl %eax, %ebx movl %ebx, %eax popq %rbx retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_maximum
/* Returns the largest value in bm, or 0 when bm is empty. */
uint32_t roaring_bitmap_maximum(const roaring_bitmap_t *bm) {
    const int32_t n = bm->high_low_container.size;
    if (n <= 0) {
        return 0;  /* empty bitmap sentinel */
    }
    /* Keys are sorted, so the last container holds the maximum. */
    const int32_t last = n - 1;
    container_t *tail = bm->high_low_container.containers[last];
    const uint8_t tail_type = bm->high_low_container.typecodes[last];
    const uint32_t tail_key = bm->high_low_container.keys[last];
    const uint32_t low = container_maximum(tail, tail_type);
    return low | (tail_key << 16);
}
pushq %rbx movl (%rdi), %ecx testl %ecx, %ecx jle 0xac24 movq %rdi, %rax decl %ecx movq 0x8(%rdi), %rsi movq 0x10(%rdi), %rdx movq (%rsi,%rcx,8), %rdi movq 0x18(%rax), %rax movb (%rax,%rcx), %al cmpb $0x4, %al jne 0xac01 movb 0x8(%rdi), %al movq (%rdi), %rdi movzwl (%rdx,%rcx,2), %ebx cmpb $0x3, %al je 0xac28 movzbl %al, %eax cmpl $0x2, %eax jne 0xac40 movslq (%rdi), %rax testq %rax, %rax je 0xac47 movq 0x8(%rdi), %rcx movzwl -0x2(%rcx,%rax,2), %eax jmp 0xac49 xorl %ebx, %ebx jmp 0xac51 movslq (%rdi), %rcx testq %rcx, %rcx je 0xac47 movq 0x8(%rdi), %rdx movzwl -0x2(%rdx,%rcx,4), %eax addw -0x4(%rdx,%rcx,4), %ax jmp 0xac49 callq 0x193b1 jmp 0xac49 xorl %eax, %eax movzwl %ax, %eax shll $0x10, %ebx orl %eax, %ebx movl %ebx, %eax popq %rbx retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_select
/* Finds the element with the given rank (0-based). On success writes it to
   *element and returns true; returns false when rank >= cardinality. */
bool roaring_bitmap_select(const roaring_bitmap_t *bm, uint32_t rank,
                           uint32_t *element) {
    uint32_t start_rank = 0;  /* cumulative cardinality, advanced per container */
    int idx = 0;
    bool found = false;
    while (!found && idx < bm->high_low_container.size) {
        container_t *c = bm->high_low_container.containers[idx];
        const uint8_t tc = bm->high_low_container.typecodes[idx];
        /* container_select writes the low 16 bits when the rank falls here. */
        found = container_select(c, tc, &start_rank, rank, element);
        idx++;
    }
    if (!found) {
        return false;
    }
    /* idx was bumped past the matching container; recover its key. */
    const uint16_t key = bm->high_low_container.keys[idx - 1];
    *element |= (((uint32_t)key) << 16);  // w/o cast, key promotes signed
    return true;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x10, %rsp movq %rdx, %rbx movq %rdi, %r14 movl $0x0, 0xc(%rsp) cmpl $0x0, (%rdi) jle 0xacfb movl %esi, %ebp xorl %r12d, %r12d leaq 0xc(%rsp), %r15 movq 0x8(%r14), %rax movq 0x18(%r14), %rcx movq (%rax,%r12,8), %rdi movb (%rcx,%r12), %al cmpb $0x4, %al jne 0xac9c movb 0x8(%rdi), %al movq (%rdi), %rdi cmpb $0x3, %al je 0xacc6 movzbl %al, %eax cmpl $0x2, %eax jne 0xacd5 movl 0xc(%rsp), %eax movl (%rdi), %ecx addl %eax, %ecx cmpl %ebp, %ecx jbe 0xacf3 movq 0x8(%rdi), %rcx movl %ebp, %edx subl %eax, %edx movzwl (%rcx,%rdx,2), %eax movl %eax, (%rbx) movb $0x1, %al jmp 0xace2 movq %r15, %rsi movl %ebp, %edx movq %rbx, %rcx callq 0x201d4 jmp 0xace2 movq %r15, %rsi movl %ebp, %edx movq %rbx, %rcx callq 0x192ac incq %r12 testb %al, %al jne 0xad04 movslq (%r14), %rcx cmpq %rcx, %r12 jl 0xac82 jmp 0xad00 movl %ecx, 0xc(%rsp) xorl %eax, %eax jmp 0xace2 xorl %r12d, %r12d xorl %eax, %eax testb %al, %al je 0xad15 movq 0x10(%r14), %rcx movl %r12d, %edx movzwl -0x2(%rcx,%rdx,2), %ecx shll $0x10, %ecx orl %ecx, (%rbx) addq $0x10, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_intersect
/* Returns true iff x1 and x2 share at least one value.
   Fix: the original kept a vestigial `answer` accumulator that was
   initialized to 0, never modified, and returned via `answer != 0` — an
   obfuscated `return false`. The dead variable is removed. */
bool roaring_bitmap_intersect(const roaring_bitmap_t *x1,
                              const roaring_bitmap_t *x2) {
    const int length1 = x1->high_low_container.size,
              length2 = x2->high_low_container.size;
    int pos1 = 0, pos2 = 0;

    while (pos1 < length1 && pos2 < length2) {
        const uint16_t s1 =
            ra_get_key_at_index(&x1->high_low_container, (uint16_t)pos1);
        const uint16_t s2 =
            ra_get_key_at_index(&x2->high_low_container, (uint16_t)pos2);

        if (s1 == s2) {
            /* Matching high keys: test the two containers for overlap. */
            uint8_t type1, type2;
            container_t *c1 = ra_get_container_at_index(&x1->high_low_container,
                                                        (uint16_t)pos1, &type1);
            container_t *c2 = ra_get_container_at_index(&x2->high_low_container,
                                                        (uint16_t)pos2, &type2);
            if (container_intersect(c1, type1, c2, type2)) return true;
            ++pos1;
            ++pos2;
        } else if (s1 < s2) {
            /* Skip ahead in x1 until its key reaches s2 (galloping search). */
            pos1 = ra_advance_until(&x1->high_low_container, s2, pos1);
        } else {  // s1 > s2
            pos2 = ra_advance_until(&x2->high_low_container, s1, pos2);
        }
    }
    /* One array exhausted without any overlapping container. */
    return false;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdi, %r14 movl (%rdi), %r10d movl (%rsi), %r11d xorl %r15d, %r15d movl $0xffff, %r12d # imm = 0xFFFF xorl %r13d, %r13d movq %rsi, (%rsp) cmpl %r10d, %r13d setl %al cmpl %r11d, %r15d setl %bpl andb %al, %bpl cmpb $0x1, %bpl jne 0xafc9 movq 0x10(%r14), %rcx movl %r13d, %eax andl %r12d, %eax movzwl (%rcx,%rax,2), %edx movq 0x10(%rsi), %rbx movl %r15d, %r8d andl %r12d, %r8d movzwl (%rbx,%r8,2), %edi cmpw %di, %dx jne 0xade6 movq 0x8(%r14), %rdx movq 0x18(%r14), %rcx movb (%rcx,%rax), %cl movq (%rdx,%rax,8), %rax cmpb $0x4, %cl jne 0xad9c movb 0x8(%rax), %cl movq (%rax), %rax movq 0x8(%rsi), %rdi movq 0x18(%rsi), %rdx movb (%rdx,%r8), %dl movq (%rdi,%r8,8), %rsi cmpb $0x4, %dl jne 0xadb7 movb 0x8(%rsi), %dl movq (%rsi), %rsi movl %r11d, %ebx movl %r10d, %r12d movzbl %cl, %ecx movzbl %dl, %edx leal (%rdx,%rcx,4), %ecx addl $-0x5, %ecx leaq 0x169e4(%rip), %rdx # 0x217b4 movslq (%rdx,%rcx,4), %rcx addq %rdx, %rcx jmpq *%rcx movq %rax, %rdi callq 0x1303a jmp 0xaf12 jae 0xae59 movl (%r14), %r8d leal 0x1(%r13), %ebx movb $0x1, %al cmpl %r8d, %ebx jge 0xaec2 movslq %ebx, %rdx cmpw %di, (%rcx,%rdx,2) jae 0xaec2 addl $0x2, %r13d movl $0x1, %r9d cmpl %r8d, %r13d jge 0xae37 movl $0x1, %edx movslq %r13d, %r9 cmpw %di, (%rcx,%r9,2) jae 0xae3e movl %edx, %r9d addl %edx, %r9d leal (%rbx,%rdx,2), %r13d movl %r9d, %edx cmpl %r8d, %r13d jl 0xae1b leal -0x1(%r8), %r13d movl %r9d, %edx movslq %r13d, %r9 cmpw %di, (%rcx,%r9,2) je 0xaebd jae 0xaf47 movl %r15d, %ecx movl %r8d, %r13d jmp 0xaf30 movl (%rsi), %r8d leal 0x1(%r15), %ecx movb $0x1, %al cmpl %r8d, %ecx jge 0xaf30 movslq %ecx, %rdi cmpw %dx, (%rbx,%rdi,2) jae 0xaf30 addl $0x2, %r15d movl $0x1, %r9d cmpl %r8d, %r15d jge 0xaea8 movl $0x1, %edi movslq %r15d, %r9 cmpw %dx, (%rbx,%r9,2) jae 0xaeaf movl %edi, %r9d addl %edi, %r9d leal (%rcx,%rdi,2), %r15d movl %r9d, %edi cmpl %r8d, %r15d jl 0xae8c leal -0x1(%r8), %r15d movl %r9d, %edi movslq %r15d, %r9 cmpw 
%dx, (%rbx,%r9,2) jne 0xaf40 movl %r15d, %ecx jmp 0xaf30 movl %r15d, %ecx movl %ebx, %r13d jmp 0xaf30 movq %rsi, %rdi movq %rax, %rsi jmp 0xaeef movq %rsi, %rdi movq %rax, %rsi jmp 0xaee5 movq %rsi, %rdi movq %rax, %rsi jmp 0xaf0d movq %rax, %rdi callq 0x1b8eb jmp 0xaf12 movq %rax, %rdi callq 0x1ba48 jmp 0xaf12 movq %rax, %rdi callq 0x1fadd jmp 0xaf12 movq %rax, %rdi callq 0x12331 jmp 0xaf12 movq %rax, %rdi callq 0x1afce xorb $0x1, %al movzbl %al, %ecx addl %ecx, %r15d addl %ecx, %r13d movl %r15d, %ecx movl %r12d, %r10d movl %ebx, %r11d movq (%rsp), %rsi movl $0xffff, %r12d # imm = 0xFFFF movl %ecx, %r15d testb %al, %al jne 0xad46 jmp 0xafc9 jae 0xaf84 movl %r8d, %ecx jmp 0xaf30 sarl %edx leal (%rdx,%rbx), %r8d incl %r8d cmpl %r13d, %r8d je 0xaebd addl %ebx, %edx leal (%rdx,%r13), %r9d sarl %r9d movslq %r9d, %r8 cmpw %di, (%rcx,%r8,2) je 0xafbe jb 0xaf73 movl %r9d, %r13d jmp 0xaf76 movl %r9d, %edx leal 0x1(%rdx), %r8d cmpl %r13d, %r8d jne 0xaf5b jmp 0xaebd sarl %edi leal (%rdi,%rcx), %r8d incl %r8d cmpl %r15d, %r8d je 0xaebd addl %ecx, %edi leal (%rdi,%r15), %ecx sarl %ecx movslq %ecx, %r8 cmpw %dx, (%rbx,%r8,2) je 0xaf30 jb 0xafaf movl %ecx, %r15d jmp 0xafb1 movl %ecx, %edi leal 0x1(%rdi), %ecx cmpl %r15d, %ecx jne 0xaf98 jmp 0xaebd movl %r15d, %ecx movl %r9d, %r13d jmp 0xaf30 movl %ebp, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring.c
roaring_bitmap_contains
/* Returns the index of key x in ra->keys, or a negative value when absent.
   (For size == 0 the first branch yields -1.) */
inline int32_t ra_get_index(const roaring_array_t *ra, uint16_t x) {
    const int32_t n = ra->size;
    /* Fast path: empty array, or x happens to be the last (largest) key. */
    if (n == 0 || ra->keys[n - 1] == x) {
        return n - 1;
    }
    return binarySearch(ra->keys, n, x);
}
movslq (%rdi), %rax testq %rax, %rax je 0xb417 movl %esi, %ecx shrl $0x10, %ecx movq 0x10(%rdi), %rdx cmpw %cx, -0x2(%rdx,%rax,2) jne 0xb441 decl %eax testl %eax, %eax js 0xb43e movq 0x8(%rdi), %rcx movq 0x18(%rdi), %rdx movl $0xffff, %r8d # imm = 0xFFFF andl %r8d, %eax movzbl (%rdx,%rax), %edx movq (%rcx,%rax,8), %rdi andl %r8d, %esi jmp 0x2871 xorl %eax, %eax retq decl %eax xorl %r8d, %r8d movl %eax, %r9d cmpl %r9d, %r8d jg 0xb48c leal (%r8,%r9), %r11d movl %r11d, %r10d shrl %r10d andl $-0x2, %r11d movzwl (%rdx,%r11), %r11d cmpw %cx, %r11w jae 0xb472 incl %r10d movb $0x1, %r11b movl %r10d, %r8d jmp 0xb485 jbe 0xb47f decl %r10d movb $0x1, %r11b movl %r10d, %r9d jmp 0xb485 xorl %r11d, %r11d movl %r10d, %eax testb %r11b, %r11b jne 0xb449 jmp 0xb419 notl %r8d movl %r8d, %eax jmp 0xb419
/lucaderi[P]CRoaring/include/roaring/roaring_array.h
ra_append_copy_range
/* Appends containers sa[start_index, end_index) to ra.
   With copy_on_write, both arrays end up aliasing shared (COW) containers;
   otherwise ra receives deep clones. */
void ra_append_copy_range(roaring_array_t *ra, const roaring_array_t *sa,
                          int32_t start_index, int32_t end_index,
                          bool copy_on_write) {
    extend_array(ra, end_index - start_index);
    for (int32_t i = start_index; i < end_index; ++i) {
        const int32_t pos = ra->size;
        ra->keys[pos] = sa->keys[i];
        if (copy_on_write) {
            /* Replace the source slot with a shareable copy, then alias it.
               This writes through `sa` even though the parameter is
               const-qualified: only the pointed-to arrays are mutated. */
            sa->containers[i] = get_copy_of_container(
                sa->containers[i], &sa->typecodes[i], copy_on_write);
            ra->containers[pos] = sa->containers[i];
        } else {
            /* Deep clone: destination owns its own container. */
            ra->containers[pos] =
                container_clone(sa->containers[i], sa->typecodes[i]);
        }
        /* In the COW branch the typecode may have been rewritten above,
           so this store must come after the branch. */
        ra->typecodes[pos] = sa->typecodes[i];
        ra->size++;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %r8d, 0x4(%rsp) movl %ecx, %ebx movl %edx, %ebp movq %rsi, %r15 movq %rdi, %r12 movl %ecx, %esi subl %edx, %esi callq 0xc6d0 subl %ebp, %ebx movq %rbx, 0x10(%rsp) jle 0xc91e movslq %ebp, %r13 leaq (,%r13,2), %rax movq %rax, 0x8(%rsp) leaq (,%r13,8), %rbx xorl %ebp, %ebp movslq (%r12), %r14 movq 0x10(%r15), %rax addq 0x8(%rsp), %rax movzwl (%rax,%rbp,2), %eax movq 0x10(%r12), %rcx movw %ax, (%rcx,%r14,2) cmpb $0x0, 0x4(%rsp) je 0xc8d4 movq 0x8(%r15), %rax addq %rbx, %rax movq (%rax,%rbp,8), %rdi movq 0x18(%r15), %rsi addq %r13, %rsi addq %rbp, %rsi movl $0x1, %edx callq 0x198df movq 0x8(%r15), %rcx addq %rbx, %rcx movq %rax, (%rcx,%rbp,8) movq 0x8(%r15), %rax addq %rbx, %rax movq (%rax,%rbp,8), %rax jmp 0xc8f0 movq 0x8(%r15), %rax addq %rbx, %rax movq (%rax,%rbp,8), %rdi movq 0x18(%r15), %rax addq %r13, %rax movzbl (%rbp,%rax), %esi callq 0x1994f movq 0x8(%r12), %rcx movq %rax, (%rcx,%r14,8) movq 0x18(%r15), %rax addq %r13, %rax movb (%rbp,%rax), %al movq 0x18(%r12), %rcx movb %al, (%rcx,%r14) incl (%r12) incq %rbp cmpl %ebp, 0x10(%rsp) jne 0xc87b addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring_array.c
ra_append_move_range
/* Appends sa[start_index, end_index) to ra by transferring the container
   pointers directly (no clone, no refcount change). */
void ra_append_move_range(roaring_array_t *ra, roaring_array_t *sa,
                          int32_t start_index, int32_t end_index) {
    extend_array(ra, end_index - start_index);
    for (int32_t src = start_index; src < end_index; ++src) {
        const int32_t dst = ra->size++;
        ra->keys[dst] = sa->keys[src];
        ra->containers[dst] = sa->containers[src];
        ra->typecodes[dst] = sa->typecodes[src];
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movl %ecx, %ebp movl %edx, %r15d movq %rsi, %rbx movq %rdi, %r14 movl %ecx, %esi subl %edx, %esi callq 0xc6d0 cmpl %r15d, %ebp jle 0xca1a movslq %r15d, %rax movslq %ebp, %rcx movslq (%r14), %rdx movq 0x10(%rbx), %rsi movzwl (%rsi,%rax,2), %esi movq 0x10(%r14), %rdi movw %si, (%rdi,%rdx,2) movq 0x8(%rbx), %rsi movq (%rsi,%rax,8), %rsi movq 0x8(%r14), %rdi movq %rsi, (%rdi,%rdx,8) movq 0x18(%rbx), %rsi movb (%rsi,%rax), %sil movq 0x18(%r14), %rdi movb %sil, (%rdi,%rdx) incl (%r14) incq %rax cmpq %rax, %rcx jne 0xc9dc addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring_array.c
ra_insert_new_key_value_at
/* Inserts (key, c, typecode) at position i, shifting existing entries right.
   Caller guarantees i is a valid insertion point (0 <= i <= ra->size). */
void ra_insert_new_key_value_at(roaring_array_t *ra, int32_t i, uint16_t key,
                                container_t *c, uint8_t typecode) {
    extend_array(ra, 1);
    /* Number of trailing entries that must slide one slot to the right. */
    const int32_t tail = ra->size - i;
    // May be an optimization opportunity with DIY memmove
    memmove(&ra->keys[i + 1], &ra->keys[i], sizeof(uint16_t) * tail);
    memmove(&ra->containers[i + 1], &ra->containers[i],
            sizeof(container_t *) * tail);
    memmove(&ra->typecodes[i + 1], &ra->typecodes[i], sizeof(uint8_t) * tail);
    ra->keys[i] = key;
    ra->containers[i] = c;
    ra->typecodes[i] = typecode;
    ra->size++;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movl %r8d, %ebx movq %rcx, %r15 movl %edx, %ebp movl %esi, %r12d movq %rdi, %r14 movl $0x1, %esi callq 0xc6d0 movq 0x10(%r14), %rax movslq %r12d, %r12 leaq (%rax,%r12,2), %rdi addq $0x2, %rdi leaq (%rax,%r12,2), %rsi movslq (%r14), %rdx subq %r12, %rdx addq %rdx, %rdx callq 0x1190 movq 0x8(%r14), %rax leaq (%rax,%r12,8), %rdi addq $0x8, %rdi leaq (%rax,%r12,8), %rsi movslq (%r14), %rdx subq %r12, %rdx shlq $0x3, %rdx callq 0x1190 movq 0x18(%r14), %rsi leaq (%rsi,%r12), %rdi incq %rdi addq %r12, %rsi movslq (%r14), %rdx subq %r12, %rdx callq 0x1190 movq 0x10(%r14), %rax movw %bp, (%rax,%r12,2) movq 0x8(%r14), %rax movq %r15, (%rax,%r12,8) movq 0x18(%r14), %rax movb %bl, (%rax,%r12) incl (%r14) popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/roaring_array.c
union_uint16
/* Merges two sorted uint16 sets into buffer (duplicates collapsed) and
   returns the number of values written. buffer must be large enough to hold
   size_1 + size_2 values. */
size_t union_uint16(const uint16_t *set_1, size_t size_1, const uint16_t *set_2,
                    size_t size_2, uint16_t *buffer) {
    /* Degenerate cases: one side empty means the union is the other side. */
    if (size_2 == 0) {
        memmove(buffer, set_1, size_1 * sizeof(uint16_t));
        return size_1;
    }
    if (size_1 == 0) {
        memmove(buffer, set_2, size_2 * sizeof(uint16_t));
        return size_2;
    }
    size_t pos = 0, i = 0, j = 0;
    uint16_t a = set_1[0], b = set_2[0];
    for (;;) {
        if (a < b) {
            buffer[pos++] = a;
            if (++i >= size_1) break;
            a = set_1[i];
        } else if (b < a) {
            buffer[pos++] = b;
            if (++j >= size_2) break;
            b = set_2[j];
        } else {
            /* Equal values are emitted once, advancing both inputs. */
            buffer[pos++] = a;
            ++i;
            ++j;
            if (i >= size_1 || j >= size_2) break;
            a = set_1[i];
            b = set_2[j];
        }
    }
    /* Bulk-copy whichever input still has a tail remaining. */
    if (i < size_1) {
        const size_t rest = size_1 - i;
        memmove(buffer + pos, set_1 + i, rest * sizeof(uint16_t));
        pos += rest;
    } else if (j < size_2) {
        const size_t rest = size_2 - j;
        memmove(buffer + pos, set_2 + j, rest * sizeof(uint16_t));
        pos += rest;
    }
    return pos;
}
pushq %r15 pushq %r14 pushq %rbx movq %rsi, %rbx movq %rdi, %rsi testq %rcx, %rcx je 0xec87 movq %rcx, %r14 testq %rbx, %rbx je 0xec95 movzwl (%rdx), %edi xorl %eax, %eax xorl %ecx, %ecx xorl %r15d, %r15d movzwl (%rsi,%rax,2), %r9d incq %r15 cmpw %di, %r9w jb 0xec38 jbe 0xec48 movw %di, -0x2(%r8,%r15,2) leaq 0x1(%rcx), %r10 cmpq %r14, %r10 jae 0xec69 movzwl (%rdx,%r10,2), %edi incq %r15 movq %r10, %rcx jmp 0xec14 movw %r9w, -0x2(%r8,%r15,2) incq %rax cmpq %rbx, %rax jb 0xec0c jmp 0xec6c movw %r9w, -0x2(%r8,%r15,2) incq %rax cmpq %rbx, %rax jae 0xec69 leaq 0x1(%rcx), %r9 cmpq %r14, %r9 jae 0xec69 movzwl 0x2(%rdx,%rcx,2), %edi movq %r9, %rcx jmp 0xec0c incq %rcx subq %rax, %rbx jbe 0xecac leaq (%r8,%r15,2), %rdi leaq (%rsi,%rax,2), %rsi leaq (%rbx,%rbx), %rdx callq 0x1190 addq %r15, %rbx jmp 0xecc8 leaq (%rbx,%rbx), %rdx movq %r8, %rdi callq 0x1190 jmp 0xecc8 leaq (%r14,%r14), %rax movq %r8, %rdi movq %rdx, %rsi movq %rax, %rdx callq 0x1190 movq %r14, %rbx jmp 0xecc8 subq %rcx, %r14 jbe 0xecc5 leaq (%r8,%r15,2), %rdi leaq (%rdx,%rcx,2), %rsi leaq (%r14,%r14), %rdx callq 0x1190 addq %r14, %r15 movq %r15, %rbx movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/array_util.c
union_uint32_card
/* Returns the cardinality of the union of two sorted uint32 sets without
   materializing the union itself. */
size_t union_uint32_card(const uint32_t *set_1, size_t size_1,
                         const uint32_t *set_2, size_t size_2) {
    /* One side empty: the union is just the other side. */
    if (size_2 == 0) return size_1;
    if (size_1 == 0) return size_2;
    size_t count = 0, i = 0, j = 0;
    while (i < size_1 && j < size_2) {
        const uint32_t a = set_1[i];
        const uint32_t b = set_2[j];
        /* Advance past the smaller value; equal values advance both but
           contribute a single element to the union. */
        if (a <= b) ++i;
        if (b <= a) ++j;
        ++count;
    }
    /* Whatever remains in either input is disjoint from the other. */
    return count + (size_1 - i) + (size_2 - j);
}
movq %rsi, %rax testq %rcx, %rcx je 0xf9f6 testq %rax, %rax je 0xf9f3 pushq %rbx movl (%rdx), %r10d xorl %r8d, %r8d xorl %r9d, %r9d xorl %esi, %esi movl (%rdi,%r8,4), %r11d incq %rsi cmpl %r10d, %r11d jb 0xf9c1 jbe 0xf9cb leaq 0x1(%r9), %rbx cmpq %rcx, %rbx jae 0xf9e6 movl (%rdx,%rbx,4), %r10d incq %rsi movq %rbx, %r9 jmp 0xf9a5 incq %r8 cmpq %rax, %r8 jb 0xf99e jmp 0xf9e9 incq %r8 cmpq %rax, %r8 jae 0xf9e6 leaq 0x1(%r9), %r11 cmpq %rcx, %r11 jae 0xf9e6 movl 0x4(%rdx,%r9,4), %r10d movq %r11, %r9 jmp 0xf99e incq %r9 subq %r8, %rax jbe 0xf9f7 addq %rsi, %rax jmp 0xfa07 movq %rcx, %rax retq subq %r9, %rcx jbe 0xfa04 addq %rsi, %rcx movq %rcx, %rax jmp 0xfa07 movq %rsi, %rax popq %rbx retq
/lucaderi[P]CRoaring/src/array_util.c
fast_union_uint16
/* Merges two sorted uint16 sets into buffer, dispatching to the AVX2
   vectorized kernel when available at runtime, else to the scalar merge.
   Both kernels are handed the smaller array first. */
size_t fast_union_uint16(const uint16_t *set_1, size_t size_1,
                         const uint16_t *set_2, size_t size_2,
                         uint16_t *buffer) {
#if CROARING_IS_X64
    if (croaring_hardware_support() & ROARING_SUPPORTS_AVX2) {
        /* Vectorized path (sizes narrowed to 32 bits as the kernel expects). */
        if (size_1 < size_2) {
            return union_vector16(set_1, (uint32_t)size_1, set_2,
                                  (uint32_t)size_2, buffer);
        }
        return union_vector16(set_2, (uint32_t)size_2, set_1,
                              (uint32_t)size_1, buffer);
    }
#endif
    /* Scalar fallback. */
    if (size_1 < size_2) {
        return union_uint16(set_1, size_1, set_2, size_2, buffer);
    }
    return union_uint16(set_2, size_2, set_1, size_1, buffer);
}
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r12 movq %rsi, %r15 movq %rdi, %r13 callq 0x20570 testb $0x1, %al jne 0xfa3d cmpq %r14, %r15 jae 0xfa50 movq %r13, %rdi movq %r15, %rsi movq %r12, %rdx movq %r14, %rcx jmp 0xfa5c cmpq %r14, %r15 jae 0xfa6d movq %r13, %rdi movl %r15d, %esi movq %r12, %rdx movl %r14d, %ecx jmp 0xfa79 movq %r12, %rdi movq %r14, %rsi movq %r13, %rdx movq %r15, %rcx movq %rbx, %r8 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 jmp 0xebe2 movq %r12, %rdi movl %r14d, %esi movq %r13, %rdx movl %r15d, %ecx movq %rbx, %r8 callq 0xee78 movl %eax, %eax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/array_util.c
avx2_memequals
CROARING_TARGET_AVX2
/* Compares n bytes at s1 and s2 for equality using AVX2.
   Strategy: 32-byte vector compares first, then 8-byte word compares for the
   remaining multiple-of-8 span, then a byte-by-byte tail.
   Returns true iff all n bytes match. */
static inline bool _avx2_memequals(const void *s1, const void *s2, size_t n) {
    const uint8_t *ptr1 = (const uint8_t *)s1;
    const uint8_t *ptr2 = (const uint8_t *)s2;
    const uint8_t *end1 = ptr1 + n;                /* true end of the data */
    const uint8_t *end8 = ptr1 + n / 8 * 8;        /* end of the 8-byte span */
    const uint8_t *end32 = ptr1 + n / 32 * 32;     /* end of the 32-byte span */

    /* 32 bytes per iteration: compare lanes, collapse to a byte mask. */
    while (ptr1 < end32) {
        __m256i r1 = _mm256_loadu_si256((const __m256i *)ptr1);
        __m256i r2 = _mm256_loadu_si256((const __m256i *)ptr2);
        int mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(r1, r2));
        if ((uint32_t)mask != UINT32_MAX) {
            /* at least one byte differed in this 32-byte chunk */
            return false;
        }
        ptr1 += 32;
        ptr2 += 32;
    }

    /* 8 bytes per iteration; memcpy avoids unaligned-access issues. */
    while (ptr1 < end8) {
        uint64_t v1, v2;
        memcpy(&v1, ptr1, sizeof(uint64_t));
        memcpy(&v2, ptr2, sizeof(uint64_t));
        if (v1 != v2) {
            return false;
        }
        ptr1 += 8;
        ptr2 += 8;
    }

    /* Remaining 0-7 tail bytes. */
    while (ptr1 < end1) {
        if (*ptr1 != *ptr2) {
            return false;
        }
        ptr1++;
        ptr2++;
    }
    return true;
}
leaq (%rdi,%rdx), %rcx movq %rdx, %rax andq $-0x8, %rax addq %rdi, %rax andq $-0x20, %rdx addq %rdi, %rdx cmpq %rdx, %rdi jae 0xfbc5 vmovdqu (%rdi), %ymm0 vpxor (%rsi), %ymm0, %ymm0 addq $0x20, %rdi addq $0x20, %rsi vptest %ymm0, %ymm0 je 0xfb90 xorl %eax, %eax vzeroupper retq movq (%rdi), %rdx addq $0x8, %rdi leaq 0x8(%rsi), %r8 cmpq (%rsi), %rdx movq %r8, %rsi jne 0xfbac cmpq %rax, %rdi jb 0xfbb2 movb $0x1, %al cmpq %rcx, %rdi jae 0xfbae incq %rdi movb -0x1(%rdi), %al cmpb (%rsi), %al sete %al jne 0xfbae incq %rsi leaq 0x1(%rdi), %rdx cmpq %rcx, %rdi movq %rdx, %rdi jb 0xfbd4 jmp 0xfbae
/lucaderi[P]CRoaring/src/array_util.c
avx512_array_container_to_uint32_array
int avx512_array_container_to_uint32_array(void *vout, const uint16_t *array, size_t cardinality, uint32_t base) { int outpos = 0; uint32_t *out = (uint32_t *)vout; size_t i = 0; for (; i + sizeof(__m256i) / sizeof(uint16_t) <= cardinality; i += sizeof(__m256i) / sizeof(uint16_t)) { __m256i vinput = _mm256_loadu_si256((const __m256i *)(array + i)); __m512i voutput = _mm512_add_epi32(_mm512_cvtepu16_epi32(vinput), _mm512_set1_epi32(base)); _mm512_storeu_si512((__m512i *)(out + outpos), voutput); outpos += sizeof(__m512i) / sizeof(uint32_t); } for (; i < cardinality; ++i) { const uint32_t val = base + array[i]; memcpy(out + outpos, &val, sizeof(uint32_t)); // should be compiled as a MOV on x64 outpos++; } return outpos; }
cmpq $0x10, %rdx jae 0xfbfc xorl %r8d, %r8d xorl %eax, %eax jmp 0xfc42 vpbroadcastd %ecx, %zmm0 xorl %eax, %eax movabsq $0x1000000000, %r9 # imm = 0x1000000000 xorl %r8d, %r8d movq %r8, %r10 vpmovzxwd (%rsi,%r8,2), %zmm1 vpaddd %zmm1, %zmm0, %zmm1 movq %rax, %r8 sarq $0x1e, %r8 vmovdqu64 %zmm1, (%rdi,%r8) leaq 0x10(%r10), %r8 addq $0x20, %r10 addq %r9, %rax cmpq %rdx, %r10 jbe 0xfc11 movl %r8d, %eax subq %r8, %rdx jbe 0xfc6a movslq %eax, %r9 leaq (%rdi,%r9,4), %rdi leaq (%rsi,%r8,2), %r8 xorl %esi, %esi movzwl (%r8,%rsi,2), %r9d addl %ecx, %r9d movl %r9d, (%rdi,%rsi,4) incq %rsi cmpq %rsi, %rdx jne 0xfc54 addl %esi, %eax vzeroupper retq nop
/lucaderi[P]CRoaring/src/array_util.c
bitset_extract_setbits_avx512
size_t bitset_extract_setbits_avx512(const uint64_t *words, size_t length, uint32_t *vout, size_t outcapacity, uint32_t base) { uint32_t *out = (uint32_t *)vout; uint32_t *initout = out; uint32_t *safeout = out + outcapacity; __m512i base_v = _mm512_set1_epi32(base); __m512i index_table = _mm512_loadu_si512(vbmi2_table); size_t i = 0; for (; (i < length) && ((out + 64) < safeout); i += 1) { uint64_t v = words[i]; __m512i vec = _mm512_maskz_compress_epi8(v, index_table); uint8_t advance = (uint8_t)roaring_hamming(v); __m512i vbase = _mm512_add_epi32(base_v, _mm512_set1_epi32((int)(i * 64))); __m512i r1 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 0)); __m512i r2 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 1)); __m512i r3 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 2)); __m512i r4 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 3)); r1 = _mm512_add_epi32(r1, vbase); r2 = _mm512_add_epi32(r2, vbase); r3 = _mm512_add_epi32(r3, vbase); r4 = _mm512_add_epi32(r4, vbase); _mm512_storeu_si512((__m512i *)out, r1); _mm512_storeu_si512((__m512i *)(out + 16), r2); _mm512_storeu_si512((__m512i *)(out + 32), r3); _mm512_storeu_si512((__m512i *)(out + 48), r4); out += advance; } base += i * 64; for (; (i < length) && (out < safeout); ++i) { uint64_t w = words[i]; while ((w != 0) && (out < safeout)) { uint64_t t = w & (~w + 1); // on x64, should compile to BLSI (careful: the // Intel compiler seems to fail) int r = roaring_trailing_zeroes(w); // on x64, should compile to TZCNT uint32_t val = r + base; memcpy(out, &val, sizeof(uint32_t)); // should be compiled as a MOV on x64 out++; w ^= t; } base += 64; } return out - initout; }
pushq %rbx leaq (%rdx,%rcx,4), %r9 testq %rsi, %rsi sete %al cmpq $0x41, %rcx setl %cl orb %al, %cl jne 0xfd3e vmovdqa64 0x133ec(%rip), %zmm0 # 0x23080 xorl %ecx, %ecx movl %r8d, %r10d movq %rdx, %r11 movq (%rdi,%rcx,8), %rax popcntq %rax, %rbx kmovq %rax, %k1 vpcompressb %zmm0, %zmm1 {%k1} {z} vpbroadcastd %r10d, %zmm2 vpmovsxbd %xmm1, %zmm3 vextracti128 $0x1, %ymm1, %xmm4 vpmovsxbd %xmm4, %zmm4 vextracti32x4 $0x2, %zmm1, %xmm5 vpmovsxbd %xmm5, %zmm5 vextracti32x4 $0x3, %zmm1, %xmm1 vpmovsxbd %xmm1, %zmm1 vpaddd %zmm3, %zmm2, %zmm3 vpaddd %zmm4, %zmm2, %zmm4 vpaddd %zmm5, %zmm2, %zmm5 vpaddd %zmm1, %zmm2, %zmm1 vmovdqu64 %zmm3, (%r11) vmovdqu64 %zmm4, 0x40(%r11) vmovdqu64 %zmm5, 0x80(%r11) vmovdqu64 %zmm1, 0xc0(%r11) leaq (%r11,%rbx,4), %rax incq %rcx cmpq %rsi, %rcx jae 0xfd43 addl $0x40, %r10d leaq (%r11,%rbx,4), %rbx addq $0x100, %rbx # imm = 0x100 movq %rax, %r11 cmpq %r9, %rbx jb 0xfc9c jmp 0xfd43 xorl %ecx, %ecx movq %rdx, %rax cmpq %rsi, %rcx jae 0xfd8c cmpq %r9, %rax jae 0xfd8c movl %ecx, %r10d shll $0x6, %r10d addl %r8d, %r10d movq (%rdi,%rcx,8), %r8 testq %r8, %r8 je 0xfd7b tzcntq %r8, %r11 addl %r10d, %r11d movl %r11d, (%rax) addq $0x4, %rax blsrq %r8, %r8 je 0xfd7b cmpq %r9, %rax jb 0xfd60 incq %rcx cmpq %rsi, %rcx jae 0xfd8c addl $0x40, %r10d cmpq %r9, %rax jb 0xfd57 subq %rdx, %rax sarq $0x2, %rax popq %rbx vzeroupper retq
/lucaderi[P]CRoaring/src/bitset_util.c
bitset_extract_setbits_avx2
CROARING_TARGET_AVX2 size_t bitset_extract_setbits_avx2(const uint64_t *words, size_t length, uint32_t *out, size_t outcapacity, uint32_t base) { uint32_t *initout = out; __m256i baseVec = _mm256_set1_epi32(base - 1); __m256i incVec = _mm256_set1_epi32(64); __m256i add8 = _mm256_set1_epi32(8); uint32_t *safeout = out + outcapacity; size_t i = 0; for (; (i < length) && (out + 64 <= safeout); ++i) { uint64_t w = words[i]; if (w == 0) { baseVec = _mm256_add_epi32(baseVec, incVec); } else { for (int k = 0; k < 4; ++k) { uint8_t byteA = (uint8_t)w; uint8_t byteB = (uint8_t)(w >> 8); w >>= 16; __m256i vecA = _mm256_loadu_si256((const __m256i *)vecDecodeTable[byteA]); __m256i vecB = _mm256_loadu_si256((const __m256i *)vecDecodeTable[byteB]); uint8_t advanceA = lengthTable[byteA]; uint8_t advanceB = lengthTable[byteB]; vecA = _mm256_add_epi32(baseVec, vecA); baseVec = _mm256_add_epi32(baseVec, add8); vecB = _mm256_add_epi32(baseVec, vecB); baseVec = _mm256_add_epi32(baseVec, add8); _mm256_storeu_si256((__m256i *)out, vecA); out += advanceA; _mm256_storeu_si256((__m256i *)out, vecB); out += advanceB; } } } base += i * 64; for (; (i < length) && (out < safeout); ++i) { uint64_t w = words[i]; while ((w != 0) && (out < safeout)) { uint64_t t = w & (~w + 1); // on x64, should compile to BLSI (careful: the // Intel compiler seems to fail) int r = roaring_trailing_zeroes(w); // on x64, should compile to TZCNT uint32_t val = r + base; memcpy(out, &val, sizeof(uint32_t)); // should be compiled as a MOV on x64 out++; w ^= t; } base += 64; } return out - initout; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx leaq (%rdx,%rcx,4), %r9 testq %rsi, %rsi sete %al cmpq $0x40, %rcx setl %cl orb %al, %cl jne 0xff58 leal -0x1(%r8), %eax vmovd %eax, %xmm0 vpbroadcastd %xmm0, %ymm0 vpbroadcastd 0x1314a(%rip), %ymm1 # 0x23008 xorl %r15d, %r15d leaq 0x131f8(%rip), %r10 # 0x230c0 vpbroadcastd 0x1312f(%rip), %ymm2 # 0x23000 leaq 0x151e8(%rip), %r11 # 0x250c0 vpbroadcastd 0x13123(%rip), %ymm3 # 0x23004 movq %rdx, %rax movq (%rdi,%r15,8), %rcx testq %rcx, %rcx je 0xff3e movl $0x4, %ebx movq %rcx, %r14 shrq $0x10, %r14 movzbl %cl, %r12d movzbl (%r12,%r11), %r13d shll $0x5, %r12d movzbl %ch, %ecx movzbl (%rcx,%r11), %ebp shll $0x5, %ecx vpaddd (%r12,%r10), %ymm0, %ymm4 vpaddd (%rcx,%r10), %ymm0, %ymm5 vpaddd %ymm2, %ymm5, %ymm5 vpaddd %ymm3, %ymm0, %ymm0 vmovdqu %ymm4, (%rax) leaq (%rax,%r13,4), %rax vmovdqu %ymm5, (%rax) leaq (%rax,%rbp,4), %rax movq %r14, %rcx decl %ebx jne 0xfef5 jmp 0xff42 vpaddd %ymm1, %ymm0, %ymm0 incq %r15 cmpq %rsi, %r15 jae 0xff5e leaq 0x100(%rax), %rcx cmpq %r9, %rcx jbe 0xfee4 jmp 0xff5e xorl %r15d, %r15d movq %rdx, %rax cmpq %rsi, %r15 jae 0xffa7 cmpq %r9, %rax jae 0xffa7 movl %r15d, %r10d shll $0x6, %r10d addl %r8d, %r10d movq (%rdi,%r15,8), %rcx testq %rcx, %rcx je 0xff96 tzcntq %rcx, %r8 addl %r10d, %r8d movl %r8d, (%rax) addq $0x4, %rax blsrq %rcx, %rcx je 0xff96 cmpq %r9, %rax jb 0xff7b incq %r15 cmpq %rsi, %r15 jae 0xffa7 addl $0x40, %r10d cmpq %r9, %rax jb 0xff72 subq %rdx, %rax sarq $0x2, %rax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp vzeroupper retq
/lucaderi[P]CRoaring/src/bitset_util.c
bitset_flip_list
void bitset_flip_list(uint64_t *words, const uint16_t *list, uint64_t length) { uint64_t offset, load, newload, pos, index; const uint16_t *end = list + length; while (list != end) { pos = *list; offset = pos >> 6; index = pos % 64; load = words[offset]; newload = load ^ (UINT64_C(1) << index); words[offset] = newload; list++; } }
testq %rdx, %rdx je 0x10431 addq %rdx, %rdx xorl %eax, %eax movzwl (%rsi,%rax), %ecx movl %ecx, %r8d movl $0x1, %r9d shlq %cl, %r9 shrl $0x6, %r8d xorq %r9, (%rdi,%r8,8) addq $0x2, %rax cmpq %rax, %rdx jne 0x10410 retq nop
/lucaderi[P]CRoaring/src/bitset_util.c
bitset_print
inline void bitset_print(const bitset_t *b) { printf("{"); for (size_t i = 0; bitset_next_set_bit(b, &i); i++) { printf("%zu, ", i); } printf("}"); }
pushq %r15 pushq %r14 pushq %rbx movq %rdi, %rbx movl $0x7b, %edi callq 0x1040 leaq 0x15da3(%rip), %r14 # 0x261f0 xorl %r15d, %r15d movq %r15, %rax shrq $0x6, %rax movq 0x8(%rbx), %rdx cmpq %rdx, %rax jae 0x104a8 movq (%rbx), %rsi movq (%rsi,%rax,8), %rdi movl %r15d, %ecx shrq %cl, %rdi testq %rdi, %rdi je 0x1047b bsfq %rdi, %rax addq %rax, %r15 jmp 0x104a4 movq %rax, %rcx shlq $0x6, %rcx negq %rcx incq %rax cmpq %rax, %rdx je 0x104a8 movq (%rsi,%rax,8), %rdi addq $-0x40, %rcx incq %rax testq %rdi, %rdi je 0x10488 bsfq %rdi, %r15 subq %rcx, %r15 movb $0x1, %al jmp 0x104aa xorl %eax, %eax testb %al, %al je 0x104c0 movq %r14, %rdi movq %r15, %rsi xorl %eax, %eax callq 0x10d0 incq %r15 jmp 0x10450 movl $0x7d, %edi popq %rbx popq %r14 popq %r15 jmp 0x1040
/lucaderi[P]CRoaring/include/roaring/bitset/bitset.h
bitset_set_to_value
inline void bitset_set_to_value(bitset_t *bitset, size_t i, bool flag) { size_t shiftedi = i / 64; uint64_t mask = ((uint64_t)1) << (i % 64); uint64_t dynmask = ((uint64_t)flag) << (i % 64); if (shiftedi >= bitset->arraysize) { if (!bitset_grow(bitset, shiftedi + 1)) { return; } } uint64_t w = bitset->array[shiftedi]; w &= ~mask; w |= dynmask; bitset->array[shiftedi] = w; }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rsi, %rcx movq %rdi, %rbx movl $0x1, %r14d shlq %cl, %r14 movl %edx, %r15d shlq %cl, %r15 movq %rsi, %r12 shrq $0x6, %r12 cmpq 0x8(%rdi), %r12 jb 0x1062e leaq 0x1(%r12), %rsi movq %rbx, %rdi callq 0x10e15 testb %al, %al je 0x1063f movq (%rbx), %rax notq %r14 andq (%rax,%r12,8), %r14 orq %r15, %r14 movq %r14, (%rax,%r12,8) addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/include/roaring/bitset/bitset.h
bitset_create
bitset_t *bitset_create(void) { bitset_t *bitset = NULL; /* Allocate the bitset itself. */ if ((bitset = (bitset_t *)roaring_malloc(sizeof(bitset_t))) == NULL) { return NULL; } bitset->array = NULL; bitset->arraysize = 0; bitset->capacity = 0; return bitset; }
pushq %rax movl $0x18, %edi callq 0x2369 testq %rax, %rax je 0x1073c xorps %xmm0, %xmm0 movups %xmm0, (%rax) movq $0x0, 0x10(%rax) popq %rcx retq xorl %eax, %eax popq %rcx retq
/lucaderi[P]CRoaring/src/bitset.c
bitset_resize
bool bitset_resize(bitset_t *bitset, size_t newarraysize, bool padwithzeroes) { if (newarraysize > SIZE_MAX / 64) { return false; } size_t smallest = newarraysize < bitset->arraysize ? newarraysize : bitset->arraysize; if (bitset->capacity < newarraysize) { uint64_t *newarray; size_t newcapacity = bitset->capacity; if (newcapacity == 0) { newcapacity = 1; } while (newcapacity < newarraysize) { newcapacity *= 2; } if ((newarray = (uint64_t *)roaring_realloc( bitset->array, sizeof(uint64_t) * newcapacity)) == NULL) { return false; } bitset->capacity = newcapacity; bitset->array = newarray; } if (padwithzeroes && (newarraysize > smallest)) memset(bitset->array + smallest, 0, sizeof(uint64_t) * (newarraysize - smallest)); bitset->arraysize = newarraysize; return true; // success! }
movq %rsi, %rax shrq $0x3a, %rax je 0x1093f xorl %eax, %eax retq pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movl %edx, %ebp movq %rsi, %rbx movq %rdi, %r14 movq 0x8(%rdi), %r12 movq 0x10(%rdi), %rax cmpq %rsi, %r12 movq %rsi, %r15 cmovbq %r12, %r15 cmpq %rsi, %rax jae 0x10998 cmpq $0x1, %rax adcq $0x0, %rax movq %rax, %r13 addq %rax, %rax cmpq %rbx, %r13 jb 0x10971 movq (%r14), %rdi leaq (,%r13,8), %rsi callq 0x236f testq %rax, %rax je 0x109d6 movq %r13, 0x10(%r14) movq %rax, (%r14) cmpq %rbx, %r12 setb %al andb %bpl, %al cmpb $0x1, %al jne 0x109c1 leaq (,%r15,8), %rdi addq (%r14), %rdi movq %rbx, %rdx subq %r15, %rdx shlq $0x3, %rdx xorl %esi, %esi callq 0x1100 movq %rbx, 0x8(%r14) movb $0x1, %al addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq xorl %eax, %eax jmp 0x109c7
/lucaderi[P]CRoaring/src/bitset.c
bitset_contains_all
bool bitset_contains_all(const bitset_t *CBITSET_RESTRICT b1, const bitset_t *CBITSET_RESTRICT b2) { size_t min_size = b1->arraysize; if (b1->arraysize > b2->arraysize) { min_size = b2->arraysize; } for (size_t k = 0; k < min_size; k++) { if ((b1->array[k] & b2->array[k]) != b2->array[k]) { return false; } } if (b2->arraysize > b1->arraysize) { /* Need to check if b2 has any bits set beyond b1's array */ return !any_bits_set(b2, b1->arraysize); } return true; }
movq 0x8(%rdi), %rdx movq 0x8(%rsi), %rcx cmpq %rcx, %rdx movq %rcx, %rax cmovbq %rdx, %rax testq %rax, %rax je 0x10fa2 movq (%rdi), %rdi movq (%rsi), %r8 xorl %r9d, %r9d movq (%rdi,%r9,8), %r10 notq %r10 testq %r10, (%r8,%r9,8) jne 0x10fb3 incq %r9 cmpq %r9, %rax jne 0x10f8d movb $0x1, %al cmpq %rdx, %rcx jbe 0x10fb5 movq (%rsi), %rax cmpq $0x0, (%rax,%rdx,8) je 0x10fb6 xorl %eax, %eax retq incq %rdx movq %rdx, %rsi cmpq %rdx, %rcx je 0x10fcc leaq 0x1(%rsi), %rdx cmpq $0x0, (%rax,%rsi,8) je 0x10fb9 cmpq %rcx, %rsi setae %al retq
/lucaderi[P]CRoaring/src/bitset.c
bitset_union_count
size_t bitset_union_count(const bitset_t *CBITSET_RESTRICT b1, const bitset_t *CBITSET_RESTRICT b2) { size_t answer = 0; size_t minlength = b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize; size_t k = 0; for (; k + 3 < minlength; k += 4) { answer += roaring_hamming(b1->array[k] | b2->array[k]); answer += roaring_hamming(b1->array[k + 1] | b2->array[k + 1]); answer += roaring_hamming(b1->array[k + 2] | b2->array[k + 2]); answer += roaring_hamming(b1->array[k + 3] | b2->array[k + 3]); } for (; k < minlength; ++k) { answer += roaring_hamming(b1->array[k] | b2->array[k]); } if (b2->arraysize > b1->arraysize) { // k is equal to b1->arraysize for (; k + 3 < b2->arraysize; k += 4) { answer += roaring_hamming(b2->array[k]); answer += roaring_hamming(b2->array[k + 1]); answer += roaring_hamming(b2->array[k + 2]); answer += roaring_hamming(b2->array[k + 3]); } for (; k < b2->arraysize; ++k) { answer += roaring_hamming(b2->array[k]); } } else { // k is equal to b2->arraysize for (; k + 3 < b1->arraysize; k += 4) { answer += roaring_hamming(b1->array[k]); answer += roaring_hamming(b1->array[k + 1]); answer += roaring_hamming(b1->array[k + 2]); answer += roaring_hamming(b1->array[k + 3]); } for (; k < b1->arraysize; ++k) { answer += roaring_hamming(b1->array[k]); } } return answer; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq 0x8(%rdi), %rcx movq 0x8(%rsi), %rdx cmpq %rdx, %rcx movq %rdx, %r9 cmovbq %rcx, %r9 cmpq $0x4, %r9 jb 0x110de movq (%rdi), %r10 movq (%rsi), %r11 xorl %ebx, %ebx movdqa 0x151b7(%rip), %xmm0 # 0x261c0 movdqa 0x151bf(%rip), %xmm1 # 0x261d0 movdqa 0x151c7(%rip), %xmm2 # 0x261e0 pxor %xmm3, %xmm3 xorl %eax, %eax movdqu (%r10,%rbx,8), %xmm5 movdqu 0x10(%r10,%rbx,8), %xmm6 movdqu (%r11,%rbx,8), %xmm4 por %xmm5, %xmm4 movdqu 0x10(%r11,%rbx,8), %xmm5 por %xmm6, %xmm5 movdqa %xmm5, %xmm6 psrlw $0x1, %xmm6 pand %xmm0, %xmm6 psubb %xmm6, %xmm5 movdqa %xmm5, %xmm6 pand %xmm1, %xmm6 psrlw $0x2, %xmm5 pand %xmm1, %xmm5 paddb %xmm6, %xmm5 movdqa %xmm5, %xmm6 psrlw $0x4, %xmm6 paddb %xmm5, %xmm6 pand %xmm2, %xmm6 movdqa %xmm4, %xmm5 psrlw $0x1, %xmm5 pand %xmm0, %xmm5 psubb %xmm5, %xmm4 movdqa %xmm4, %xmm5 pand %xmm1, %xmm5 psrlw $0x2, %xmm4 pand %xmm1, %xmm4 paddb %xmm5, %xmm4 movdqa %xmm4, %xmm5 psrlw $0x4, %xmm5 paddb %xmm4, %xmm5 pand %xmm2, %xmm5 paddb %xmm6, %xmm5 psadbw %xmm3, %xmm5 pshufd $0xee, %xmm5, %xmm4 # xmm4 = xmm5[2,3,2,3] paddq %xmm5, %xmm4 movq %xmm4, %r8 addq %r8, %rax leaq 0x4(%rbx), %r8 addq $0x7, %rbx cmpq %r9, %rbx movq %r8, %rbx jb 0x1101f jmp 0x110e3 xorl %eax, %eax xorl %r8d, %r8d cmpq %r9, %r8 jae 0x1115d movq (%rdi), %r10 movq (%rsi), %r11 movabsq $0x5555555555555555, %rbx # imm = 0x5555555555555555 movabsq $0x3333333333333333, %r14 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r15 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r12 # imm = 0x101010101010101 movq (%r11,%r8,8), %r13 orq (%r10,%r8,8), %r13 movq %r13, %rbp shrq %rbp andq %rbx, %rbp subq %rbp, %r13 movq %r13, %rbp andq %r14, %rbp shrq $0x2, %r13 andq %r14, %r13 addq %rbp, %r13 movq %r13, %rbp shrq $0x4, %rbp addq %r13, %rbp andq %r15, %rbp imulq %r12, %rbp shrq $0x38, %rbp addq %rbp, %rax incq %r8 cmpq %r8, %r9 jne 0x11116 movq %r9, %r8 leaq 0x3(%r8), %r9 cmpq %rdx, %rcx jae 0x1123f cmpq %rdx, %r9 jae 
0x11311 movq (%rsi), %rcx movdqa 0x15042(%rip), %xmm0 # 0x261c0 movdqa 0x1504a(%rip), %xmm1 # 0x261d0 movdqa 0x15052(%rip), %xmm2 # 0x261e0 pxor %xmm3, %xmm3 movdqu (%rcx,%r8,8), %xmm4 movdqu 0x10(%rcx,%r8,8), %xmm5 movdqa %xmm5, %xmm6 psrlw $0x1, %xmm6 pand %xmm0, %xmm6 psubb %xmm6, %xmm5 movdqa %xmm5, %xmm6 pand %xmm1, %xmm6 psrlw $0x2, %xmm5 pand %xmm1, %xmm5 paddb %xmm6, %xmm5 movdqa %xmm5, %xmm6 psrlw $0x4, %xmm6 paddb %xmm5, %xmm6 pand %xmm2, %xmm6 movdqa %xmm4, %xmm5 psrlw $0x1, %xmm5 pand %xmm0, %xmm5 psubb %xmm5, %xmm4 movdqa %xmm4, %xmm5 pand %xmm1, %xmm5 psrlw $0x2, %xmm4 pand %xmm1, %xmm4 paddb %xmm5, %xmm4 movdqa %xmm4, %xmm5 psrlw $0x4, %xmm5 paddb %xmm4, %xmm5 pand %xmm2, %xmm5 paddb %xmm6, %xmm5 psadbw %xmm3, %xmm5 pshufd $0xee, %xmm5, %xmm4 # xmm4 = xmm5[2,3,2,3] paddq %xmm5, %xmm4 movq %xmm4, %rdi addq %rdi, %rax leaq 0x4(%r8), %rdi addq $0x7, %r8 cmpq %rdx, %r8 movq %rdi, %r8 jb 0x11192 jmp 0x11314 cmpq %rcx, %r9 jae 0x1138a movq (%rdi), %rdx movdqa 0x14f6d(%rip), %xmm0 # 0x261c0 movdqa 0x14f75(%rip), %xmm1 # 0x261d0 movdqa 0x14f7d(%rip), %xmm2 # 0x261e0 pxor %xmm3, %xmm3 movdqu (%rdx,%r8,8), %xmm4 movdqu 0x10(%rdx,%r8,8), %xmm5 movdqa %xmm5, %xmm6 psrlw $0x1, %xmm6 pand %xmm0, %xmm6 psubb %xmm6, %xmm5 movdqa %xmm5, %xmm6 pand %xmm1, %xmm6 psrlw $0x2, %xmm5 pand %xmm1, %xmm5 paddb %xmm6, %xmm5 movdqa %xmm5, %xmm6 psrlw $0x4, %xmm6 paddb %xmm5, %xmm6 pand %xmm2, %xmm6 movdqa %xmm4, %xmm5 psrlw $0x1, %xmm5 pand %xmm0, %xmm5 psubb %xmm5, %xmm4 movdqa %xmm4, %xmm5 pand %xmm1, %xmm5 psrlw $0x2, %xmm4 pand %xmm1, %xmm4 paddb %xmm5, %xmm4 movdqa %xmm4, %xmm5 psrlw $0x4, %xmm5 paddb %xmm4, %xmm5 pand %xmm2, %xmm5 paddb %xmm6, %xmm5 psadbw %xmm3, %xmm5 pshufd $0xee, %xmm5, %xmm4 # xmm4 = xmm5[2,3,2,3] paddq %xmm5, %xmm4 movq %xmm4, %rsi addq %rsi, %rax leaq 0x4(%r8), %rsi addq $0x7, %r8 cmpq %rcx, %r8 movq %rsi, %r8 jb 0x11267 jmp 0x1138d movq %r8, %rdi cmpq %rdx, %rdi jae 0x113fd movq (%rsi), %rcx movabsq $0x5555555555555555, %rsi # imm = 
0x5555555555555555 movabsq $0x3333333333333333, %r8 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r9 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r10 # imm = 0x101010101010101 movq (%rcx,%rdi,8), %r11 movq %r11, %rbx shrq %rbx andq %rsi, %rbx subq %rbx, %r11 movq %r11, %rbx andq %r8, %rbx shrq $0x2, %r11 andq %r8, %r11 addq %rbx, %r11 movq %r11, %rbx shrq $0x4, %rbx addq %r11, %rbx andq %r9, %rbx imulq %r10, %rbx shrq $0x38, %rbx addq %rbx, %rax incq %rdi cmpq %rdi, %rdx jne 0x11348 jmp 0x113fd movq %r8, %rsi cmpq %rcx, %rsi jae 0x113fd movq (%rdi), %rdx movabsq $0x5555555555555555, %rdi # imm = 0x5555555555555555 movabsq $0x3333333333333333, %r8 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r9 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r10 # imm = 0x101010101010101 movq (%rdx,%rsi,8), %r11 movq %r11, %rbx shrq %rbx andq %rdi, %rbx subq %rbx, %r11 movq %r11, %rbx andq %r8, %rbx shrq $0x2, %r11 andq %r8, %r11 addq %rbx, %r11 movq %r11, %rbx shrq $0x4, %rbx addq %r11, %rbx andq %r9, %rbx imulq %r10, %rbx shrq $0x38, %rbx addq %rbx, %rax incq %rsi cmpq %rsi, %rcx jne 0x113bd popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/bitset.c
bitset_inplace_intersection
void bitset_inplace_intersection(bitset_t *CBITSET_RESTRICT b1, const bitset_t *CBITSET_RESTRICT b2) { size_t minlength = b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize; size_t k = 0; for (; k < minlength; ++k) { b1->array[k] &= b2->array[k]; } for (; k < b1->arraysize; ++k) { b1->array[k] = 0; // memset could, maybe, be a tiny bit faster } }
movq 0x8(%rdi), %rdx movq 0x8(%rsi), %rcx cmpq %rcx, %rdx cmovbq %rdx, %rcx testq %rcx, %rcx je 0x11436 movq (%rsi), %rsi movq (%rdi), %r8 xorl %eax, %eax movq (%rsi,%rax,8), %r9 andq %r9, (%r8,%rax,8) incq %rax cmpq %rax, %rcx jne 0x11424 jmp 0x11438 xorl %eax, %eax subq %rax, %rdx jbe 0x11452 shlq $0x3, %rax addq (%rdi), %rax shlq $0x3, %rdx movq %rax, %rdi xorl %esi, %esi jmp 0x1100 retq
/lucaderi[P]CRoaring/src/bitset.c
bitset_inplace_symmetric_difference
bool bitset_inplace_symmetric_difference(bitset_t *CBITSET_RESTRICT b1, const bitset_t *CBITSET_RESTRICT b2) { size_t minlength = b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize; size_t k = 0; for (; k < minlength; ++k) { b1->array[k] ^= b2->array[k]; } if (b2->arraysize > b1->arraysize) { size_t oldsize = b1->arraysize; if (!bitset_resize(b1, b2->arraysize, false)) return false; memcpy(b1->array + oldsize, b2->array + oldsize, (b2->arraysize - oldsize) * sizeof(uint64_t)); } return true; }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq %rsi, %r14 movq %rdi, %r15 movq 0x8(%rdi), %rbx movq 0x8(%rsi), %rsi cmpq %rsi, %rbx movq %rsi, %rax cmovbq %rbx, %rax testq %rax, %rax je 0x1163f movq (%r14), %rcx movq (%r15), %rdx xorl %edi, %edi movq (%rcx,%rdi,8), %r8 xorq %r8, (%rdx,%rdi,8) incq %rdi cmpq %rdi, %rax jne 0x1162f movb $0x1, %bpl movq %rsi, %r12 subq %rbx, %r12 jbe 0x11679 xorl %ebp, %ebp movq %r15, %rdi xorl %edx, %edx callq 0x10933 testb %al, %al je 0x11679 shlq $0x3, %rbx movq (%r15), %rdi addq %rbx, %rdi addq (%r14), %rbx shlq $0x3, %r12 movq %rbx, %rsi movq %r12, %rdx callq 0x1140 movb $0x1, %bpl movl %ebp, %eax popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/bitset.c
bitset_symmetric_difference_count
size_t bitset_symmetric_difference_count(const bitset_t *CBITSET_RESTRICT b1, const bitset_t *CBITSET_RESTRICT b2) { size_t minlength = b1->arraysize < b2->arraysize ? b1->arraysize : b2->arraysize; size_t k = 0; size_t answer = 0; for (; k < minlength; ++k) { answer += roaring_hamming(b1->array[k] ^ b2->array[k]); } if (b2->arraysize > b1->arraysize) { for (; k < b2->arraysize; ++k) { answer += roaring_hamming(b2->array[k]); } } else { for (; k < b1->arraysize; ++k) { answer += roaring_hamming(b1->array[k]); } } return answer; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq 0x8(%rdi), %rdx movq 0x8(%rsi), %r8 cmpq %r8, %rdx movq %r8, %rcx cmovbq %rdx, %rcx testq %rcx, %rcx je 0x1171e movq (%rdi), %r9 movq (%rsi), %r10 xorl %eax, %eax movabsq $0x5555555555555555, %r11 # imm = 0x5555555555555555 movabsq $0x3333333333333333, %rbx # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r14 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r15 # imm = 0x101010101010101 xorl %r12d, %r12d movq (%r10,%r12,8), %r13 xorq (%r9,%r12,8), %r13 movq %r13, %rbp shrq %rbp andq %r11, %rbp subq %rbp, %r13 movq %r13, %rbp andq %rbx, %rbp shrq $0x2, %r13 andq %rbx, %r13 addq %rbp, %r13 movq %r13, %rbp shrq $0x4, %rbp addq %r13, %rbp andq %r14, %rbp imulq %r15, %rbp shrq $0x38, %rbp addq %rbp, %rax incq %r12 cmpq %r12, %rcx jne 0x116d8 jmp 0x11722 xorl %ecx, %ecx xorl %eax, %eax cmpq %r8, %rdx jae 0x1179d cmpq %r8, %rcx jae 0x1180d movq (%rsi), %rdx movabsq $0x5555555555555555, %rsi # imm = 0x5555555555555555 movabsq $0x3333333333333333, %rdi # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r9 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r10 # imm = 0x101010101010101 movq (%rdx,%rcx,8), %r11 movq %r11, %rbx shrq %rbx andq %rsi, %rbx subq %rbx, %r11 movq %r11, %rbx andq %rdi, %rbx shrq $0x2, %r11 andq %rdi, %r11 addq %rbx, %r11 movq %r11, %rbx shrq $0x4, %rbx addq %r11, %rbx andq %r9, %rbx imulq %r10, %rbx shrq $0x38, %rbx addq %rbx, %rax incq %rcx cmpq %rcx, %r8 jne 0x1175b jmp 0x1180d cmpq %rdx, %rcx jae 0x1180d movq (%rdi), %rsi movabsq $0x5555555555555555, %rdi # imm = 0x5555555555555555 movabsq $0x3333333333333333, %r8 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r9 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r10 # imm = 0x101010101010101 movq (%rsi,%rcx,8), %r11 movq %r11, %rbx shrq %rbx andq %rdi, %rbx subq %rbx, %r11 movq %r11, %rbx andq %r8, %rbx shrq $0x2, %r11 andq %r8, %r11 addq %rbx, %r11 movq %r11, %rbx shrq $0x4, %rbx addq %r11, 
%rbx andq %r9, %rbx imulq %r10, %rbx shrq $0x38, %rbx addq %rbx, %rax incq %rcx cmpq %rcx, %rdx jne 0x117cd popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/bitset.c
array_container_rank_many
inline uint32_t array_container_rank_many(const array_container_t *arr, uint64_t start_rank, const uint32_t *begin, const uint32_t *end, uint64_t *ans) { const uint16_t high = (uint16_t)((*begin) >> 16); uint32_t pos = 0; const uint32_t *iter = begin; for (; iter != end; iter++) { uint32_t x = *iter; uint16_t xhigh = (uint16_t)(x >> 16); if (xhigh != high) return iter - begin; // stop at next container const int32_t idx = binarySearch(arr->array + pos, arr->cardinality - pos, (uint16_t)x); const bool is_present = idx >= 0; if (is_present) { *(ans++) = start_rank + pos + (idx + 1); pos = idx + 1; } else { *(ans++) = start_rank + pos + (-idx - 1); } } return iter - begin; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rsi, -0x8(%rsp) movq %rdi, -0x10(%rsp) movq %rdx, %r9 movq %rdx, -0x18(%rsp) cmpq %rcx, %rdx je 0x11a41 movq -0x18(%rsp), %r9 movl (%r9), %edi xorl %r11d, %r11d movl (%r9), %ebp movl %ebp, %ebx xorl %edi, %ebx cmpl $0x10000, %ebx # imm = 0x10000 jae 0x119f7 movl %r11d, %r14d leaq (%r14,%r14), %r12 movq -0x10(%rsp), %rdx addq 0x8(%rdx), %r12 movl %r11d, %r13d notl %r13d addl (%rdx), %r13d xorl %r15d, %r15d cmpl %r13d, %r15d jg 0x11a05 leal (%r15,%r13), %edx movl %edx, %esi shrl %esi andl $-0x2, %edx movzwl (%r12,%rdx), %edx cmpw %bp, %dx jae 0x119e1 incl %esi movb $0x1, %dl movl %esi, %r15d jmp 0x119f1 jbe 0x119ec decl %esi movb $0x1, %dl movl %esi, %r13d jmp 0x119f1 xorl %edx, %edx movl %esi, %r10d testb %dl, %dl jne 0x119be jmp 0x11a0b movq %r9, %rax subq -0x18(%rsp), %rax shrq $0x2, %rax jmp 0x11a2c notl %r15d movl %r15d, %r10d addq -0x8(%rsp), %r14 leal 0x1(%r10), %edx testl %r10d, %r10d notl %r10d cmovnsl %edx, %r10d cmovnsl %edx, %r11d addq %r14, %r10 movq %r10, (%r8) addq $0x8, %r8 cmpl $0xffff, %ebx # imm = 0xFFFF ja 0x11a4d addq $0x4, %r9 cmpq %rcx, %r9 jne 0x11993 subq -0x18(%rsp), %r9 shrq $0x2, %r9 movl %r9d, %eax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/include/roaring/containers/array.h
array_container_contains
inline bool array_container_contains(const array_container_t *arr, uint16_t pos) { // return binarySearch(arr->array, arr->cardinality, pos) >= 0; // binary search with fallback to linear search for short ranges int32_t low = 0; const uint16_t *carr = (const uint16_t *)arr->array; int32_t high = arr->cardinality - 1; // while (high - low >= 0) { while (high >= low + 16) { int32_t middleIndex = (low + high) >> 1; uint16_t middleValue = carr[middleIndex]; if (middleValue < pos) { low = middleIndex + 1; } else if (middleValue > pos) { high = middleIndex - 1; } else { return true; } } for (int i = low; i <= high; i++) { uint16_t v = carr[i]; if (v == pos) { return true; } if (v > pos) return false; } return false; }
movq 0x8(%rdi), %rax movl (%rdi), %ecx decl %ecx xorl %edx, %edx leal 0x10(%rdx), %edi cmpl %edi, %ecx jl 0x11af2 leal (%rdx,%rcx), %edi movl %edi, %r8d shrl %r8d andl $-0x2, %edi leal 0x1(%r8), %r9d decl %r8d cmpw %si, (%rax,%rdi) cmovbel %ecx, %r8d cmovbl %ecx, %r8d cmovael %edx, %r9d movl %r8d, %ecx movl %r9d, %edx jne 0x11abd movb $0x1, %al retq cmpl %ecx, %edx jg 0x11b28 movl %edx, %edi leaq (%rax,%rdi,2), %rdi subl %edx, %ecx incl %ecx movb $0x1, %al xorl %edx, %edx movzwl (%rdi,%rdx,2), %r8d cmpw %si, %r8w setbe %r9b sete %r10b andb %r9b, %al orb %r10b, %al cmpw %si, %r8w jae 0x11af1 incq %rdx cmpl %edx, %ecx jne 0x11b04 xorl %eax, %eax retq
/lucaderi[P]CRoaring/include/roaring/containers/array.h
array_container_create_range
array_container_t *array_container_create_range(uint32_t min, uint32_t max) { array_container_t *answer = array_container_create_given_capacity(max - min + 1); if (answer == NULL) return answer; answer->cardinality = 0; for (uint32_t k = min; k < max; k++) { answer->array[answer->cardinality++] = k; } return answer; }
pushq %rbp pushq %rbx pushq %rax movl %esi, %ebx movl %edi, %ebp movl %esi, %edi subl %ebp, %edi incl %edi callq 0x11b2b testq %rax, %rax je 0x11bdf movl $0x0, (%rax) cmpl %ebp, %ebx jbe 0x11bdf movslq (%rax), %rcx movq %rcx, %rdx addq %rcx, %rdx addq 0x8(%rax), %rdx movw %bp, (%rdx) incl %ebp addq $0x2, %rdx incl %ecx cmpl %ebp, %ebx jne 0x11bce movl %ecx, (%rax) addq $0x8, %rsp popq %rbx popq %rbp retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_clone
ALLOW_UNALIGNED array_container_t *array_container_clone(const array_container_t *src) { array_container_t *newcontainer = array_container_create_given_capacity(src->capacity); if (newcontainer == NULL) return NULL; newcontainer->cardinality = src->cardinality; memcpy(newcontainer->array, src->array, src->cardinality * sizeof(uint16_t)); return newcontainer; }
pushq %r14 pushq %rbx pushq %rax movq %rdi, %r14 movl 0x4(%rdi), %edi callq 0x11b2b testq %rax, %rax je 0x11c17 movq %rax, %rbx movl (%r14), %eax movl %eax, (%rbx) movq 0x8(%rbx), %rdi movq 0x8(%r14), %rsi movslq (%r14), %rdx addq %rdx, %rdx callq 0x1140 jmp 0x11c19 xorl %ebx, %ebx movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_offset
/* Split the values of array container c, shifted upward by offset, into a
 * "low" part (values that stay below 2^16 after the shift) and a "high"
 * part (values that wrap past 2^16, via uint16_t arithmetic).  Each output
 * is stored only when the matching pointer (loc / hic) is non-NULL and the
 * part is non-empty; otherwise that pointer is left untouched. */
void array_container_offset(const array_container_t *c, container_t **loc,
                            container_t **hic, uint16_t offset) {
    array_container_t *lo = NULL, *hi = NULL;
    int top, lo_cap, hi_cap;

    // Values strictly below top remain in 16-bit range after adding offset.
    top = (1 << 16) - offset;
    lo_cap = count_less(c->array, c->cardinality, top);
    if (loc && lo_cap) {
        lo = array_container_create_given_capacity(lo_cap);
        // NOTE(review): the allocation result is used unchecked --
        // presumably the library assumes malloc success here; verify.
        for (int i = 0; i < lo_cap; ++i) {
            array_container_add(lo, c->array[i] + offset);
        }
        *loc = (container_t *)lo;
    }
    hi_cap = c->cardinality - lo_cap;
    if (hic && hi_cap) {
        hi = array_container_create_given_capacity(hi_cap);
        // Remaining values wrap modulo 2^16 through the uint16_t addition.
        for (int i = lo_cap; i < c->cardinality; ++i) {
            array_container_add(hi, c->array[i] + offset);
        }
        *hic = (container_t *)hi;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rdi, %r14 movl (%rdi), %r11d testl %r11d, %r11d je 0x11c8d movl %ecx, %eax negl %eax movq 0x8(%r14), %r10 decl %r11d xorl %ebx, %ebx cmpl %r11d, %ebx jg 0x11c91 leal (%rbx,%r11), %r9d movl %r9d, %r8d shrl %r8d andl $-0x2, %r9d movzwl (%r10,%r9), %r9d cmpw %ax, %r9w jae 0x11c73 incl %r8d movb $0x1, %r9b movl %r8d, %ebx jmp 0x11c86 jbe 0x11c80 decl %r8d movb $0x1, %r9b movl %r8d, %r11d jmp 0x11c86 xorl %r9d, %r9d movl %r8d, %edi testb %r9b, %r9b jne 0x11c4a jmp 0x11c95 xorl %ebx, %ebx jmp 0x11c9c notl %ebx movl %ebx, %edi movl %edi, %ebx sarl $0x1f, %ebx xorl %edi, %ebx testq %rsi, %rsi movl %ecx, 0x8(%rsp) movq %rdx, 0x10(%rsp) je 0x11dfe testl %ebx, %ebx je 0x11dfe movq %rsi, 0x20(%rsp) movl %ebx, %edi callq 0x11b2b movl 0x8(%rsp), %r8d movq %rax, %r13 movl %ebx, 0xc(%rsp) testl %ebx, %ebx jle 0x11ded movl 0xc(%rsp), %r9d xorl %r15d, %r15d movq %r9, 0x18(%rsp) movq 0x8(%r14), %rax movzwl (%rax,%r15,2), %r12d addw %r8w, %r12w movslq (%r13), %rbp testq %rbp, %rbp je 0x11d0d movq 0x8(%r13), %rax cmpl $0x7fffffff, %ebp # imm = 0x7FFFFFFF je 0x11d4f cmpw %r12w, -0x2(%rax,%rbp,2) jae 0x11d4f movl 0x4(%r13), %esi cmpl %esi, %ebp jne 0x11d2e incl %esi movq %r13, %rdi movl $0x1, %edx callq 0x11fbf movq 0x18(%rsp), %r9 movl 0x8(%rsp), %r8d movq 0x8(%r13), %rax movslq (%r13), %rcx leal 0x1(%rcx), %edx movl %edx, (%r13) movw %r12w, (%rax,%rcx,2) incq %r15 cmpq %r9, %r15 jne 0x11ce3 jmp 0x11ded leal -0x1(%rbp), %edx xorl %ecx, %ecx cmpl %edx, %ecx jg 0x11d8b leal (%rcx,%rdx), %edi movl %edi, %esi shrl %esi andl $-0x2, %edi movzwl (%rax,%rdi), %edi cmpw %r12w, %di jae 0x11d75 incl %esi movb $0x1, %dil movl %esi, %ecx jmp 0x11d84 jbe 0x11d80 decl %esi movb $0x1, %dil movl %esi, %edx jmp 0x11d84 xorl %edi, %edi movl %esi, %ebx testb %dil, %dil jne 0x11d54 jmp 0x11d8f notl %ecx movl %ecx, %ebx cmpl $0x7fffffff, %ebp # imm = 0x7FFFFFFF je 0x11d42 testl %ebx, %ebx jns 0x11d42 movl 0x4(%r13), %esi 
cmpl %esi, %ebp jne 0x11db2 incl %esi movq %r13, %rdi movl $0x1, %edx callq 0x11fbf notl %ebx movq 0x8(%r13), %rax leaq (%rax,%rbx,2), %rsi leaq (%rax,%rbx,2), %rdi addq $0x2, %rdi subl %ebx, %ebp movslq %ebp, %rdx addq %rdx, %rdx callq 0x1190 movq 0x8(%r13), %rax movw %r12w, (%rax,%rbx,2) incl (%r13) movl 0x8(%rsp), %r8d movq 0x18(%rsp), %r9 jmp 0x11d42 movq 0x20(%rsp), %rax movq %r13, (%rax) movq 0x10(%rsp), %rdx movl 0xc(%rsp), %ebx testq %rdx, %rdx je 0x11f38 movl (%r14), %edi subl %ebx, %edi je 0x11f38 callq 0x11b2b movl 0x8(%rsp), %r8d movq %rax, %r15 cmpl (%r14), %ebx jge 0x11f30 movslq %ebx, %rbx movq 0x8(%r14), %rax movzwl (%rax,%rbx,2), %r12d addw %r8w, %r12w movslq (%r15), %r13 testq %r13, %r13 je 0x11e55 movq 0x8(%r15), %rax cmpl $0x7fffffff, %r13d # imm = 0x7FFFFFFF je 0x11e94 cmpw %r12w, -0x2(%rax,%r13,2) jae 0x11e94 movl 0x4(%r15), %esi cmpl %esi, %r13d jne 0x11e72 incl %esi movq %r15, %rdi movl $0x1, %edx callq 0x11fbf movl 0x8(%rsp), %r8d movq 0x8(%r15), %rax movslq (%r15), %rcx leal 0x1(%rcx), %edx movl %edx, (%r15) movw %r12w, (%rax,%rcx,2) incq %rbx movslq (%r14), %rax cmpq %rax, %rbx jl 0x11e2b jmp 0x11f30 leal -0x1(%r13), %edx xorl %ecx, %ecx cmpl %edx, %ecx jg 0x11ed1 leal (%rcx,%rdx), %edi movl %edi, %esi shrl %esi andl $-0x2, %edi movzwl (%rax,%rdi), %edi cmpw %r12w, %di jae 0x11ebb incl %esi movb $0x1, %dil movl %esi, %ecx jmp 0x11eca jbe 0x11ec6 decl %esi movb $0x1, %dil movl %esi, %edx jmp 0x11eca xorl %edi, %edi movl %esi, %ebp testb %dil, %dil jne 0x11e9a jmp 0x11ed5 notl %ecx movl %ecx, %ebp cmpl $0x7fffffff, %r13d # imm = 0x7FFFFFFF je 0x11e84 testl %ebp, %ebp jns 0x11e84 movl 0x4(%r15), %esi cmpl %esi, %r13d jne 0x11efa incl %esi movq %r15, %rdi movl $0x1, %edx callq 0x11fbf notl %ebp movq 0x8(%r15), %rax leaq (%rax,%rbp,2), %rsi leaq (%rax,%rbp,2), %rdi addq $0x2, %rdi subl %ebp, %r13d movslq %r13d, %rdx addq %rdx, %rdx callq 0x1190 movq 0x8(%r15), %rax movw %r12w, (%rax,%rbp,2) incl (%r15) movl 0x8(%rsp), %r8d jmp 0x11e84 movq 
0x10(%rsp), %rax movq %r15, (%rax) addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_add_from_range
/* Append values min, min+step, min+2*step, ... (while < max) to arr.
 * NOTE(review): a step of 0 with min < max would loop forever --
 * presumably callers guarantee step >= 1; confirm at call sites. */
void array_container_add_from_range(array_container_t *arr, uint32_t min,
                                    uint32_t max, uint16_t step) {
    for (uint32_t value = min; value < max; value += step) {
        array_container_append(arr, value);
    }
}
cmpl %edx, %esi jae 0x120f4 pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movl %edx, %ebx movl %esi, %ebp movq %rdi, %r14 movzwl %cx, %r15d movl 0x4(%r14), %esi cmpl %esi, (%r14) jne 0x120d2 incl %esi movq %r14, %rdi movl $0x1, %edx callq 0x11fbf movq 0x8(%r14), %rax movslq (%r14), %rcx leal 0x1(%rcx), %edx movl %edx, (%r14) movw %bp, (%rax,%rcx,2) addl %r15d, %ebp cmpl %ebx, %ebp jb 0x120ba addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_andnot
/* Compute the set difference array_1 \ array_2 into out.
 * out is first grown to hold up to array_1->cardinality entries.
 * On x64 builds, a vectorized kernel is used when AVX2 is supported and
 * out aliases neither input; otherwise the scalar path runs (which does
 * tolerate out aliasing an input). */
void array_container_andnot(const array_container_t *array_1,
                            const array_container_t *array_2,
                            array_container_t *out) {
    if (out->capacity < array_1->cardinality)
        array_container_grow(out, array_1->cardinality, false);
#if CROARING_IS_X64
    if ((croaring_hardware_support() & ROARING_SUPPORTS_AVX2) &&
        (out != array_1) && (out != array_2)) {
        out->cardinality = difference_vector16(
            array_1->array, array_1->cardinality, array_2->array,
            array_2->cardinality, out->array);
    } else {
        out->cardinality = difference_uint16(array_1->array,
                                             array_1->cardinality,
                                             array_2->array,
                                             array_2->cardinality, out->array);
    }
#else
    out->cardinality = difference_uint16(array_1->array, array_1->cardinality,
                                         array_2->array, array_2->cardinality,
                                         out->array);
#endif
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdi), %esi cmpl %esi, 0x4(%rdx) jge 0x12163 movq %rbx, %rdi xorl %edx, %edx callq 0x11fbf callq 0x20570 movq 0x8(%r15), %rdi movslq (%r15), %rsi cmpq %r14, %rbx je 0x12190 cmpq %r15, %rbx je 0x12190 andl $0x1, %eax je 0x12190 movq 0x8(%r14), %rdx movslq (%r14), %rcx movq 0x8(%rbx), %r8 callq 0xe166 jmp 0x121a0 movq 0x8(%r14), %rdx movl (%r14), %ecx movq 0x8(%rbx), %r8 callq 0xecd1 movl %eax, (%rbx) popq %rbx popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_intersection
/* Intersect array1 and array2 into out.
 * Strongly skewed cardinalities (ratio above threshold) dispatch to a
 * galloping routine with the smaller array passed first; otherwise a
 * merge-style intersection runs (vectorized when AVX2 is available).
 * On x64 the output is over-allocated by one 128-bit register's worth of
 * uint16_t slots so the vector kernel may write past min_card safely. */
void array_container_intersection(const array_container_t *array1,
                                  const array_container_t *array2,
                                  array_container_t *out) {
    int32_t card_1 = array1->cardinality, card_2 = array2->cardinality,
            min_card = minimum_int32(card_1, card_2);
    const int threshold = 64;  // subject to tuning
#if CROARING_IS_X64
    if (out->capacity < min_card) {
        array_container_grow(out, min_card + sizeof(__m128i) / sizeof(uint16_t),
                             false);
    }
#else
    if (out->capacity < min_card) {
        array_container_grow(out, min_card, false);
    }
#endif
    if (card_1 * threshold < card_2) {
        out->cardinality = intersect_skewed_uint16(
            array1->array, card_1, array2->array, card_2, out->array);
    } else if (card_2 * threshold < card_1) {
        out->cardinality = intersect_skewed_uint16(
            array2->array, card_2, array1->array, card_1, out->array);
    } else {
#if CROARING_IS_X64
        if (croaring_hardware_support() & ROARING_SUPPORTS_AVX2) {
            out->cardinality = intersect_vector16(
                array1->array, card_1, array2->array, card_2, out->array);
        } else {
            out->cardinality = intersect_uint16(
                array1->array, card_1, array2->array, card_2, out->array);
        }
#else
        out->cardinality = intersect_uint16(array1->array, card_1,
                                            array2->array, card_2, out->array);
#endif
    }
}
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdx, %rbx movq %rsi, %r12 movq %rdi, %r13 movslq (%rdi), %r15 movslq (%rsi), %r14 cmpl %r14d, %r15d movl %r14d, %esi cmovll %r15d, %esi cmpl %esi, 0x4(%rdx) jge 0x1222c addl $0x8, %esi movq %rbx, %rdi xorl %edx, %edx callq 0x11fbf movl %r15d, %eax shll $0x6, %eax cmpl %r14d, %eax jge 0x1224c movq 0x8(%r13), %rdi movq 0x8(%r12), %rdx movq 0x8(%rbx), %r8 movq %r15, %rsi movq %r14, %rcx jmp 0x1226a movl %r14d, %eax shll $0x6, %eax cmpl %r15d, %eax jge 0x1227b movq 0x8(%r12), %rdi movq 0x8(%r13), %rdx movq 0x8(%rbx), %r8 movq %r14, %rsi movq %r15, %rcx callq 0xe400 movl %eax, (%rbx) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq callq 0x20570 movq 0x8(%r13), %rdi movq 0x8(%r12), %rdx movq 0x8(%rbx), %r8 movq %r15, %rsi movq %r14, %rcx testb $0x1, %al jne 0x1229e callq 0xea15 jmp 0x1226f callq 0xdbca jmp 0x1226f
/lucaderi[P]CRoaring/src/containers/array.c
array_container_intersection_cardinality
/* Return |array1 AND array2| without materializing the intersection.
 * Uses the same skew heuristic as array_container_intersection: a ratio
 * above threshold takes the galloping path (smaller array first);
 * otherwise a merge count runs, vectorized when AVX2 is available. */
int array_container_intersection_cardinality(const array_container_t *array1,
                                             const array_container_t *array2) {
    int32_t card_1 = array1->cardinality, card_2 = array2->cardinality;
    const int threshold = 64;  // subject to tuning
    if (card_1 * threshold < card_2) {
        return intersect_skewed_uint16_cardinality(array1->array, card_1,
                                                   array2->array, card_2);
    } else if (card_2 * threshold < card_1) {
        return intersect_skewed_uint16_cardinality(array2->array, card_2,
                                                   array1->array, card_1);
    } else {
#if CROARING_IS_X64
        if (croaring_hardware_support() & ROARING_SUPPORTS_AVX2) {
            return intersect_vector16_cardinality(array1->array, card_1,
                                                  array2->array, card_2);
        } else {
            return intersect_uint16_cardinality(array1->array, card_1,
                                                array2->array, card_2);
        }
#else
        return intersect_uint16_cardinality(array1->array, card_1,
                                            array2->array, card_2);
#endif
    }
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rsi, %r15 movq %rdi, %r12 movslq (%rdi), %r14 movslq (%rsi), %rbx movl %r14d, %eax shll $0x6, %eax cmpl %ebx, %eax jge 0x122d4 movq 0x8(%r12), %rdi movq 0x8(%r15), %rdx movq %r14, %rsi movq %rbx, %rcx jmp 0x122ed movl %ebx, %eax shll $0x6, %eax cmpl %r14d, %eax jge 0x122fd movq 0x8(%r15), %rdi movq 0x8(%r12), %rdx movq %rbx, %rsi movq %r14, %rcx addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 jmp 0xe768 callq 0x20570 movq 0x8(%r12), %rdi movq 0x8(%r15), %rdx movq %r14, %rsi movq %rbx, %rcx addq $0x8, %rsp testb $0x1, %al jne 0x12325 popq %rbx popq %r12 popq %r14 popq %r15 jmp 0xea7e popq %rbx popq %r12 popq %r14 popq %r15 jmp 0xe018
/lucaderi[P]CRoaring/src/containers/array.c
array_container_intersect
bool array_container_intersect(const array_container_t *array1, const array_container_t *array2) { int32_t card_1 = array1->cardinality, card_2 = array2->cardinality; const int threshold = 64; // subject to tuning if (card_1 * threshold < card_2) { return intersect_skewed_uint16_nonempty(array1->array, card_1, array2->array, card_2); } else if (card_2 * threshold < card_1) { return intersect_skewed_uint16_nonempty(array2->array, card_2, array1->array, card_1); } else { // we do not bother vectorizing return intersect_uint16_nonempty(array1->array, card_1, array2->array, card_2); } }
movslq (%rdi), %rax movslq (%rsi), %rcx movl %eax, %edx shll $0x6, %edx cmpl %ecx, %edx jge 0x12350 movq 0x8(%rdi), %rdi movq 0x8(%rsi), %rdx movq %rax, %rsi jmp 0xe920 movl %ecx, %edx shll $0x6, %edx cmpl %eax, %edx jge 0x1236f movq 0x8(%rsi), %rsi movq 0x8(%rdi), %rdx movq %rsi, %rdi movq %rcx, %rsi movq %rax, %rcx jmp 0xe920 movq 0x8(%rdi), %rdi movq 0x8(%rsi), %rdx movq %rax, %rsi jmp 0xeadc
/lucaderi[P]CRoaring/src/containers/array.c
array_container_to_uint32_array
/* Write the container's values, each offset by base, as uint32_t into
 * vout.  Returns the number of values written (the cardinality).
 * On x64 the AVX-512 / AVX2 kernels are preferred when supported;
 * the scalar fallback emits one value at a time via memcpy so the
 * store works on unaligned destinations. */
ALLOW_UNALIGNED
int array_container_to_uint32_array(void *vout, const array_container_t *cont,
                                    uint32_t base) {
#if CROARING_IS_X64
    int support = croaring_hardware_support();
#if CROARING_COMPILER_SUPPORTS_AVX512
    if (support & ROARING_SUPPORTS_AVX512) {
        return avx512_array_container_to_uint32_array(vout, cont->array,
                                                      cont->cardinality, base);
    }
#endif
    if (support & ROARING_SUPPORTS_AVX2) {
        return array_container_to_uint32_array_vector16(
            vout, cont->array, cont->cardinality, base);
    }
#endif  // CROARING_IS_X64
    int outpos = 0;
    uint32_t *out = (uint32_t *)vout;
    size_t i = 0;
    for (; i < (size_t)cont->cardinality; ++i) {
        const uint32_t val = base + cont->array[i];
        memcpy(out + outpos, &val,
               sizeof(uint32_t));  // should be compiled as a MOV on x64
        outpos++;
    }
    return outpos;
}
pushq %r15 pushq %r14 pushq %rbx movl %edx, %ebx movq %rsi, %r15 movq %rdi, %r14 callq 0x20570 testb $0x2, %al jne 0x12448 testb $0x1, %al jne 0x1245e cmpl $0x0, (%r15) je 0x12474 xorl %eax, %eax movq 0x8(%r15), %rcx movzwl (%rcx,%rax,2), %ecx addl %ebx, %ecx movl %ecx, (%r14,%rax,4) incq %rax movslq (%r15), %rcx cmpq %rcx, %rax jb 0x1242d jmp 0x12476 movq 0x8(%r15), %rsi movslq (%r15), %rdx movq %r14, %rdi movl %ebx, %ecx popq %rbx popq %r14 popq %r15 jmp 0xfbef movq 0x8(%r15), %rsi movslq (%r15), %rdx movq %r14, %rdi movl %ebx, %ecx popq %rbx popq %r14 popq %r15 jmp 0xdd52 xorl %eax, %eax popq %rbx popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/array.c
array_container_read
/* Deserialize cardinality uint16_t values from buf into container,
 * growing it first if needed, and return the serialized size in bytes
 * (2 * cardinality).
 * NOTE(review): buf is copied without any length validation --
 * presumably the caller has already bounds-checked the input buffer;
 * confirm for untrusted deserialization paths. */
int32_t array_container_read(int32_t cardinality, array_container_t *container,
                             const char *buf) {
    if (container->capacity < cardinality) {
        array_container_grow(container, cardinality, false);
    }
    container->cardinality = cardinality;
    memcpy(container->array, buf, container->cardinality * sizeof(uint16_t));
    return array_container_size_in_bytes(container);
}
pushq %rbp pushq %r14 pushq %rbx movq %rdx, %r14 movq %rsi, %rbx movl %edi, %ebp cmpl %edi, 0x4(%rsi) jge 0x126c7 movq %rbx, %rdi movl %ebp, %esi xorl %edx, %edx callq 0x11fbf movl %ebp, (%rbx) movq 0x8(%rbx), %rdi movslq %ebp, %rdx addq %rdx, %rdx movq %r14, %rsi callq 0x1140 movl (%rbx), %eax addl %eax, %eax popq %rbx popq %r14 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/array.c
bitset_container_create
/* Allocate a zeroed bitset container.  The word array is allocated with
 * the alignment the widest available SIMD path needs (64 bytes when
 * AVX-512 is supported at runtime, 32 bytes otherwise).  Returns NULL,
 * with no partial allocation left behind, if either allocation fails. */
bitset_container_t *bitset_container_create(void) {
    bitset_container_t *bitset =
        (bitset_container_t *)roaring_malloc(sizeof(bitset_container_t));

    if (!bitset) {
        return NULL;
    }
    size_t align_size = 32;
#if CROARING_IS_X64
    int support = croaring_hardware_support();
    if (support & ROARING_SUPPORTS_AVX512) {
        // sizeof(__m512i) == 64
        align_size = 64;
    } else {
        // sizeof(__m256i) == 32
        align_size = 32;
    }
#endif
    bitset->words = (uint64_t *)roaring_aligned_malloc(
        align_size, sizeof(uint64_t) * BITSET_CONTAINER_SIZE_IN_WORDS);
    if (!bitset->words) {
        roaring_free(bitset);
        return NULL;
    }
    bitset_container_clear(bitset);
    return bitset;
}
pushq %rbx movl $0x10, %edi callq 0x2369 testq %rax, %rax je 0x12885 movq %rax, %rbx callq 0x20570 andl $0x2, %eax shll $0x4, %eax leaq 0x20(%rax), %rdi movl $0x2000, %esi # imm = 0x2000 callq 0x2387 movq %rax, 0x8(%rbx) testq %rax, %rax je 0x1287d movl $0x2000, %edx # imm = 0x2000 movq %rax, %rdi xorl %esi, %esi callq 0x1100 movl $0x0, (%rbx) jmp 0x12887 movq %rbx, %rdi callq 0x2381 xorl %ebx, %ebx movq %rbx, %rax popq %rbx retq
/lucaderi[P]CRoaring/src/containers/bitset.c
bitset_container_offset
/* Shift bitset container c upward by offset, splitting the result into a
 * low part (*loc, bits that stay within the 2^16 space) and a high part
 * (*hic, bits that overflow past 2^16).  An output pointer is written
 * only when it is non-NULL and the corresponding part is non-empty.
 * b = whole-word shift, i = intra-word bit shift, end = number of source
 * words contributing to the low part. */
void bitset_container_offset(const bitset_container_t *c, container_t **loc,
                             container_t **hic, uint16_t offset) {
    bitset_container_t *bc = NULL;
    uint64_t val;
    uint16_t b, i, end;

    b = offset >> 6;
    i = offset % 64;
    end = 1024 - b;

    if (loc != NULL) {
        bc = bitset_container_create();
        if (i == 0) {
            // Shift is word-aligned: a plain block copy suffices.
            memcpy(bc->words + b, c->words, 8 * end);
        } else {
            // Each destination word combines bits from two adjacent
            // source words: low bits from word k, carry from word k-1.
            bc->words[b] = c->words[0] << i;
            for (uint32_t k = 1; k < end; ++k) {
                val = c->words[k] << i;
                val |= c->words[k - 1] >> (64 - i);
                bc->words[b + k] = val;
            }
        }

        bc->cardinality = bitset_container_compute_cardinality(bc);
        if (bc->cardinality != 0) {
            *loc = bc;
        }
        // If every set bit landed in the low part, there is no high part.
        if (bc->cardinality == c->cardinality) {
            return;
        }
    }

    if (hic == NULL) {
        // Both hic and loc can't be NULL, so bc is never NULL here
        if (bc->cardinality == 0) {
            bitset_container_free(bc);
        }
        return;
    }

    // Reuse the low container only when it ended up empty (and thus was
    // never published through *loc); otherwise allocate a fresh one.
    if (bc == NULL || bc->cardinality != 0) {
        bc = bitset_container_create();
    }

    if (i == 0) {
        memcpy(bc->words, c->words + end, 8 * b);
    } else {
        for (uint32_t k = end; k < 1024; ++k) {
            val = c->words[k] << i;
            val |= c->words[k - 1] >> (64 - i);
            bc->words[k - end] = val;
        }
        // Top bits of the last source word spill into word b of the
        // high container.
        bc->words[b] = c->words[1023] >> (64 - i);
    }

    bc->cardinality = bitset_container_compute_cardinality(bc);
    if (bc->cardinality == 0) {
        bitset_container_free(bc);
        return;
    }
    *hic = bc;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %ecx, %ebx movq %rdx, 0x10(%rsp) movq %rdi, 0x8(%rsp) movl %ecx, %ebp shrl $0x6, %ebp movl %ecx, %r12d andl $0x3f, %r12d movl $0x400, %r15d # imm = 0x400 subl %ebp, %r15d testq %rsi, %rsi je 0x12ca2 movq %rsi, %r13 callq 0x12831 movq %rax, %r14 testw %r12w, %r12w je 0x12ca7 movq 0x8(%rsp), %rax movq 0x8(%rax), %rsi movq (%rsi), %rax movl %r12d, %ecx shlq %cl, %rax movq 0x8(%r14), %rcx movzwl %bp, %r8d movq %rax, (%rcx,%r8,8) cmpl $0x3ff, %r8d # imm = 0x3FF je 0x12cc6 movzwl %r12w, %eax movl $0x40, %edx subl %eax, %edx cmpw $0x3, %r15w movl $0x2, %edi cmovael %r15d, %edi movzwl %di, %edi leaq (%rcx,%r8,8), %r8 movl $0x1, %r9d movq -0x8(%rsi,%r9,8), %r10 movq (%rsi,%r9,8), %r11 movl %eax, %ecx shlq %cl, %r11 movl %edx, %ecx shrq %cl, %r10 orq %r11, %r10 movq %r10, (%r8,%r9,8) incq %r9 cmpq %r9, %rdi jne 0x12c7e jmp 0x12cc6 xorl %r14d, %r14d jmp 0x12ce6 movzwl %bp, %edi shll $0x3, %edi addq 0x8(%r14), %rdi movq 0x8(%rsp), %rax movq 0x8(%rax), %rsi movzwl %r15w, %edx shll $0x3, %edx callq 0x1140 movq %r14, %rdi callq 0x12de3 movl %eax, (%r14) testl %eax, %eax je 0x12cd9 movq %r14, (%r13) movq 0x8(%rsp), %rcx cmpl (%rcx), %eax je 0x12dbe movq 0x10(%rsp), %r13 testq %r13, %r13 je 0x12d84 testq %r14, %r14 je 0x12cff cmpl $0x0, (%r14) je 0x12d07 callq 0x12831 movq %rax, %r14 testw %r12w, %r12w je 0x12d8c movzwl %r12w, %eax cmpw $0x40, %bx jb 0x12d60 movl $0x40, %edx subl %eax, %edx movq 0x8(%r14), %rsi movzwl %r15w, %r8d movl $0x400, %edi # imm = 0x400 subq %r8, %rdi shll $0x3, %r8d movq 0x8(%rsp), %rcx addq 0x8(%rcx), %r8 xorl %r9d, %r9d movq -0x8(%r8,%r9,8), %r10 movq (%r8,%r9,8), %r11 movl %eax, %ecx shlq %cl, %r11 movl %edx, %ecx shrq %cl, %r10 orq %r11, %r10 movq %r10, (%rsi,%r9,8) incq %r9 cmpq %r9, %rdi jne 0x12d3e movq 0x8(%rsp), %rcx movq 0x8(%rcx), %rcx movq 0x1ff8(%rcx), %rdx negb %al movl %eax, %ecx shrq %cl, %rdx movq 0x8(%r14), %rax movzwl %bp, %ecx movq %rdx, 
(%rax,%rcx,8) jmp 0x12dab cmpl $0x0, (%r14) jne 0x12dbe jmp 0x12dcd movq 0x8(%r14), %rdi movzwl %r15w, %esi shll $0x3, %esi movq 0x8(%rsp), %rax addq 0x8(%rax), %rsi movzwl %bp, %edx shll $0x3, %edx callq 0x1140 movq %r14, %rdi callq 0x12de3 movl %eax, (%r14) testl %eax, %eax je 0x12dcd movq %r14, (%r13) addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %r14, %rdi addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x12b5d
/lucaderi[P]CRoaring/src/containers/bitset.c
bitset_container_compute_cardinality
/* Count the set bits across all BITSET_CONTAINER_SIZE_IN_WORDS words.
 * Dispatches at runtime on CPU support: AVX-512 VPOPCNT kernel first,
 * then the AVX2 Harley-Seal kernel, then the scalar fallback. */
int bitset_container_compute_cardinality(const bitset_container_t *bitset) {
    int support = croaring_hardware_support();
#if CROARING_COMPILER_SUPPORTS_AVX512
    if (support & ROARING_SUPPORTS_AVX512) {
        return (int)avx512_vpopcount(
            (const __m512i *)bitset->words,
            BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX512_REG));
    } else
#endif  // CROARING_COMPILER_SUPPORTS_AVX512
        if (support & ROARING_SUPPORTS_AVX2) {
        return (int)avx2_harley_seal_popcount256(
            (const __m256i *)bitset->words,
            BITSET_CONTAINER_SIZE_IN_WORDS / (WORDS_IN_AVX2_REG));
    } else {
        return _scalar_bitset_container_compute_cardinality(bitset);
    }
}
pushq %rbx movq %rdi, %rbx callq 0x20570 testb $0x2, %al jne 0x12f90 movq 0x8(%rbx), %rdi testb $0x1, %al jne 0x12f9b pxor %xmm0, %xmm0 xorl %eax, %eax movdqa 0x133b2(%rip), %xmm1 # 0x261c0 movdqa 0x133ba(%rip), %xmm2 # 0x261d0 movdqa 0x133c2(%rip), %xmm3 # 0x261e0 pxor %xmm5, %xmm5 movq 0x20(%rdi,%rax), %xmm4 movq (%rdi,%rax), %xmm6 punpcklqdq %xmm4, %xmm6 # xmm6 = xmm6[0],xmm4[0] movdqa %xmm6, %xmm4 psrlw $0x1, %xmm4 pand %xmm1, %xmm4 psubb %xmm4, %xmm6 movdqa %xmm6, %xmm4 pand %xmm2, %xmm4 psrlw $0x2, %xmm6 pand %xmm2, %xmm6 paddb %xmm4, %xmm6 movdqa %xmm6, %xmm4 psrlw $0x4, %xmm4 paddb %xmm6, %xmm4 pand %xmm3, %xmm4 psadbw %xmm0, %xmm4 pshufd $0xe8, %xmm4, %xmm4 # xmm4 = xmm4[0,2,2,3] paddd %xmm5, %xmm4 movq 0x28(%rdi,%rax), %xmm5 movq 0x8(%rdi,%rax), %xmm6 punpcklqdq %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0] movdqa %xmm6, %xmm5 psrlw $0x1, %xmm5 pand %xmm1, %xmm5 psubb %xmm5, %xmm6 movdqa %xmm6, %xmm5 pand %xmm2, %xmm5 psrlw $0x2, %xmm6 pand %xmm2, %xmm6 paddb %xmm5, %xmm6 movdqa %xmm6, %xmm5 psrlw $0x4, %xmm5 paddb %xmm6, %xmm5 pand %xmm3, %xmm5 psadbw %xmm0, %xmm5 pshufd $0xe8, %xmm5, %xmm5 # xmm5 = xmm5[0,2,2,3] movq 0x30(%rdi,%rax), %xmm6 movq 0x10(%rdi,%rax), %xmm7 punpcklqdq %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0] movdqa %xmm7, %xmm6 psrlw $0x1, %xmm6 pand %xmm1, %xmm6 psubb %xmm6, %xmm7 movdqa %xmm7, %xmm6 pand %xmm2, %xmm6 psrlw $0x2, %xmm7 pand %xmm2, %xmm7 paddb %xmm6, %xmm7 movdqa %xmm7, %xmm6 psrlw $0x4, %xmm6 paddb %xmm7, %xmm6 pand %xmm3, %xmm6 psadbw %xmm0, %xmm6 pshufd $0xe8, %xmm6, %xmm6 # xmm6 = xmm6[0,2,2,3] paddd %xmm5, %xmm6 paddd %xmm4, %xmm6 movq 0x38(%rdi,%rax), %xmm4 movq 0x18(%rdi,%rax), %xmm5 punpcklqdq %xmm4, %xmm5 # xmm5 = xmm5[0],xmm4[0] movdqa %xmm5, %xmm4 psrlw $0x1, %xmm4 pand %xmm1, %xmm4 psubb %xmm4, %xmm5 movdqa %xmm5, %xmm4 pand %xmm2, %xmm4 psrlw $0x2, %xmm5 pand %xmm2, %xmm5 paddb %xmm4, %xmm5 movdqa %xmm5, %xmm4 psrlw $0x4, %xmm4 paddb %xmm5, %xmm4 pand %xmm3, %xmm4 psadbw %xmm0, %xmm4 pshufd $0xe8, %xmm4, %xmm5 # xmm5 = 
xmm4[0,2,2,3] paddd %xmm6, %xmm5 addq $0x40, %rax cmpq $0x2000, %rax # imm = 0x2000 jne 0x12e22 pshufd $0x55, %xmm5, %xmm0 # xmm0 = xmm5[1,1,1,1] paddd %xmm5, %xmm0 movd %xmm0, %eax jmp 0x12fa0 movq 0x8(%rbx), %rdi callq 0x13075 jmp 0x12fa0 callq 0x130f4 popq %rbx retq
/lucaderi[P]CRoaring/src/containers/bitset.c
bitset_container_set_range
/* Set all bits in [begin, end) and refresh the cached cardinality by a
 * full recount rather than tracking the delta. */
void bitset_container_set_range(bitset_container_t *bitset, uint32_t begin,
                                uint32_t end) {
    bitset_set_range(bitset->words, begin, end);
    bitset->cardinality =
        bitset_container_compute_cardinality(bitset);  // could be smarter
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx cmpl %edx, %esi je 0x13025 movl %edx, %r14d movq 0x8(%rbx), %r15 movl %esi, %eax shrl $0x6, %eax movq $-0x1, %rdx movl %esi, %ecx shlq %cl, %rdx leal -0x1(%r14), %ebp shrl $0x6, %ebp movl %ebp, %ecx subl %eax, %ecx jne 0x12fe5 negb %r14b movl %r14d, %ecx shlq %cl, %rdx shrq %cl, %rdx movl %eax, %ebp jmp 0x1301f movl %eax, %esi orq %rdx, (%r15,%rsi,8) leal 0x1(%rax), %edx cmpl %ebp, %edx jae 0x1300f leaq (%r15,%rax,8), %rdi addq $0x8, %rdi addl $-0x2, %ecx leaq 0x8(,%rcx,8), %rdx movl $0xff, %esi callq 0x1100 negb %r14b movq $-0x1, %rdx movl %r14d, %ecx shrq %cl, %rdx movl %ebp, %eax orq %rdx, (%r15,%rax,8) movq %rbx, %rdi callq 0x12de3 movl %eax, (%rbx) addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/bitset.c
bitset_container_intersect
bool bitset_container_intersect(const bitset_container_t *src_1, const bitset_container_t *src_2) { // could vectorize, but this is probably already quite fast in practice const uint64_t *__restrict__ words_1 = src_1->words; const uint64_t *__restrict__ words_2 = src_2->words; for (int i = 0; i < BITSET_CONTAINER_SIZE_IN_WORDS; i++) { if ((words_1[i] & words_2[i]) != 0) return true; } return false; }
movq 0x8(%rdi), %rcx movq 0x8(%rsi), %rdx movq (%rdx), %rsi movb $0x1, %al testq %rsi, (%rcx) je 0x1304d retq xorl %esi, %esi movq %rsi, %rax cmpq $0x3ff, %rsi # imm = 0x3FF je 0x1306b movq 0x8(%rdx,%rax,8), %rdi leaq 0x1(%rax), %rsi testq %rdi, 0x8(%rcx,%rax,8) je 0x1304f cmpq $0x3ff, %rax # imm = 0x3FF setb %al retq
/lucaderi[P]CRoaring/src/containers/bitset.c
avx512_vpopcount
/* Sum the popcounts of size 512-bit lanes starting at data, using the
 * AVX-512 VPOPCNT instruction.  The main loop is unrolled four lanes at
 * a time (via the VPOPCNT_AND_ADD macro); the tail loop handles the
 * remaining size % 4 lanes.  The per-lane partial sums are reduced to a
 * scalar by simd_sum_epu64. */
static inline uint64_t avx512_vpopcount(const __m512i *data,
                                        const uint64_t size) {
    const uint64_t limit = size - size % 4;
    __m512i total = _mm512_setzero_si512();
    uint64_t i = 0;

    for (; i < limit; i += 4) {
        VPOPCNT_AND_ADD(data + i, 0, total);
        VPOPCNT_AND_ADD(data + i, 1, total);
        VPOPCNT_AND_ADD(data + i, 2, total);
        VPOPCNT_AND_ADD(data + i, 3, total);
    }

    for (; i < size; i++) {
        total = _mm512_add_epi64(
            total, _mm512_popcnt_epi64(_mm512_loadu_si512(data + i)));
    }

    return simd_sum_epu64(total);
}
addq $0xc0, %rdi vpxor %xmm0, %xmm0, %xmm0 movq $-0x4, %rax vpopcntq -0xc0(%rdi), %zmm1 vpaddq %zmm0, %zmm1, %zmm0 vpopcntq -0x80(%rdi), %zmm1 vpopcntq -0x40(%rdi), %zmm2 vpaddq %zmm2, %zmm1, %zmm1 vpaddq %zmm1, %zmm0, %zmm0 vpopcntq (%rdi), %zmm1 vpaddq %zmm1, %zmm0, %zmm0 addq $0x4, %rax addq $0x100, %rdi # imm = 0x100 cmpq $0x7c, %rax jb 0x13087 vextracti64x4 $0x1, %zmm0, %ymm1 vpaddq %zmm1, %zmm0, %zmm0 vextracti128 $0x1, %ymm0, %xmm1 vpaddq %xmm1, %xmm0, %xmm0 vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] vpaddq %xmm1, %xmm0, %xmm0 vmovq %xmm0, %rax vzeroupper retq
/lucaderi[P]CRoaring/include/roaring/bitset_util.h
avx2_harley_seal_popcount256
/* Sum the popcounts of size 256-bit lanes starting at data using the
 * Harley-Seal carry-save-adder scheme: CSA chains accumulate bits of
 * weight 1/2/4/8/16 (ones..sixteens), so a full popcount256 is needed
 * only once per 16 input vectors plus once per accumulator at the end.
 * The tail loop popcounts the remaining size % 16 vectors directly, and
 * the four 64-bit lanes of total are summed into the scalar result. */
static uint64_t avx2_harley_seal_popcount256(const __m256i *data,
                                             const uint64_t size) {
    __m256i total = _mm256_setzero_si256();
    __m256i ones = _mm256_setzero_si256();
    __m256i twos = _mm256_setzero_si256();
    __m256i fours = _mm256_setzero_si256();
    __m256i eights = _mm256_setzero_si256();
    __m256i sixteens = _mm256_setzero_si256();
    __m256i twosA, twosB, foursA, foursB, eightsA, eightsB;
    const uint64_t limit = size - size % 16;
    uint64_t i = 0;

    for (; i < limit; i += 16) {
        // Compress 16 input vectors through the CSA tree; only the
        // highest-weight accumulator (sixteens) gets a real popcount.
        CSA(&twosA, &ones, ones, _mm256_lddqu_si256(data + i),
            _mm256_lddqu_si256(data + i + 1));
        CSA(&twosB, &ones, ones, _mm256_lddqu_si256(data + i + 2),
            _mm256_lddqu_si256(data + i + 3));
        CSA(&foursA, &twos, twos, twosA, twosB);
        CSA(&twosA, &ones, ones, _mm256_lddqu_si256(data + i + 4),
            _mm256_lddqu_si256(data + i + 5));
        CSA(&twosB, &ones, ones, _mm256_lddqu_si256(data + i + 6),
            _mm256_lddqu_si256(data + i + 7));
        CSA(&foursB, &twos, twos, twosA, twosB);
        CSA(&eightsA, &fours, fours, foursA, foursB);
        CSA(&twosA, &ones, ones, _mm256_lddqu_si256(data + i + 8),
            _mm256_lddqu_si256(data + i + 9));
        CSA(&twosB, &ones, ones, _mm256_lddqu_si256(data + i + 10),
            _mm256_lddqu_si256(data + i + 11));
        CSA(&foursA, &twos, twos, twosA, twosB);
        CSA(&twosA, &ones, ones, _mm256_lddqu_si256(data + i + 12),
            _mm256_lddqu_si256(data + i + 13));
        CSA(&twosB, &ones, ones, _mm256_lddqu_si256(data + i + 14),
            _mm256_lddqu_si256(data + i + 15));
        CSA(&foursB, &twos, twos, twosA, twosB);
        CSA(&eightsB, &fours, fours, foursA, foursB);
        CSA(&sixteens, &eights, eights, eightsA, eightsB);
        total = _mm256_add_epi64(total, popcount256(sixteens));
    }

    // Fold in the lower-weight accumulators, scaled by their weight.
    total = _mm256_slli_epi64(total, 4);  // * 16
    total = _mm256_add_epi64(
        total, _mm256_slli_epi64(popcount256(eights), 3));  // += 8 * ...
    total = _mm256_add_epi64(
        total, _mm256_slli_epi64(popcount256(fours), 2));  // += 4 * ...
    total = _mm256_add_epi64(
        total, _mm256_slli_epi64(popcount256(twos), 1));  // += 2 * ...
    total = _mm256_add_epi64(total, popcount256(ones));

    for (; i < size; i++)
        total = _mm256_add_epi64(total,
                                 popcount256(_mm256_lddqu_si256(data + i)));

    return (uint64_t)(_mm256_extract_epi64(total, 0)) +
           (uint64_t)(_mm256_extract_epi64(total, 1)) +
           (uint64_t)(_mm256_extract_epi64(total, 2)) +
           (uint64_t)(_mm256_extract_epi64(total, 3));
}
vpxor %xmm3, %xmm3, %xmm3 movq $-0x10, %rax vpbroadcastb 0x13208(%rip), %ymm0 # 0x26310 vbroadcasti128 0x131df(%rip), %ymm2 # ymm2 = mem[0,1,0,1] vbroadcasti128 0x131e6(%rip), %ymm1 # ymm1 = mem[0,1,0,1] vxorpd %xmm6, %xmm6, %xmm6 vxorpd %xmm7, %xmm7, %xmm7 vxorpd %xmm5, %xmm5, %xmm5 vxorpd %xmm4, %xmm4, %xmm4 vlddqu (%rdi), %ymm8 vlddqu 0x20(%rdi), %ymm9 vxorpd %ymm6, %ymm8, %ymm10 vandpd %ymm6, %ymm8, %ymm6 vandpd %ymm9, %ymm10, %ymm8 vorpd %ymm6, %ymm8, %ymm6 vxorpd %ymm9, %ymm10, %ymm8 vlddqu 0x40(%rdi), %ymm9 vlddqu 0x60(%rdi), %ymm10 vxorpd %ymm9, %ymm8, %ymm11 vandpd %ymm9, %ymm8, %ymm8 vandpd %ymm10, %ymm11, %ymm9 vorpd %ymm8, %ymm9, %ymm8 vxorpd %ymm10, %ymm11, %ymm9 vxorpd %ymm7, %ymm6, %ymm10 vandpd %ymm7, %ymm6, %ymm6 vandpd %ymm10, %ymm8, %ymm7 vorpd %ymm6, %ymm7, %ymm6 vxorpd %ymm10, %ymm8, %ymm7 vlddqu 0x80(%rdi), %ymm8 vlddqu 0xa0(%rdi), %ymm10 vxorpd %ymm8, %ymm9, %ymm11 vandpd %ymm8, %ymm9, %ymm8 vandpd %ymm10, %ymm11, %ymm9 vorpd %ymm8, %ymm9, %ymm8 vxorpd %ymm10, %ymm11, %ymm9 vlddqu 0xc0(%rdi), %ymm10 vlddqu 0xe0(%rdi), %ymm11 vxorpd %ymm10, %ymm9, %ymm12 vandpd %ymm10, %ymm9, %ymm9 vandpd %ymm11, %ymm12, %ymm10 vorpd %ymm9, %ymm10, %ymm9 vxorpd %ymm11, %ymm12, %ymm10 vxorpd %ymm7, %ymm8, %ymm11 vandpd %ymm7, %ymm8, %ymm7 vandpd %ymm11, %ymm9, %ymm8 vorpd %ymm7, %ymm8, %ymm7 vxorpd %ymm11, %ymm9, %ymm9 vxorpd %ymm5, %ymm6, %ymm11 vandpd %ymm5, %ymm6, %ymm5 vandpd %ymm7, %ymm11, %ymm6 vorpd %ymm5, %ymm6, %ymm8 vxorpd %ymm7, %ymm11, %ymm5 vlddqu 0x100(%rdi), %ymm6 vlddqu 0x120(%rdi), %ymm7 vxorpd %ymm6, %ymm10, %ymm11 vandpd %ymm6, %ymm10, %ymm6 vandpd %ymm7, %ymm11, %ymm10 vorpd %ymm6, %ymm10, %ymm6 vxorpd %ymm7, %ymm11, %ymm7 vlddqu 0x140(%rdi), %ymm10 vlddqu 0x160(%rdi), %ymm11 vxorpd %ymm7, %ymm10, %ymm12 vandpd %ymm7, %ymm10, %ymm7 vandpd %ymm11, %ymm12, %ymm10 vorpd %ymm7, %ymm10, %ymm7 vxorpd %ymm11, %ymm12, %ymm10 vxorpd %ymm6, %ymm9, %ymm11 vandpd %ymm6, %ymm9, %ymm6 vandpd %ymm7, %ymm11, %ymm9 vorpd %ymm6, %ymm9, %ymm9 vxorpd %ymm7, 
%ymm11, %ymm7 vlddqu 0x180(%rdi), %ymm6 vlddqu 0x1a0(%rdi), %ymm11 vxorpd %ymm6, %ymm10, %ymm12 vandpd %ymm6, %ymm10, %ymm6 vandpd %ymm11, %ymm12, %ymm10 vorpd %ymm6, %ymm10, %ymm10 vxorpd %ymm11, %ymm12, %ymm6 vlddqu 0x1c0(%rdi), %ymm11 vlddqu 0x1e0(%rdi), %ymm12 vxorpd %ymm6, %ymm11, %ymm13 vandpd %ymm6, %ymm11, %ymm6 vandpd %ymm12, %ymm13, %ymm11 vorpd %ymm6, %ymm11, %ymm11 vxorpd %ymm12, %ymm13, %ymm6 vxorpd %ymm7, %ymm10, %ymm12 vandpd %ymm7, %ymm10, %ymm7 vandpd %ymm12, %ymm11, %ymm10 vorpd %ymm7, %ymm10, %ymm10 vxorpd %ymm12, %ymm11, %ymm7 vxorpd %ymm5, %ymm9, %ymm11 vandpd %ymm5, %ymm9, %ymm5 vandpd %ymm11, %ymm10, %ymm9 vorpd %ymm5, %ymm9, %ymm9 vxorpd %ymm11, %ymm10, %ymm5 vxorpd %ymm4, %ymm8, %ymm10 vandpd %ymm4, %ymm8, %ymm4 vandpd %ymm10, %ymm9, %ymm8 vorpd %ymm4, %ymm8, %ymm8 vxorpd %ymm10, %ymm9, %ymm4 vpsrlw $0x4, %ymm8, %ymm9 vpand %ymm0, %ymm8, %ymm8 vpshufb %ymm8, %ymm2, %ymm8 vpand %ymm0, %ymm9, %ymm9 vpshufb %ymm9, %ymm1, %ymm9 vpsadbw %ymm9, %ymm8, %ymm8 vpaddq %ymm3, %ymm8, %ymm3 addq $0x10, %rax addq $0x200, %rdi # imm = 0x200 cmpq $0xf0, %rax jb 0x1312a vpsllq $0x4, %ymm3, %ymm3 vpsrlw $0x4, %ymm4, %ymm8 vpand %ymm0, %ymm4, %ymm4 vpshufb %ymm4, %ymm2, %ymm4 vpand %ymm0, %ymm8, %ymm8 vpshufb %ymm8, %ymm1, %ymm8 vpsadbw %ymm4, %ymm8, %ymm4 vpsllq $0x3, %ymm4, %ymm4 vpsrlw $0x4, %ymm5, %ymm8 vpand %ymm0, %ymm5, %ymm5 vpshufb %ymm5, %ymm2, %ymm5 vpand %ymm0, %ymm8, %ymm8 vpshufb %ymm8, %ymm1, %ymm8 vpsadbw %ymm5, %ymm8, %ymm5 vpsllq $0x2, %ymm5, %ymm5 vpaddq %ymm5, %ymm4, %ymm4 vpsrlw $0x4, %ymm7, %ymm5 vpand %ymm0, %ymm7, %ymm7 vpshufb %ymm7, %ymm2, %ymm7 vpand %ymm0, %ymm5, %ymm5 vpshufb %ymm5, %ymm1, %ymm5 vpsadbw %ymm5, %ymm7, %ymm5 vpaddq %ymm5, %ymm5, %ymm5 vpaddq %ymm5, %ymm4, %ymm4 vpaddq %ymm3, %ymm4, %ymm3 vpsrlw $0x4, %ymm6, %ymm4 vpand %ymm0, %ymm6, %ymm5 vpshufb %ymm5, %ymm2, %ymm2 vpand %ymm0, %ymm4, %ymm0 vpshufb %ymm0, %ymm1, %ymm0 vpsadbw %ymm0, %ymm2, %ymm0 vpaddq %ymm0, %ymm3, %ymm0 vextracti128 $0x1, %ymm0, %xmm1 vpaddq 
%xmm1, %xmm0, %xmm0 vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] vpaddq %xmm1, %xmm0, %xmm0 vmovq %xmm0, %rax vzeroupper retq
/lucaderi[P]CRoaring/include/roaring/bitset_util.h
bitset_container_equals
/* Return true if the two bitsets contain exactly the same bits.
 * When both cached cardinalities are known (not BITSET_UNKNOWN_CARDINALITY)
 * they serve as a fast reject, and a full container (2^16 bits set) is a
 * fast accept.  Otherwise the words are compared directly -- AVX-512 or
 * AVX2 kernels when supported at runtime, memcmp as the fallback. */
ALLOW_UNALIGNED
bool bitset_container_equals(const bitset_container_t *container1,
                             const bitset_container_t *container2) {
    if ((container1->cardinality != BITSET_UNKNOWN_CARDINALITY) &&
        (container2->cardinality != BITSET_UNKNOWN_CARDINALITY)) {
        if (container1->cardinality != container2->cardinality) {
            return false;
        }
        if (container1->cardinality == INT32_C(0x10000)) {
            return true;
        }
    }
#if CROARING_IS_X64
    int support = croaring_hardware_support();
#if CROARING_COMPILER_SUPPORTS_AVX512
    if (support & ROARING_SUPPORTS_AVX512) {
        return _avx512_bitset_container_equals(container1, container2);
    } else
#endif
        if (support & ROARING_SUPPORTS_AVX2) {
        return _avx2_bitset_container_equals(container1, container2);
    }
#endif
    return memcmp(container1->words, container2->words,
                  BITSET_CONTAINER_SIZE_IN_WORDS * sizeof(uint64_t)) == 0;
}
pushq %r14 pushq %rbx pushq %rax movq %rsi, %rbx movq %rdi, %r14 movl (%rdi), %ecx cmpl $-0x1, %ecx je 0x19154 movl (%rbx), %eax cmpl $-0x1, %eax je 0x19154 cmpl %eax, %ecx jne 0x19189 movb $0x1, %al cmpl $0x10000, %ecx # imm = 0x10000 je 0x1918b callq 0x20570 testb $0x2, %al jne 0x1917a testb $0x1, %al jne 0x19193 movq 0x8(%r14), %rdi movq 0x8(%rbx), %rsi movl $0x2000, %edx # imm = 0x2000 callq 0x1170 testl %eax, %eax sete %al jmp 0x1918b movq %r14, %rdi movq %rbx, %rsi addq $0x8, %rsp popq %rbx popq %r14 jmp 0x191a2 xorl %eax, %eax addq $0x8, %rsp popq %rbx popq %r14 retq movq %r14, %rdi movq %rbx, %rsi addq $0x8, %rsp popq %rbx popq %r14 jmp 0x191fb
/lucaderi[P]CRoaring/src/containers/bitset.c
avx512_bitset_container_equals
/* AVX-512 path for bitset-container equality: compare the two word arrays
 * 64 bytes at a time, returning false on the first mismatching chunk. */
static inline bool _avx512_bitset_container_equals(
    const bitset_container_t *container1,
    const bitset_container_t *container2) {
    const __m512i *ptr1 = (const __m512i *)container1->words;
    const __m512i *ptr2 = (const __m512i *)container2->words;
    // Total bytes / 64 = number of 512-bit chunks to compare.
    for (size_t i = 0;
         i < BITSET_CONTAINER_SIZE_IN_WORDS * sizeof(uint64_t) / 64; i++) {
        __m512i r1 = _mm512_loadu_si512(ptr1 + i);
        __m512i r2 = _mm512_loadu_si512(ptr2 + i);
        // One mask bit per byte; all 64 bits set means the chunk matches.
        __mmask64 mask = _mm512_cmpeq_epi8_mask(r1, r2);
        if ((uint64_t)mask != UINT64_MAX) {
            return false;
        }
    }
    return true;
}
movq 0x8(%rdi), %rax movq 0x8(%rsi), %rcx vmovdqu64 (%rax), %zmm0 vpcmpneqd (%rcx), %zmm0, %k0 kortestw %k0, %k0 je 0x191c3 xorl %eax, %eax vzeroupper retq movl $0x40, %edx xorl %edi, %edi movq %rdi, %rsi cmpq $0x7f, %rdi je 0x191f0 vmovdqu64 (%rax,%rdx), %zmm0 vpcmpneqd (%rcx,%rdx), %zmm0, %k0 addq $0x40, %rdx leaq 0x1(%rsi), %rdi kortestw %k0, %k0 je 0x191ca cmpq $0x7f, %rsi setae %al vzeroupper retq
/lucaderi[P]CRoaring/src/containers/bitset.c
container_printf_as_uint32_array
/* Resolve a possibly-shared container to its payload.  When `*type` marks a
 * shared wrapper, `*type` is rewritten to the inner container's typecode and
 * the wrapped container is returned; otherwise the input passes through. */
static inline const container_t *container_unwrap_shared(
    const container_t *candidate_shared_container, uint8_t *type) {
    // Non-shared containers need no unwrapping.
    if (*type != SHARED_CONTAINER_TYPE) {
        return candidate_shared_container;
    }
    *type = const_CAST_shared(candidate_shared_container)->typecode;
    assert(*type != SHARED_CONTAINER_TYPE);  // nested sharing is not allowed
    return const_CAST_shared(candidate_shared_container)->container;
}
cmpl $0x4, %esi jne 0x1982d movb 0x8(%rdi), %sil movq (%rdi), %rdi cmpb $0x3, %sil je 0x19847 movzbl %sil, %eax movl %edx, %esi cmpl $0x2, %eax jne 0x18dee jmp 0x124f5 movl %edx, %esi jmp 0x1fde2
/lucaderi[P]CRoaring/include/roaring/containers/containers.h
container_internal_validate
/* Check the structural invariants of `container` (interpreted per `typecode`).
 * On failure, returns false and points `*reason` at a static description of
 * the first violated invariant. */
bool container_internal_validate(const container_t *container,
                                 uint8_t typecode, const char **reason) {
    if (container == NULL) {
        *reason = "container is NULL";
        return false;
    }
    // Not using container_unwrap_shared because it asserts if shared containers
    // are nested
    if (typecode == SHARED_CONTAINER_TYPE) {
        const shared_container_t *shared_container =
            const_CAST_shared(container);
        if (croaring_refcount_get(&shared_container->counter) == 0) {
            *reason = "shared container has zero refcount";
            return false;
        }
        if (shared_container->typecode == SHARED_CONTAINER_TYPE) {
            *reason = "shared container is nested";
            return false;
        }
        if (shared_container->container == NULL) {
            *reason = "shared container has NULL container";
            return false;
        }
        // Validate the wrapped container below, using its real typecode.
        container = shared_container->container;
        typecode = shared_container->typecode;
    }
    switch (typecode) {
        case BITSET_CONTAINER_TYPE:
            return bitset_container_validate(const_CAST_bitset(container),
                                             reason);
        case ARRAY_CONTAINER_TYPE:
            return array_container_validate(const_CAST_array(container),
                                            reason);
        case RUN_CONTAINER_TYPE:
            return run_container_validate(const_CAST_run(container), reason);
        default:
            *reason = "invalid typecode";
            return false;
    }
}
testq %rdi, %rdi je 0x19870 cmpb $0x4, %sil jne 0x198a6 movl 0xc(%rdi), %eax testl %eax, %eax je 0x19879 movb 0x8(%rdi), %al cmpb $0x4, %al jne 0x19882 leaq 0xcbb7(%rip), %rax # 0x26425 jmp 0x19893 leaq 0xcb79(%rip), %rax # 0x263f0 jmp 0x198d9 leaq 0xcb82(%rip), %rax # 0x26402 jmp 0x19893 movq (%rdi), %r8 movb $0x1, %cl testq %r8, %r8 jne 0x1989d leaq 0xcbad(%rip), %rax # 0x26440 movq %rax, (%rdx) xorl %ecx, %ecx movl %esi, %eax movq %rdi, %r8 movl %eax, %esi movq %r8, %rdi testb %cl, %cl je 0x198dc cmpb $0x3, %sil je 0x198ca movzbl %sil, %eax cmpl $0x2, %eax je 0x198c2 cmpl $0x1, %eax jne 0x198d2 movq %rdx, %rsi jmp 0x18e7c movq %rdx, %rsi jmp 0x1255d movq %rdx, %rsi jmp 0x1fe93 leaq 0xcb8b(%rip), %rax # 0x26464 movq %rax, (%rdx) xorl %eax, %eax retq
/lucaderi[P]CRoaring/src/containers/containers.c
container_clone
/* Deep-copy container `c` according to `typecode`.  Shared containers are
 * deliberately not cloneable (mixing COW and non-COW bitmaps is a usage
 * error) and yield NULL. */
container_t *container_clone(const container_t *c, uint8_t typecode) {
    // We do not want to allow cloning of shared containers.
    // c = container_unwrap_shared(c, &typecode);
    if (typecode == BITSET_CONTAINER_TYPE) {
        return bitset_container_clone(const_CAST_bitset(c));
    }
    if (typecode == ARRAY_CONTAINER_TYPE) {
        return array_container_clone(const_CAST_array(c));
    }
    if (typecode == RUN_CONTAINER_TYPE) {
        return run_container_clone(const_CAST_run(c));
    }
    if (typecode == SHARED_CONTAINER_TYPE) {
        // Shared containers are not cloneable. Are you mixing COW and
        // non-COW bitmaps?
        return NULL;
    }
    assert(false);
    roaring_unreachable;
    return NULL;
}
decl %esi leaq 0xca70(%rip), %rax # 0x263c8 movslq (%rax,%rsi,4), %rcx addq %rax, %rcx jmpq *%rcx jmp 0x12b80 jmp 0x1ebc5 xorl %eax, %eax retq jmp 0x11be6
/lucaderi[P]CRoaring/src/containers/containers.c
container_init_iterator_last
/* Position a fresh iterator on the LAST value of container `c` (per its
 * typecode); writes that value to `*value` and returns an iterator whose
 * index points at it.  The container is assumed non-empty. */
roaring_container_iterator_t container_init_iterator_last(const container_t *c,
                                                          uint8_t typecode,
                                                          uint16_t *value) {
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            const bitset_container_t *bc = const_CAST_bitset(c);
            // Scan backwards from the top word for the first non-zero word;
            // the loop relies on at least one bit being set somewhere.
            uint32_t wordindex = BITSET_CONTAINER_SIZE_IN_WORDS - 1;
            uint64_t word;
            while ((word = bc->words[wordindex]) == 0) {
                wordindex--;
            }
            // word is non-zero
            int32_t index =
                wordindex * 64 + (63 - roaring_leading_zeroes(word));
            *value = index;
            return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{
                .index = index,
            };
        }
        case ARRAY_CONTAINER_TYPE: {
            const array_container_t *ac = const_CAST_array(c);
            int32_t index = ac->cardinality - 1;
            *value = ac->array[index];
            return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{
                .index = index,
            };
        }
        case RUN_CONTAINER_TYPE: {
            const run_container_t *rc = const_CAST_run(c);
            int32_t run_index = rc->n_runs - 1;
            const rle16_t *last_run = &rc->runs[run_index];
            // The last value of the last run is value + length (inclusive).
            *value = last_run->value + last_run->length;
            return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{
                .index = run_index,
            };
        }
        default:
            assert(false);
            roaring_unreachable;
            return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{0};
    }
}
cmpl $0x3, %esi je 0x199f1 cmpl $0x2, %esi jne 0x19a08 movslq (%rdi), %rax movq 0x8(%rdi), %rcx movzwl -0x2(%rcx,%rax,2), %ecx decq %rax jmp 0x19a31 movslq (%rdi), %rsi leaq -0x1(%rsi), %rax movq 0x8(%rdi), %rdi movzwl -0x2(%rdi,%rsi,4), %ecx addw -0x4(%rdi,%rsi,4), %cx jmp 0x19a31 movq 0x8(%rdi), %rax movl $0x3ff, %esi # imm = 0x3FF movl $0x1003f, %ecx # imm = 0x1003F movl %esi, %edi movq (%rax,%rdi,8), %rdi decl %esi addl $-0x40, %ecx testq %rdi, %rdi je 0x19a16 bsrq %rdi, %rax xorl $0x3f, %eax subl %eax, %ecx movl %ecx, %eax movw %cx, (%rdx) retq
/lucaderi[P]CRoaring/src/containers/containers.c
container_iterator_read_into_uint64
/* Copy up to `count` values from container `c` (of the given typecode) into
 * `buf`, OR-ing each 16-bit value with the `high48` prefix to form a full
 * 64-bit value.  `it` carries the position across calls.  On return,
 * `*consumed` holds how many values were written; when more values remain,
 * `*value_out` holds the next value and true is returned. */
bool container_iterator_read_into_uint64(const container_t *c, uint8_t typecode,
                                         roaring_container_iterator_t *it,
                                         uint64_t high48, uint64_t *buf,
                                         uint32_t count, uint32_t *consumed,
                                         uint16_t *value_out) {
    *consumed = 0;
    if (count == 0) {
        return false;
    }
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            const bitset_container_t *bc = const_CAST_bitset(c);
            uint32_t wordindex = it->index / 64;
            // Mask off bits below the current index within the first word.
            uint64_t word =
                bc->words[wordindex] & (UINT64_MAX << (it->index % 64));
            do {
                // Read set bits.
                while (word != 0 && *consumed < count) {
                    *buf = high48 |
                           (wordindex * 64 + roaring_trailing_zeroes(word));
                    word = word & (word - 1);  // clear the lowest set bit
                    buf++;
                    (*consumed)++;
                }
                // Skip unset bits.
                while (word == 0 &&
                       wordindex + 1 < BITSET_CONTAINER_SIZE_IN_WORDS) {
                    wordindex++;
                    word = bc->words[wordindex];
                }
            } while (word != 0 && *consumed < count);
            if (word != 0) {
                it->index = wordindex * 64 + roaring_trailing_zeroes(word);
                *value_out = it->index;
                return true;
            }
            return false;
        }
        case ARRAY_CONTAINER_TYPE: {
            const array_container_t *ac = const_CAST_array(c);
            // Never read past the array's remaining elements.
            uint32_t num_values =
                minimum_uint32(ac->cardinality - it->index, count);
            for (uint32_t i = 0; i < num_values; i++) {
                buf[i] = high48 | ac->array[it->index + i];
            }
            *consumed += num_values;
            it->index += num_values;
            if (it->index < ac->cardinality) {
                *value_out = ac->array[it->index];
                return true;
            }
            return false;
        }
        case RUN_CONTAINER_TYPE: {
            const run_container_t *rc = const_CAST_run(c);
            do {
                // Last value covered by the current run (inclusive).
                uint32_t largest_run_value = rc->runs[it->index].value +
                                             rc->runs[it->index].length;
                uint32_t num_values = minimum_uint32(
                    largest_run_value - *value_out + 1, count - *consumed);
                for (uint32_t i = 0; i < num_values; i++) {
                    buf[i] = high48 | (*value_out + i);
                }
                *value_out += num_values;
                buf += num_values;
                *consumed += num_values;
                // We check for `value == 0` because `it->value += num_values`
                // can overflow when `value == UINT16_MAX`, and `count >
                // length`. In this case `value` will overflow to 0.
                if (*value_out > largest_run_value || *value_out == 0) {
                    it->index++;
                    if (it->index < rc->n_runs) {
                        *value_out = rc->runs[it->index].value;
                    } else {
                        return false;
                    }
                }
            } while (*consumed < count);
            return true;
        }
        default:
            assert(false);
            roaring_unreachable;
            return 0;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdi, -0x8(%rsp) movq 0x38(%rsp), %rbx movl $0x0, (%rbx) testl %r9d, %r9d je 0x1a10f movq %rcx, %r10 cmpb $0x1, %sil je 0x1a116 movzbl %sil, %eax cmpl $0x2, %eax jne 0x1a1d5 movq -0x8(%rsp), %rax movl (%rax), %esi movl (%rdx), %eax movl %esi, %ecx subl %eax, %ecx cmpl %r9d, %ecx cmovael %r9d, %ecx cmpl %eax, %esi je 0x1a0e4 movq -0x8(%rsp), %rsi movq 0x8(%rsi), %rsi cmpl $0x1, %ecx movl %ecx, %r9d adcl $0x0, %r9d xorl %r11d, %r11d leal (%rax,%r11), %r14d movzwl (%rsi,%r14,2), %r14d orq %r10, %r14 movq %r14, (%r8,%r11,8) incq %r11 cmpq %r11, %r9 jne 0x1a0cc movl %ecx, (%rbx) addl (%rdx), %ecx movl %ecx, (%rdx) movq -0x8(%rsp), %rax cmpl (%rax), %ecx setl %al jge 0x1a329 movq -0x8(%rsp), %rax movq 0x8(%rax), %rax movslq %ecx, %rcx movzwl (%rax,%rcx,2), %eax jmp 0x1a1c6 xorl %eax, %eax jmp 0x1a329 movl (%rdx), %ecx leal 0x3f(%rcx), %eax testl %ecx, %ecx cmovnsl %ecx, %eax movl %eax, %esi sarl $0x6, %esi movq -0x8(%rsp), %rdi movq 0x8(%rdi), %rdi movq (%rdi,%rsi,8), %r14 andl $-0x40, %eax subl %eax, %ecx shrq %cl, %r14 shlq %cl, %r14 testq %r14, %r14 je 0x1a17f movl %esi, %eax shll $0x6, %eax movl (%rbx), %ecx cmpl %r9d, %ecx movl %r9d, %r11d cmoval %ecx, %r11d negl %r11d incl %ecx leal (%r11,%rcx), %ebp cmpl $0x1, %ebp je 0x1a17f bsfq %r14, %r15 orl %eax, %r15d orq %r10, %r15 movq %r15, (%r8) leaq -0x1(%r14), %r15 addq $0x8, %r8 movl %ecx, (%rbx) incl %ecx andq %r15, %r14 jne 0x1a158 testq %r14, %r14 jne 0x1a1aa leal 0x1(%rsi), %eax cmpl $0x3ff, %eax # imm = 0x3FF ja 0x1a1aa movl %eax, %eax leaq (%rdi,%rax,8), %rax incl %esi movq (%rax), %r14 testq %r14, %r14 jne 0x1a1aa addq $0x8, %rax cmpl $0x3ff, %esi # imm = 0x3FF jb 0x1a194 testq %r14, %r14 setne %al je 0x1a329 cmpl %r9d, (%rbx) jb 0x1a13d shll $0x6, %esi bsfq %r14, %rax orl %esi, %eax movl %eax, (%rdx) movq 0x40(%rsp), %rcx movw %ax, (%rcx) movb $0x1, %al jmp 0x1a329 movq -0x8(%rsp), %rax movq 0x8(%rax), %rdi movq 0x40(%rsp), %rax movzwl 
(%rax), %esi movdqa 0xc0e2(%rip), %xmm0 # 0x262d0 movdqa 0xc0ea(%rip), %xmm1 # 0x262e0 pcmpeqd %xmm2, %xmm2 movdqa 0xc1de(%rip), %xmm3 # 0x263e0 movslq (%rdx), %rax movzwl (%rdi,%rax,4), %r11d movzwl 0x2(%rdi,%rax,4), %ebp addl %r11d, %ebp movzwl %si, %esi movl %ebp, %r11d subl %esi, %r11d incl %r11d movl (%rbx), %eax movl %r9d, %r15d subl %eax, %r15d cmpl %r15d, %r11d cmovbl %r11d, %r15d movl %r15d, %r14d testl %r15d, %r15d je 0x1a2d5 leaq 0x1(%r14), %r12 leaq -0x1(%r14), %r11 movq %r11, %xmm4 pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1] andq $-0x2, %r12 negq %r12 movl $0x1, %r13d pxor %xmm1, %xmm4 movl %esi, %r11d movdqa %xmm0, %xmm5 movdqa %xmm5, %xmm6 pxor %xmm1, %xmm6 movdqa %xmm6, %xmm7 pcmpgtd %xmm4, %xmm7 pcmpeqd %xmm4, %xmm6 pshufd $0xf5, %xmm6, %xmm8 # xmm8 = xmm6[1,1,3,3] pand %xmm7, %xmm8 pshufd $0xf5, %xmm7, %xmm6 # xmm6 = xmm7[1,1,3,3] por %xmm8, %xmm6 movd %xmm6, %ecx notl %ecx testb $0x1, %cl je 0x1a2a2 movl %r11d, %ecx orq %r10, %rcx movq %rcx, -0x8(%r8,%r13,8) pxor %xmm2, %xmm6 pextrw $0x4, %xmm6, %ecx testb $0x1, %cl je 0x1a2bb leal 0x1(%r11), %ecx orq %r10, %rcx movq %rcx, (%r8,%r13,8) paddq %xmm3, %xmm5 leaq (%r12,%r13), %rcx addq $0x2, %rcx addq $0x2, %r13 addl $0x2, %r11d cmpq $0x1, %rcx jne 0x1a263 addl %r15d, %esi movq 0x40(%rsp), %rcx movw %si, (%rcx) addl %r15d, %eax movl %eax, (%rbx) movb $0x1, %al testw %si, %si je 0x1a2f3 movzwl %si, %ecx cmpl %ecx, %ebp jae 0x1a318 movslq (%rdx), %r11 leal 0x1(%r11), %ecx movl %ecx, (%rdx) movq -0x8(%rsp), %r15 cmpl (%r15), %ecx jge 0x1a316 movzwl 0x4(%rdi,%r11,4), %esi movq 0x40(%rsp), %rcx movw %si, (%rcx) jmp 0x1a318 xorl %eax, %eax testb %al, %al je 0x1a329 leaq (%r8,%r14,8), %r8 cmpl %r9d, (%rbx) jb 0x1a202 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/containers.c
array_container_from_bitset
/* Build an array container holding the set bits of bitset container `bits`.
 * `bits->cardinality` must be up to date: it sizes the result and is copied
 * into it.  Uses an AVX-512 extraction routine when the CPU supports it. */
array_container_t *array_container_from_bitset(const bitset_container_t *bits) {
    array_container_t *result =
        array_container_create_given_capacity(bits->cardinality);
    result->cardinality = bits->cardinality;
#if CROARING_IS_X64
#if CROARING_COMPILER_SUPPORTS_AVX512
    if (croaring_hardware_support() & ROARING_SUPPORTS_AVX512) {
        bitset_extract_setbits_avx512_uint16(
            bits->words, BITSET_CONTAINER_SIZE_IN_WORDS, result->array,
            bits->cardinality, 0);
    } else
#endif
    {
        // sse version ends up being slower here
        // (bitset_extract_setbits_sse_uint16)
        // because of the sparsity of the data
        bitset_extract_setbits_uint16(bits->words,
                                      BITSET_CONTAINER_SIZE_IN_WORDS,
                                      result->array, 0);
    }
#else
    // If the system is not x64, then we have no accelerated function.
    bitset_extract_setbits_uint16(bits->words, BITSET_CONTAINER_SIZE_IN_WORDS,
                                  result->array, 0);
#endif
    return result;
}
pushq %r14 pushq %rbx pushq %rax movq %rdi, %r14 movl (%rdi), %edi callq 0x11b2b movq %rax, %rbx movl (%r14), %eax movl %eax, (%rbx) callq 0x20570 movq 0x8(%r14), %rdi movq 0x8(%rbx), %rdx testb $0x2, %al jne 0x1a53c movl $0x400, %esi # imm = 0x400 xorl %ecx, %ecx callq 0x10173 jmp 0x1a54c movslq (%r14), %rcx movl $0x400, %esi # imm = 0x400 xorl %r8d, %r8d callq 0xfd98 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 retq
/lucaderi[P]CRoaring/src/containers/convert.c
convert_run_optimize
/* Convert container `c` (of type `typecode_original`) to its most
 * space-efficient representation, considering run encoding.  Writes the
 * resulting typecode to `*typecode_after` and returns the (possibly new)
 * container; the input container is freed whenever a new one replaces it. */
container_t *convert_run_optimize(container_t *c, uint8_t typecode_original,
                                  uint8_t *typecode_after) {
    if (typecode_original == RUN_CONTAINER_TYPE) {
        container_t *newc =
            convert_run_to_efficient_container(CAST_run(c), typecode_after);
        if (newc != c) {
            container_free(c, typecode_original);
        }
        return newc;
    } else if (typecode_original == ARRAY_CONTAINER_TYPE) {
        // it might need to be converted to a run container.
        array_container_t *c_qua_array = CAST_array(c);
        int32_t n_runs = array_container_number_of_runs(c_qua_array);
        int32_t size_as_run_container =
            run_container_serialized_size_in_bytes(n_runs);
        int32_t card = array_container_cardinality(c_qua_array);
        int32_t size_as_array_container =
            array_container_serialized_size_in_bytes(card);
        if (size_as_run_container >= size_as_array_container) {
            // Run encoding would not save space; keep the array as-is.
            *typecode_after = ARRAY_CONTAINER_TYPE;
            return c;
        }
        // else convert array to run container
        run_container_t *answer = run_container_create_given_capacity(n_runs);
        int prev = -2;       // sentinel: no previous value seen yet
        int run_start = -1;  // sentinel: no run open yet
        assert(card > 0);
        for (int i = 0; i < card; ++i) {
            uint16_t cur_val = c_qua_array->array[i];
            if (cur_val != prev + 1) {
                // new run starts; flush old one, if any
                if (run_start != -1) add_run(answer, run_start, prev);
                run_start = cur_val;
            }
            prev = c_qua_array->array[i];
        }
        assert(run_start >= 0);
        // now prev is the last seen value
        add_run(answer, run_start, prev);
        *typecode_after = RUN_CONTAINER_TYPE;
        array_container_free(c_qua_array);
        return answer;
    } else if (typecode_original == BITSET_CONTAINER_TYPE) {
        // run conversions on bitset
        // does bitset need conversion to run?
        bitset_container_t *c_qua_bitset = CAST_bitset(c);
        int32_t n_runs = bitset_container_number_of_runs(c_qua_bitset);
        int32_t size_as_run_container =
            run_container_serialized_size_in_bytes(n_runs);
        int32_t size_as_bitset_container =
            bitset_container_serialized_size_in_bytes();
        if (size_as_bitset_container <= size_as_run_container) {
            // no conversion needed.
            *typecode_after = BITSET_CONTAINER_TYPE;
            return c;
        }
        // bitset to runcontainer (ported from Java RunContainer(
        // BitmapContainer bc, int nbrRuns))
        assert(n_runs > 0);  // no empty bitmaps
        run_container_t *answer = run_container_create_given_capacity(n_runs);
        int long_ctr = 0;
        uint64_t cur_word = c_qua_bitset->words[0];
        while (true) {
            // Skip zero words to find where the next run begins.
            while (cur_word == UINT64_C(0) &&
                   long_ctr < BITSET_CONTAINER_SIZE_IN_WORDS - 1)
                cur_word = c_qua_bitset->words[++long_ctr];
            if (cur_word == UINT64_C(0)) {
                // No more set bits: finished.
                bitset_container_free(c_qua_bitset);
                *typecode_after = RUN_CONTAINER_TYPE;
                return answer;
            }
            int local_run_start = roaring_trailing_zeroes(cur_word);
            int run_start = local_run_start + 64 * long_ctr;
            // Fill everything below the run start with ones, so that counting
            // trailing zeroes of the complement locates the run's end.
            uint64_t cur_word_with_1s = cur_word | (cur_word - 1);
            int run_end = 0;
            // All-ones words mean the run continues across word boundaries.
            while (cur_word_with_1s == UINT64_C(0xFFFFFFFFFFFFFFFF) &&
                   long_ctr < BITSET_CONTAINER_SIZE_IN_WORDS - 1)
                cur_word_with_1s = c_qua_bitset->words[++long_ctr];
            if (cur_word_with_1s == UINT64_C(0xFFFFFFFFFFFFFFFF)) {
                // Run extends to the very end of the bitset.
                run_end = 64 + long_ctr * 64;  // exclusive, I guess
                add_run(answer, run_start, run_end - 1);
                bitset_container_free(c_qua_bitset);
                *typecode_after = RUN_CONTAINER_TYPE;
                return answer;
            }
            int local_run_end = roaring_trailing_zeroes(~cur_word_with_1s);
            run_end = local_run_end + long_ctr * 64;
            add_run(answer, run_start, run_end - 1);
            // Clear the finished run's bits; continue with the remainder.
            cur_word = cur_word_with_1s & (cur_word_with_1s + 1);
        }
        return answer;
    } else {
        assert(false);
        roaring_unreachable;
        return NULL;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdx, %r14 movq %rdi, %rbx cmpl $0x2, %esi je 0x1a99e movl %esi, %ebp movq %rbx, %rdi cmpl $0x3, %esi jne 0x1aa23 movq %r14, %rsi callq 0x1a787 movq %rax, %r15 cmpq %rbx, %rax je 0x1ab9b movzbl %bpl, %esi movq %rbx, %rdi callq 0x197ac jmp 0x1ab9b movq %rbx, %rdi callq 0x125f1 leal (,%rax,4), %ecx movl (%rbx), %r12d leal (%r12,%r12), %edx cmpl %edx, %ecx jge 0x1aa3d movl %eax, %edi callq 0x1eb13 movq %rax, %r15 testl %r12d, %r12d jle 0x1ab6d movq 0x8(%rbx), %rcx movl $0xffffffff, %edi # imm = 0xFFFFFFFF movl $0xfffffffe, %eax # imm = 0xFFFFFFFE xorl %esi, %esi movzwl (%rcx,%rsi,2), %edx leal 0x1(%rax), %r8d cmpl %edx, %r8d jne 0x1a9f0 movl %edi, %edx jmp 0x1aa10 cmpl $-0x1, %edi je 0x1aa10 movq 0x8(%r15), %r8 movslq (%r15), %r9 movw %di, (%r8,%r9,4) subl %edi, %eax movw %ax, 0x2(%r8,%r9,4) leal 0x1(%r9), %eax movl %eax, (%r15) movzwl (%rcx,%rsi,2), %eax incq %rsi movl %edx, %edi cmpq %rsi, %r12 jne 0x1a9df jmp 0x1ab77 callq 0x18ed6 leal 0x2(,%rax,4), %ecx cmpl $0x2000, %ecx # imm = 0x2000 jl 0x1aa49 movb $0x1, (%r14) jmp 0x1aa41 movb $0x2, (%r14) movq %rbx, %r15 jmp 0x1ab9b movl %eax, %edi callq 0x1eb13 movq %rax, %r15 movq 0x8(%rbx), %rax movq (%rax), %r12 xorl %r13d, %r13d testq %r12, %r12 sete %cl jne 0x1aa94 cmpl $0x3fe, %r13d # imm = 0x3FE jg 0x1aa94 movq 0x8(%rbx), %rdx movslq %r13d, %rsi leaq 0x1(%rsi), %rax movq 0x8(%rdx,%rsi,8), %r12 testq %r12, %r12 sete %cl jne 0x1aa97 cmpq $0x3fe, %rsi # imm = 0x3FE movq %rax, %rsi jl 0x1aa75 jmp 0x1aa97 movl %r13d, %eax testb %cl, %cl jne 0x1ab5f leaq -0x1(%r12), %rcx orq %r12, %rcx cmpq $-0x1, %rcx sete %bpl movl %eax, %r13d jne 0x1aae1 cmpl $0x3fe, %eax # imm = 0x3FE jg 0x1aae1 movq 0x8(%rbx), %rdx movslq %eax, %rsi leaq 0x1(%rsi), %r13 movq 0x8(%rdx,%rsi,8), %rcx cmpq $-0x1, %rcx sete %bpl jne 0x1aae1 cmpq $0x3fe, %rsi # imm = 0x3FE movq %r13, %rsi jl 0x1aac2 bsfq %r12, %rdx shll $0x6, %eax orl %edx, %eax testb %bpl, %bpl je 0x1ab1e 
movl %r13d, %ecx shll $0x6, %ecx movq 0x8(%r15), %rdx movslq (%r15), %rsi movw %ax, (%rdx,%rsi,4) subl %eax, %ecx addl $0x3f, %ecx movw %cx, 0x2(%rdx,%rsi,4) leal 0x1(%rsi), %eax movl %eax, (%r15) movq %rbx, %rdi callq 0x12b5d movb $0x3, (%r14) jmp 0x1ab54 movq %rcx, %rdx notq %rdx bsfq %rdx, %rdx movl %r13d, %esi shll $0x6, %esi movq 0x8(%r15), %rdi movslq (%r15), %r8 movw %ax, (%rdi,%r8,4) notl %eax addl %esi, %eax addl %edx, %eax movw %ax, 0x2(%rdi,%r8,4) leal 0x1(%r8), %eax movl %eax, (%r15) leaq 0x1(%rcx), %r12 andq %rcx, %r12 testb %bpl, %bpl je 0x1aa5d jmp 0x1ab9b movq %rbx, %rdi callq 0x12b5d movb $0x3, (%r14) jmp 0x1ab9b movl $0xfffffffe, %eax # imm = 0xFFFFFFFE movl $0xffffffff, %edx # imm = 0xFFFFFFFF movq 0x8(%r15), %rcx movslq (%r15), %rsi movw %dx, (%rcx,%rsi,4) subl %edx, %eax movw %ax, 0x2(%rcx,%rsi,4) leal 0x1(%rsi), %eax movl %eax, (%r15) movb $0x3, (%r14) movq %rbx, %rdi callq 0x11f9c movq %r15, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/convert.c
array_bitset_container_intersection
/* Intersect array container `src_1` with bitset container `src_2`, writing
 * the surviving values into array container `dst` (which may alias `src_1`).
 * Grows `dst` first so the in-place case is safe. */
void array_bitset_container_intersection(const array_container_t *src_1,
                                         const bitset_container_t *src_2,
                                         array_container_t *dst) {
    if (dst->capacity < src_1->cardinality) {
        array_container_grow(dst, src_1->cardinality, false);
    }
    int32_t newcard = 0;  // dst could be src_1
    const int32_t origcard = src_1->cardinality;
    for (int i = 0; i < origcard; ++i) {
        uint16_t key = src_1->array[i];
        // this branchless approach is much faster...
        dst->array[newcard] = key;
        newcard += bitset_container_contains(src_2, key);
        /**
         * we could do it this way instead...
         * if (bitset_container_contains(src_2, key)) {
         * dst->array[newcard++] = key;
         * }
         * but if the result is unpredictible, the processor generates
         * many mispredicted branches.
         * Difference can be huge (from 3 cycles when predictible all the way
         * to 16 cycles when unpredictible.
         * See
         * https://github.com/lemire/Code-used-on-Daniel-Lemire-s-blog/blob/master/extra/bitset/c/arraybitsetintersection.c
         */
    }
    dst->cardinality = newcard;
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdi), %esi cmpl %esi, 0x4(%rdx) jge 0x1af4b movq %rbx, %rdi xorl %edx, %edx callq 0x11fbf movslq (%r15), %rax testq %rax, %rax jle 0x1af8d movq 0x8(%r15), %rcx movq 0x8(%rbx), %rdx movq 0x8(%r14), %rdi xorl %r8d, %r8d xorl %esi, %esi movzwl (%rcx,%r8,2), %r9d movl %esi, %r10d movw %r9w, (%rdx,%r10,2) movl %r9d, %r10d shrl $0x6, %r10d movq (%rdi,%r10,8), %r10 btq %r9, %r10 adcl $0x0, %esi incq %r8 cmpq %r8, %rax jne 0x1af64 jmp 0x1af8f xorl %esi, %esi movl %esi, (%rbx) popq %rbx popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/mixed_intersection.c
array_bitset_container_intersect
/* Return true as soon as any value of array container `src_1` is found to be
 * set in bitset container `src_2`; false when they are disjoint. */
bool array_bitset_container_intersect(const array_container_t *src_1,
                                      const bitset_container_t *src_2) {
    const int32_t n = src_1->cardinality;
    int32_t idx = 0;
    while (idx < n) {
        const uint16_t candidate = src_1->array[idx];
        if (bitset_container_contains(src_2, candidate)) {
            return true;
        }
        ++idx;
    }
    return false;
}
movslq (%rdi), %rcx testq %rcx, %rcx setg %al jle 0x1aff5 movq 0x8(%rdi), %rdx movq 0x8(%rsi), %rsi movzwl (%rdx), %edi movl %edi, %r8d shrl $0x6, %r8d movq (%rsi,%r8,8), %r8 btq %rdi, %r8 jae 0x1aff6 retq movl $0x1, %edi movq %rdi, %rax cmpq %rdi, %rcx je 0x1b01c movzwl (%rdx,%rax,2), %r8d movl %r8d, %edi shrl $0x6, %edi movq (%rsi,%rdi,8), %r9 leaq 0x1(%rax), %rdi btq %r8, %r9 jae 0x1affb cmpq %rcx, %rax setb %al retq
/lucaderi[P]CRoaring/src/containers/mixed_intersection.c
run_bitset_container_intersection_cardinality
/* Count the values common to run container `src_1` and bitset container
 * `src_2` without materializing the intersection. */
int run_bitset_container_intersection_cardinality(
    const run_container_t *src_1, const bitset_container_t *src_2) {
    // A full run container covers the whole 16-bit universe, so the
    // intersection is simply everything in the bitset.
    if (run_container_is_full(src_1)) {
        return bitset_container_cardinality(src_2);
    }
    int total = 0;
    const int32_t n_runs = src_1->n_runs;
    for (int32_t i = 0; i < n_runs; ++i) {
        const rle16_t run = src_1->runs[i];
        // Count the set bits of src_2 inside [run.value, run.value+run.length].
        total +=
            bitset_lenrange_cardinality(src_2->words, run.value, run.length);
    }
    return total;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq 0x8(%rdi), %r8 movl (%rdi), %eax movq %rax, -0x8(%rsp) cmpq $0x1, %rax jne 0x1b755 cmpw $0x0, (%r8) jne 0x1b755 cmpw $-0x1, 0x2(%r8) je 0x1b8de cmpl $0x0, -0x8(%rsp) jle 0x1b8da movq 0x8(%rsi), %r9 xorl %r10d, %r10d movabsq $0x5555555555555555, %r11 # imm = 0x5555555555555555 movabsq $0x3333333333333333, %rbx # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r14 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r15 # imm = 0x101010101010101 xorl %eax, %eax movzwl (%r8,%r10,4), %esi movzwl 0x2(%r8,%r10,4), %edi leal (%rdi,%rsi), %edx movl %esi, %r13d shrl $0x6, %r13d movl %edx, %ebp shrl $0x6, %ebp movq (%r9,%r13,8), %r12 cmpl %ebp, %r13d jne 0x1b801 movb $0x3f, %cl subb %dil, %cl movq $-0x1, %rdx shrq %cl, %rdx movl %esi, %ecx shlq %cl, %rdx andq %rdx, %r12 movq %r12, %rcx shrq %rcx andq %r11, %rcx subq %rcx, %r12 movq %r12, %rcx andq %rbx, %rcx shrq $0x2, %r12 andq %rbx, %r12 addq %rcx, %r12 movq %r12, %rcx shrq $0x4, %rcx addq %r12, %rcx andq %r14, %rcx imulq %r15, %rcx shrq $0x38, %rcx jmp 0x1b8c8 movl %esi, %ecx shrq %cl, %r12 shlq %cl, %r12 movq %r12, %rcx shrq %rcx andq %r11, %rcx subq %rcx, %r12 movq %r12, %rcx andq %rbx, %rcx shrq $0x2, %r12 andq %rbx, %r12 addq %rcx, %r12 movq %r12, %rsi shrq $0x4, %rsi addq %r12, %rsi andq %r14, %rsi imulq %r15, %rsi shrq $0x38, %rsi incl %r13d movl %ebp, %ecx cmpl %ebp, %r13d jae 0x1b887 movl %r13d, %r12d movq (%r9,%r12,8), %rdi movq %rdi, %r13 shrq %r13 andq %r11, %r13 subq %r13, %rdi movq %rdi, %r13 andq %rbx, %r13 shrq $0x2, %rdi andq %rbx, %rdi addq %r13, %rdi movq %rdi, %r13 shrq $0x4, %r13 addq %rdi, %r13 andq %r14, %r13 imulq %r15, %r13 shrq $0x38, %r13 addl %r13d, %esi incq %r12 cmpq %r12, %rcx jne 0x1b847 movq (%r9,%rcx,8), %rdi notb %dl movl %edx, %ecx shlq %cl, %rdi shrq %cl, %rdi movq %rdi, %rcx shrq %rcx andq %r11, %rcx subq %rcx, %rdi movq %rdi, %rcx andq %rbx, %rcx shrq $0x2, %rdi andq %rbx, %rdi addq %rcx, %rdi movq %rdi, 
%rcx shrq $0x4, %rcx addq %rdi, %rcx andq %r14, %rcx imulq %r15, %rcx shrq $0x38, %rcx addl %esi, %ecx addl %ecx, %eax incq %r10 cmpq -0x8(%rsp), %r10 jne 0x1b791 jmp 0x1b8e0 xorl %eax, %eax jmp 0x1b8e0 movl (%rsi), %eax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/mixed_intersection.c
bitset_bitset_container_intersection
/* Intersect two bitset containers into `*dst`.  Returns true when the result
 * is stored as a bitset (cardinality > DEFAULT_MAX_SIZE), false when it is
 * materialized as an array container.  `*dst` is NULL if allocation failed. */
bool bitset_bitset_container_intersection(const bitset_container_t *src_1,
                                          const bitset_container_t *src_2,
                                          container_t **dst) {
    // Compute the cardinality first to pick the cheaper representation.
    const int newCardinality = bitset_container_and_justcard(src_1, src_2);
    if (newCardinality > DEFAULT_MAX_SIZE) {
        *dst = bitset_container_create();
        if (*dst != NULL) {
            bitset_container_and_nocard(src_1, src_2, CAST_bitset(*dst));
            // Cardinality was already computed above; store it directly.
            CAST_bitset(*dst)->cardinality = newCardinality;
        }
        return true;  // it is a bitset
    }
    *dst = array_container_create_given_capacity(newCardinality);
    if (*dst != NULL) {
        CAST_array(*dst)->cardinality = newCardinality;
        bitset_extract_intersection_setbits_uint16(
            src_1->words, src_2->words, BITSET_CONTAINER_SIZE_IN_WORDS,
            CAST_array(*dst)->array, 0);
    }
    return false;  // not a bitset
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 callq 0x15a6e movl %eax, %ebx cmpl $0x1001, %eax # imm = 0x1001 jl 0x1bb89 callq 0x12831 movq %rax, (%r12) testq %rax, %rax je 0x1bbb4 movq %r15, %rdi movq %r14, %rsi movq %rax, %rdx callq 0x15814 movq (%r12), %rax movl %ebx, (%rax) jmp 0x1bbb4 movl %ebx, %edi callq 0x11b2b movq %rax, (%r12) testq %rax, %rax je 0x1bbb4 movl %ebx, (%rax) movq 0x8(%r15), %rdi movq 0x8(%r14), %rsi movq 0x8(%rax), %rcx movl $0x400, %edx # imm = 0x400 xorl %r8d, %r8d callq 0xfffd cmpl $0x1001, %ebx # imm = 0x1001 setge %al addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/mixed_intersection.c
array_run_container_union
/* Union of array container `src_1` and run container `src_2`, appended into
 * run container `dst`.  Implements a two-way merge over the sorted array
 * values and the sorted runs. */
void array_run_container_union(const array_container_t *src_1,
                               const run_container_t *src_2,
                               run_container_t *dst) {
    if (run_container_is_full(src_2)) {
        // A full run container absorbs everything: the union is src_2 itself.
        run_container_copy(src_2, dst);
        return;
    }
    // TODO: see whether the "2*" is spurious
    run_container_grow(dst, 2 * (src_1->cardinality + src_2->n_runs), false);
    int32_t rlepos = 0;
    int32_t arraypos = 0;
    rle16_t previousrle;
    // Seed the output with whichever input starts lower.
    if (src_2->runs[rlepos].value <= src_1->array[arraypos]) {
        previousrle = run_container_append_first(dst, src_2->runs[rlepos]);
        rlepos++;
    } else {
        previousrle =
            run_container_append_value_first(dst, src_1->array[arraypos]);
        arraypos++;
    }
    // Merge while both inputs have elements remaining.
    while ((rlepos < src_2->n_runs) && (arraypos < src_1->cardinality)) {
        if (src_2->runs[rlepos].value <= src_1->array[arraypos]) {
            run_container_append(dst, src_2->runs[rlepos], &previousrle);
            rlepos++;
        } else {
            run_container_append_value(dst, src_1->array[arraypos],
                                       &previousrle);
            arraypos++;
        }
    }
    // Drain whichever input still has elements.
    if (arraypos < src_1->cardinality) {
        while (arraypos < src_1->cardinality) {
            run_container_append_value(dst, src_1->array[arraypos],
                                       &previousrle);
            arraypos++;
        }
    } else {
        while (rlepos < src_2->n_runs) {
            run_container_append(dst, src_2->runs[rlepos], &previousrle);
            rlepos++;
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rsi), %esi cmpl $0x1, %esi jne 0x1bf05 movq 0x8(%r14), %rax cmpw $0x0, (%rax) jne 0x1bf05 cmpw $-0x1, 0x2(%rax) je 0x1c130 addl (%r15), %esi addl %esi, %esi xorl %ebp, %ebp movq %rbx, %rdi xorl %edx, %edx callq 0x1ee05 movq 0x8(%r14), %rax movq 0x8(%r15), %rcx movzwl (%rcx), %ecx cmpw %cx, (%rax) jbe 0x1bf49 movzwl %cx, %eax movq 0x8(%rbx), %rcx movslq (%rbx), %rdx movw %ax, (%rcx,%rdx,4) movw $0x0, 0x2(%rcx,%rdx,4) leal 0x1(%rdx), %ecx movl %ecx, (%rbx) xorl %edx, %edx movl $0x1, %ebp jmp 0x1bf5c movl (%rax), %eax movq 0x8(%rbx), %rcx movslq (%rbx), %rdx movl %eax, (%rcx,%rdx,4) incl (%rbx) movl $0x1, %edx movl %eax, %ecx shrl $0x10, %ecx cmpl (%r14), %edx jge 0x1c03e cmpl (%r15), %ebp jge 0x1c03e movq 0x8(%r14), %r8 movslq %edx, %r10 movq 0x8(%r15), %rsi movslq %ebp, %rdi movzwl (%rsi,%rdi,2), %edi movzwl %ax, %esi movzwl %cx, %r9d cmpw %di, (%r8,%r10,4) jbe 0x1bfb3 movzwl %di, %r8d addl %esi, %r9d incl %r9d cmpl %r8d, %r9d jae 0x1bfed movq 0x8(%rbx), %rax movslq (%rbx), %rcx movl %r8d, (%rax,%rcx,4) incl (%rbx) xorl %ecx, %ecx jmp 0x1c009 movl (%r8,%r10,4), %r10d movl %r10d, %r8d shrl $0x10, %r8d movzwl %r10w, %edi leal (%rsi,%r9), %r11d incl %r11d cmpl %r11d, %edi jbe 0x1c012 movq 0x8(%rbx), %rax movslq (%rbx), %rcx movw %r10w, (%rax,%rcx,4) movw %r8w, 0x2(%rax,%rcx,4) leal 0x1(%rcx), %eax movl %eax, (%rbx) movl %r8d, %ecx movl %r10d, %eax jmp 0x1c037 jne 0x1c007 incl %ecx movq 0x8(%rbx), %rdi movslq (%rbx), %r8 movl %ecx, %r9d shll $0x10, %r9d orl %esi, %r9d movl %r9d, -0x4(%rdi,%r8,4) movl %eax, %edi incl %ebp movl %edi, %eax jmp 0x1bf61 addl %esi, %r9d addl %r8d, %edi cmpl %r9d, %edi jb 0x1c037 subl %esi, %edi movq 0x8(%rbx), %rcx movslq (%rbx), %r8 movl %edi, %r9d shll $0x10, %r9d orl %esi, %r9d movl %r9d, -0x4(%rcx,%r8,4) movl %edi, %ecx incl %edx jmp 0x1bf61 cmpl (%r15), %ebp jge 0x1c0a9 cmpl (%r15), %ebp jge 0x1c125 movslq %ebp, %rdx movq 
0x8(%r15), %rsi movzwl (%rsi,%rdx,2), %esi movzwl %ax, %edi movzwl %cx, %r9d movzwl %si, %r8d addl %edi, %r9d incl %r9d cmpl %r8d, %r9d jae 0x1c07e movq 0x8(%rbx), %rax movslq (%rbx), %rcx movl %r8d, (%rax,%rcx,4) incl (%rbx) xorl %ecx, %ecx jmp 0x1c09a jne 0x1c098 incl %ecx movq 0x8(%rbx), %rsi movslq (%rbx), %r8 movl %ecx, %r9d shll $0x10, %r9d orl %edi, %r9d movl %r9d, -0x4(%rsi,%r8,4) movl %eax, %esi incq %rdx movslq (%r15), %rdi movl %esi, %eax cmpq %rdi, %rdx jl 0x1c04f jmp 0x1c125 cmpl (%r14), %edx jge 0x1c125 movslq %edx, %rdx movq 0x8(%r14), %rsi movl (%rsi,%rdx,4), %r9d movl %r9d, %edi shrl $0x10, %edi movzwl %ax, %esi movzwl %cx, %r10d movzwl %r9w, %r8d leal (%rsi,%r10), %r11d incl %r11d cmpl %r11d, %r8d jbe 0x1c0f3 movq 0x8(%rbx), %rax movslq (%rbx), %rcx movw %r9w, (%rax,%rcx,4) movw %di, 0x2(%rax,%rcx,4) leal 0x1(%rcx), %eax movl %eax, (%rbx) movl %edi, %ecx movl %r9d, %eax jmp 0x1c11a addl %esi, %r10d addl %edi, %r8d cmpl %r10d, %r8d jb 0x1c11a subl %esi, %r8d movq 0x8(%rbx), %rcx movslq (%rbx), %rdi movl %r8d, %r9d shll $0x10, %r9d orl %esi, %r9d movl %r9d, -0x4(%rcx,%rdi,4) movl %r8d, %ecx incq %rdx movslq (%r14), %rsi cmpq %rsi, %rdx jl 0x1c0b1 addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq movq %r14, %rdi movq %rbx, %rsi addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp jmp 0x1ee8f
/lucaderi[P]CRoaring/src/containers/mixed_union.c
array_run_container_inplace_union
void array_run_container_inplace_union(const array_container_t *src_1, run_container_t *src_2) { if (run_container_is_full(src_2)) { return; } const int32_t maxoutput = src_1->cardinality + src_2->n_runs; const int32_t neededcapacity = maxoutput + src_2->n_runs; if (src_2->capacity < neededcapacity) run_container_grow(src_2, neededcapacity, true); memmove(src_2->runs + maxoutput, src_2->runs, src_2->n_runs * sizeof(rle16_t)); rle16_t *inputsrc2 = src_2->runs + maxoutput; int32_t rlepos = 0; int32_t arraypos = 0; int src2nruns = src_2->n_runs; src_2->n_runs = 0; rle16_t previousrle; if (inputsrc2[rlepos].value <= src_1->array[arraypos]) { previousrle = run_container_append_first(src_2, inputsrc2[rlepos]); rlepos++; } else { previousrle = run_container_append_value_first(src_2, src_1->array[arraypos]); arraypos++; } while ((rlepos < src2nruns) && (arraypos < src_1->cardinality)) { if (inputsrc2[rlepos].value <= src_1->array[arraypos]) { run_container_append(src_2, inputsrc2[rlepos], &previousrle); rlepos++; } else { run_container_append_value(src_2, src_1->array[arraypos], &previousrle); arraypos++; } } if (arraypos < src_1->cardinality) { while (arraypos < src_1->cardinality) { run_container_append_value(src_2, src_1->array[arraypos], &previousrle); arraypos++; } } else { while (rlepos < src2nruns) { run_container_append(src_2, inputsrc2[rlepos], &previousrle); rlepos++; } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq %rsi, %rbx movq %rdi, %r14 movslq (%rsi), %rax cmpq $0x1, %rax jne 0x1c171 movq 0x8(%rbx), %rcx cmpw $0x0, (%rcx) jne 0x1c171 cmpw $-0x1, 0x2(%rcx) je 0x1c3a9 movslq (%r14), %r15 addq %rax, %r15 leal (%r15,%rax), %esi cmpl %esi, 0x4(%rbx) jge 0x1c18d movq %rbx, %rdi movl $0x1, %edx callq 0x1ee05 movq 0x8(%rbx), %rsi leaq (%rsi,%r15,4), %rdi movslq (%rbx), %rdx shlq $0x2, %rdx callq 0x1190 movq 0x8(%rbx), %rsi leaq (%rsi,%r15,4), %rax movslq (%rbx), %rcx movl $0x0, (%rbx) movq 0x8(%r14), %rdx movzwl (%rdx), %edx cmpw %dx, (%rax) jbe 0x1c1da movzwl %dx, %edx movw %dx, (%rsi) movw $0x0, 0x2(%rsi) movl $0x1, (%rbx) xorl %edi, %edi movl $0x1, %r8d jmp 0x1c1e8 movl (%rax), %edx movl %edx, (%rsi) incl (%rbx) movl $0x1, %edi xorl %r8d, %r8d movl %edx, %esi shrl $0x10, %esi cmpl %ecx, %edi jge 0x1c2c7 cmpl (%r14), %r8d jge 0x1c2c7 movslq %edi, %r11 movq 0x8(%r14), %r9 movslq %r8d, %r10 movzwl (%r9,%r10,2), %r10d movzwl %dx, %r9d movzwl %si, %r15d cmpw %r10w, (%rax,%r11,4) jbe 0x1c23c movzwl %r10w, %r11d leal (%r15,%r9), %ebp incl %ebp cmpl %r11d, %ebp jae 0x1c274 movq 0x8(%rbx), %rdx movslq (%rbx), %rsi movl %r11d, (%rdx,%rsi,4) incl (%rbx) xorl %esi, %esi jmp 0x1c28f movl (%rax,%r11,4), %ebp movl %ebp, %r11d shrl $0x10, %r11d movzwl %bp, %r10d leal (%r9,%r15), %r12d incl %r12d cmpl %r12d, %r10d jbe 0x1c29a movq 0x8(%rbx), %rdx movslq (%rbx), %rsi movw %bp, (%rdx,%rsi,4) movw %r11w, 0x2(%rdx,%rsi,4) leal 0x1(%rsi), %edx movl %edx, (%rbx) movl %r11d, %esi movl %ebp, %edx jmp 0x1c2c0 jne 0x1c28c incl %esi movq 0x8(%rbx), %r10 movslq (%rbx), %r11 movl %esi, %ebp shll $0x10, %ebp orl %r9d, %ebp movl %ebp, -0x4(%r10,%r11,4) movl %edx, %r10d incl %r8d movl %r10d, %edx jmp 0x1c1ed addl %r9d, %r15d addl %r11d, %r10d cmpl %r15d, %r10d jb 0x1c2c0 subl %r9d, %r10d movq 0x8(%rbx), %rsi movslq (%rbx), %r11 movl %r10d, %ebp shll $0x10, %ebp orl %r9d, %ebp movl %ebp, -0x4(%rsi,%r11,4) movl %r10d, %esi incl %edi jmp 0x1c1ed cmpl 
(%r14), %r8d jge 0x1c332 cmpl (%r14), %r8d jge 0x1c3a9 movslq %r8d, %rax movq 0x8(%r14), %rcx movzwl (%rcx,%rax,2), %ecx movzwl %dx, %edi movzwl %si, %r9d movzwl %cx, %r8d addl %edi, %r9d incl %r9d cmpl %r8d, %r9d jae 0x1c307 movq 0x8(%rbx), %rdx movslq (%rbx), %rsi movl %r8d, (%rdx,%rsi,4) incl (%rbx) xorl %esi, %esi jmp 0x1c323 jne 0x1c321 incl %esi movq 0x8(%rbx), %rcx movslq (%rbx), %r8 movl %esi, %r9d shll $0x10, %r9d orl %edi, %r9d movl %r9d, -0x4(%rcx,%r8,4) movl %edx, %ecx incq %rax movslq (%r14), %rdi movl %ecx, %edx cmpq %rdi, %rax jl 0x1c2d8 jmp 0x1c3a9 cmpl %ecx, %edi jge 0x1c3a9 movslq %edi, %rdi movl (%rax,%rdi,4), %r11d movl %r11d, %r9d shrl $0x10, %r9d movzwl %dx, %r8d movzwl %si, %r14d movzwl %r11w, %r10d leal (%r8,%r14), %ebp incl %ebp cmpl %ebp, %r10d jbe 0x1c37a movq 0x8(%rbx), %rdx movslq (%rbx), %rsi movw %r11w, (%rdx,%rsi,4) movw %r9w, 0x2(%rdx,%rsi,4) leal 0x1(%rsi), %edx movl %edx, (%rbx) movl %r9d, %esi movl %r11d, %edx jmp 0x1c3a1 addl %r8d, %r14d addl %r9d, %r10d cmpl %r14d, %r10d jb 0x1c3a1 subl %r8d, %r10d movq 0x8(%rbx), %rsi movslq (%rbx), %r9 movl %r10d, %r11d shll $0x10, %r11d orl %r8d, %r11d movl %r11d, -0x4(%rsi,%r9,4) movl %r10d, %esi incq %rdi cmpq %rdi, %rcx jne 0x1c339 popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/mixed_union.c
array_container_equal_bitset
bool array_container_equal_bitset(const array_container_t* container1, const bitset_container_t* container2) { if (container2->cardinality != BITSET_UNKNOWN_CARDINALITY) { if (container2->cardinality != container1->cardinality) { return false; } } int32_t pos = 0; for (int32_t i = 0; i < BITSET_CONTAINER_SIZE_IN_WORDS; ++i) { uint64_t w = container2->words[i]; while (w != 0) { uint64_t t = w & (~w + 1); uint16_t r = i * 64 + roaring_trailing_zeroes(w); if (pos >= container1->cardinality) { return false; } if (container1->array[pos] != r) { return false; } ++pos; w ^= t; } } return (pos == container1->cardinality); }
movl (%rsi), %eax cmpl $-0x1, %eax je 0x1c6cb cmpl (%rdi), %eax jne 0x1c74c pushq %rbx movq 0x8(%rsi), %rax xorl %edx, %edx xorl %esi, %esi xorl %ecx, %ecx movq (%rax,%rdx,8), %r10 movl %edx, %r8d shll $0x6, %r8d movq %r10, %r9 testq %r10, %r10 je 0x1c71b cmpl (%rdi), %ecx jge 0x1c710 bsfq %r9, %r10 orl %r8d, %r10d movq 0x8(%rdi), %r11 movslq %ecx, %rbx cmpw %r10w, (%r11,%rbx,2) jne 0x1c710 incl %ecx leaq -0x1(%r9), %r10 andq %r9, %r10 movb $0x1, %r11b jmp 0x1c716 xorl %r11d, %r11d movq %r9, %r10 testb %r11b, %r11b jne 0x1c6e1 testq %r9, %r9 jne 0x1c73b cmpq $0x3ff, %rdx # imm = 0x3FF leaq 0x1(%rdx), %r8 setae %sil movq %r8, %rdx cmpq $0x400, %r8 # imm = 0x400 jne 0x1c6d6 testb $0x1, %sil je 0x1c748 cmpl (%rdi), %ecx sete %al jmp 0x1c74a xorl %eax, %eax popq %rbx retq xorl %eax, %eax retq
/lucaderi[P]CRoaring/src/containers/mixed_equal.c
run_container_equals_array
bool run_container_equals_array(const run_container_t* container1, const array_container_t* container2) { if (run_container_cardinality(container1) != container2->cardinality) return false; int32_t pos = 0; for (int i = 0; i < container1->n_runs; ++i) { const uint32_t run_start = container1->runs[i].value; const uint32_t le = container1->runs[i].length; if (container2->array[pos] != run_start) { return false; } if (container2->array[pos + le] != run_start + le) { return false; } pos += le + 1; } return true; }
pushq %r14 pushq %rbx pushq %rax movq %rsi, %rbx movq %rdi, %r14 callq 0x1fa90 cmpl (%rbx), %eax jne 0x1c7be movslq (%r14), %rcx testq %rcx, %rcx setle %al jle 0x1c7c0 movq 0x8(%r14), %rax movq 0x8(%rbx), %rdx xorl %esi, %esi xorl %edi, %edi movzwl (%rax,%rsi,4), %r8d movslq %edi, %r9 cmpw %r8w, (%rdx,%r9,2) jne 0x1c7a9 movzwl 0x2(%rax,%rsi,4), %r9d leal (%rdi,%r9), %r10d movzwl (%rdx,%r10,2), %r10d addl %r9d, %r8d cmpl %r10d, %r8d jne 0x1c7a9 addl %r9d, %edi incl %edi movb $0x1, %r8b jmp 0x1c7ac xorl %r8d, %r8d testb %r8b, %r8b je 0x1c7be incq %rsi cmpq %rsi, %rcx jne 0x1c779 setbe %al jmp 0x1c7c0 xorl %eax, %eax addq $0x8, %rsp popq %rbx popq %r14 retq
/lucaderi[P]CRoaring/src/containers/mixed_equal.c
array_container_is_subset_bitset
bool array_container_is_subset_bitset(const array_container_t* container1, const bitset_container_t* container2) { if (container2->cardinality != BITSET_UNKNOWN_CARDINALITY) { if (container2->cardinality < container1->cardinality) { return false; } } for (int i = 0; i < container1->cardinality; ++i) { if (!bitset_container_contains(container2, container1->array[i])) { return false; } } return true; }
movl (%rsi), %eax cmpl $-0x1, %eax je 0x1c8ea cmpl (%rdi), %eax jge 0x1c8ea xorl %eax, %eax retq movslq (%rdi), %rcx testq %rcx, %rcx setle %al jle 0x1c8e9 movq 0x8(%rdi), %rdx movq 0x8(%rsi), %rsi movzwl (%rdx), %edi movl %edi, %r8d shrl $0x6, %r8d movq (%rsi,%r8,8), %r8 btq %rdi, %r8 jae 0x1c8e9 movl $0x1, %edi movq %rdi, %rax cmpq %rdi, %rcx je 0x1c937 movzwl (%rdx,%rax,2), %r8d movl %r8d, %edi shrl $0x6, %edi movq (%rsi,%rdi,8), %r9 leaq 0x1(%rax), %rdi btq %r8, %r9 jb 0x1c916 cmpq %rcx, %rax setae %al retq
/lucaderi[P]CRoaring/src/containers/mixed_subset.c
run_container_is_subset_array
bool run_container_is_subset_array(const run_container_t* container1, const array_container_t* container2) { if (run_container_cardinality(container1) > container2->cardinality) return false; int32_t start_pos = -1, stop_pos = -1; for (int i = 0; i < container1->n_runs; ++i) { int32_t start = container1->runs[i].value; int32_t stop = start + container1->runs[i].length; start_pos = advanceUntil(container2->array, stop_pos, container2->cardinality, start); stop_pos = advanceUntil(container2->array, stop_pos, container2->cardinality, stop); if (stop_pos == container2->cardinality) { return false; } else if (stop_pos - start_pos != stop - start || container2->array[start_pos] != start || container2->array[stop_pos] != stop) { return false; } } return true; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rsi, %rbx movq %rdi, %r14 callq 0x1fa90 movl (%rbx), %ecx cmpl %ecx, %eax jle 0x1c961 xorl %eax, %eax jmp 0x1cb02 movslq (%r14), %rdx testq %rdx, %rdx setle %al jle 0x1cb02 movq 0x8(%r14), %rsi movq 0x8(%rbx), %rdi leal -0x1(%rcx), %r8d movl $0xffffffff, %ebx # imm = 0xFFFFFFFF xorl %r9d, %r9d movzwl (%rsi,%r9,4), %r10d movl %r10d, %r11d leal 0x1(%rbx), %eax movslq %eax, %r15 movl %eax, %ebp cmpl %ecx, %eax jge 0x1c9ec movl %eax, %ebp cmpw %r11w, (%rdi,%r15,2) jae 0x1c9ec leal 0x2(%rbx), %ebp xorl %r14d, %r14d movl %r8d, %r12d cmpl %ecx, %ebp jge 0x1c9d5 movl $0x1, %r14d movslq %ebp, %r12 cmpw %r11w, (%rdi,%r12,2) jae 0x1c9cf movl %r14d, %r12d addl %r14d, %r14d leal (%rax,%r12,2), %ebp cmpl %ecx, %ebp jl 0x1c9b4 movl %r8d, %ebp sarl %r14d movl %ebp, %r12d movslq %r12d, %r13 cmpw %r11w, (%rdi,%r13,2) jne 0x1c9e4 movl %r12d, %ebp jmp 0x1c9ec movl %ecx, %ebp jae 0x1cac0 movzwl 0x2(%rsi,%r9,4), %r14d addl %r14d, %r10d cmpl %ecx, %eax jge 0x1ca42 cmpw %r10w, (%rdi,%r15,2) jae 0x1ca42 addl $0x2, %ebx xorl %r15d, %r15d movl %r8d, %r12d cmpl %ecx, %ebx jge 0x1ca33 movl $0x1, %r15d movslq %ebx, %r12 cmpw %r10w, (%rdi,%r12,2) jae 0x1ca2d movl %r15d, %ebx addl %r15d, %r15d leal (%rax,%rbx,2), %ebx cmpl %ecx, %ebx jl 0x1ca13 movl %r8d, %ebx sarl %r15d movl %ebx, %r12d movslq %r12d, %rbx cmpw %r10w, (%rdi,%rbx,2) jne 0x1ca89 movl %r12d, %ebx jmp 0x1ca44 movl %eax, %ebx xorl %eax, %eax cmpl %ecx, %ebx je 0x1cb02 movl %ebx, %r15d subl %ebp, %r15d cmpl %r14d, %r15d jne 0x1cb02 movslq %ebp, %rax cmpw %r11w, (%rdi,%rax,2) jne 0x1c95a movslq %ebx, %rax movzwl (%rdi,%rax,2), %eax cmpl %eax, %r10d jne 0x1c95a incq %r9 cmpq %rdx, %r9 jne 0x1c984 jmp 0x1caff movl %ecx, %ebx jb 0x1ca44 leal (%r15,%rax), %ebx incl %ebx cmpl %r12d, %ebx je 0x1ca3d addl %eax, %r15d leal (%r15,%r12), %ebx sarl %ebx movslq %ebx, %rax cmpw %r10w, (%rdi,%rax,2) je 0x1ca44 jae 0x1cab3 movl %ebx, %r15d movl %r12d, %ebx leal 
0x1(%r15), %eax movl %ebx, %r12d cmpl %ebx, %eax jne 0x1ca9b jmp 0x1ca44 leal (%r14,%rax), %ebp incl %ebp cmpl %r12d, %ebp je 0x1c9df addl %eax, %r14d leal (%r14,%r12), %ebp sarl %ebp movslq %ebp, %r13 cmpw %r11w, (%rdi,%r13,2) je 0x1c9ec jae 0x1caee movl %ebp, %r14d movl %r12d, %ebp leal 0x1(%r14), %r13d movl %ebp, %r12d cmpl %ebp, %r13d jne 0x1cad2 jmp 0x1c9ec setae %al addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/lucaderi[P]CRoaring/src/containers/mixed_subset.c
array_array_container_lazy_xor
bool array_array_container_lazy_xor(const array_container_t *src_1, const array_container_t *src_2, container_t **dst) { int totalCardinality = src_1->cardinality + src_2->cardinality; // // We assume that operations involving bitset containers will be faster than // operations involving solely array containers, except maybe when array // containers are small. Indeed, for example, it is cheap to compute the // exclusive union between an array and a bitset container, generally more // so than between a large array and another array. So it is advantageous to // favour bitset containers during the computation. Of course, if we convert // array containers eagerly to bitset containers, we may later need to // revert the bitset containers to array containerr to satisfy the Roaring // format requirements, but such one-time conversions at the end may not be // overly expensive. We arrived to this design based on extensive // benchmarking on unions. For XOR/exclusive union, we simply followed the // heuristic used by the unions (see mixed_union.c). Further tuning is // possible. // if (totalCardinality <= ARRAY_LAZY_LOWERBOUND) { *dst = array_container_create_given_capacity(totalCardinality); if (*dst != NULL) array_container_xor(src_1, src_2, CAST_array(*dst)); return false; // not a bitset } *dst = bitset_container_from_array(src_1); bool returnval = true; // expect a bitset (maybe, for XOR??) if (*dst != NULL) { bitset_container_t *ourbitset = CAST_bitset(*dst); bitset_flip_list(ourbitset->words, src_2->array, src_2->cardinality); ourbitset->cardinality = BITSET_UNKNOWN_CARDINALITY; } return returnval; }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl (%rsi), %ebx addl (%rdi), %ebx cmpl $0x400, %ebx # imm = 0x400 jle 0x1daad movq %r15, %rdi callq 0x1a334 movq %rax, (%r12) testq %rax, %rax je 0x1dacb movq %rax, %r15 movq 0x8(%rax), %rdi movq 0x8(%r14), %rsi movslq (%r14), %rdx callq 0x10406 movl $0xffffffff, (%r15) # imm = 0xFFFFFFFF jmp 0x1dacb movl %ebx, %edi callq 0x11b2b movq %rax, (%r12) testq %rax, %rax je 0x1dacb movq %r15, %rdi movq %r14, %rsi movq %rax, %rdx callq 0x121a8 cmpl $0x401, %ebx # imm = 0x401 setge %al addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/mixed_xor.c
array_bitset_container_andnot
void array_bitset_container_andnot(const array_container_t *src_1, const bitset_container_t *src_2, array_container_t *dst) { // follows Java implementation as of June 2016 if (dst->capacity < src_1->cardinality) { array_container_grow(dst, src_1->cardinality, false); } int32_t newcard = 0; const int32_t origcard = src_1->cardinality; for (int i = 0; i < origcard; ++i) { uint16_t key = src_1->array[i]; dst->array[newcard] = key; newcard += 1 - bitset_container_contains(src_2, key); } dst->cardinality = newcard; }
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdi), %esi cmpl %esi, 0x4(%rdx) jge 0x1dc7b movq %rbx, %rdi xorl %edx, %edx callq 0x11fbf movslq (%r15), %rax testq %rax, %rax jle 0x1dcbd movq 0x8(%r15), %rcx movq 0x8(%rbx), %rdx movq 0x8(%r14), %rdi xorl %r8d, %r8d xorl %esi, %esi movzwl (%rcx,%r8,2), %r9d movl %esi, %r10d movw %r9w, (%rdx,%r10,2) movl %r9d, %r10d shrl $0x6, %r10d movq (%rdi,%r10,8), %r10 btq %r9, %r10 sbbl $-0x1, %esi incq %r8 cmpq %r8, %rax jne 0x1dc94 jmp 0x1dcbf xorl %esi, %esi movl %esi, (%rbx) popq %rbx popq %r14 popq %r15 retq
/lucaderi[P]CRoaring/src/containers/mixed_andnot.c
absl::Substitute[abi:cxx11](std::basic_string_view<char, std::char_traits<char>>, absl::substitute_internal::Arg const&, absl::substitute_internal::Arg const&, absl::substitute_internal::Arg const&)
ABSL_MUST_USE_RESULT inline std::string Substitute( absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) { std::string result; SubstituteAndAppend(&result, format, a0, a1, a2); return result; }
pushq %r15 pushq %r14 pushq %rbx subq $0x30, %rsp movq %rdi, %rbx leaq 0x10(%rdi), %r15 movq %r15, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) movups (%rcx), %xmm0 movq %rsp, %rcx movaps %xmm0, (%rcx) movups (%r8), %xmm0 movaps %xmm0, 0x10(%rcx) movups (%r9), %xmm0 movaps %xmm0, 0x20(%rcx) movl $0x3, %r8d callq 0x174e14 movq %rbx, %rax addq $0x30, %rsp popq %rbx popq %r14 popq %r15 retq movq %rax, %r14 movq (%rbx), %rdi cmpq %r15, %rdi je 0x87141 movq (%r15), %rsi incq %rsi callq 0x6d6d0 movq %r14, %rdi callq 0x6dd70 nop
/google[P]bloaty/third_party/abseil-cpp/absl/strings/substitute.h
bloaty::Rollup::CreateDiffModeRollupOutput(bloaty::Rollup*, bloaty::Options const&, bloaty::RollupOutput*) const
void CreateDiffModeRollupOutput(Rollup* base, const Options& options, RollupOutput* output) const { RollupRow* row = &output->toplevel_row_; row->size.vm = vm_total_; row->size.file = file_total_; row->filtered_size.vm = filtered_vm_total_; row->filtered_size.file = filtered_file_total_; row->vmpercent = 100; row->filepercent = 100; if (base) { row->size.vm -= base->vm_total_; row->size.file -= base->file_total_; } output->diff_mode_ = true; CreateRows(row, base, options, true); }
movq %rcx, %rax movq %rdx, %rcx movq %rsi, %rdx movdqu (%rdi), %xmm0 movdqu %xmm0, 0x38(%rax) movups 0x10(%rdi), %xmm1 movups %xmm1, 0x48(%rax) movdqa 0x1bcbd2(%rip), %xmm1 # 0x2446e0 movdqu %xmm1, 0x68(%rax) testq %rsi, %rsi je 0x87b25 movdqu (%rdx), %xmm1 psubq %xmm1, %xmm0 movdqu %xmm0, 0x38(%rax) movb $0x1, 0xc0(%rax) addq $0x18, %rax movq %rax, %rsi movl $0x1, %r8d jmp 0x7f464
/google[P]bloaty/src/bloaty.cc
google::protobuf::internal::InternalMetadataWithArenaBase<google::protobuf::UnknownFieldSet, google::protobuf::internal::InternalMetadataWithArena>::~InternalMetadataWithArenaBase()
~InternalMetadataWithArenaBase() { if (have_unknown_fields() && arena() == NULL) { delete PtrValue<Container>(); } ptr_ = NULL; }
pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx movq (%rdi), %r14 testb $0x1, %r14b je 0x93684 movq %r14, %rax andq $-0x2, %rax cmpq $0x0, 0x18(%rax) jne 0x93684 andq $-0x2, %r14 je 0x93684 movq (%r14), %rax cmpq 0x8(%r14), %rax je 0x93663 movq %r14, %rdi callq 0x1b1fea movq (%r14), %rdi testq %rdi, %rdi je 0x93677 movq 0x10(%r14), %rsi subq %rdi, %rsi callq 0x6d6d0 movl $0x20, %esi movq %r14, %rdi callq 0x6d6d0 movq $0x0, (%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq movq %rax, %rdi callq 0x79c3c nop
/google[P]bloaty/third_party/protobuf/src/google/protobuf/metadata_lite.h
google::protobuf::io::EpsCopyOutputStream::WriteStringMaybeAliased(unsigned int, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, unsigned char*)
uint8* WriteStringMaybeAliased(uint32 num, const std::string& s, uint8* ptr) { std::ptrdiff_t size = s.size(); if (PROTOBUF_PREDICT_FALSE( size >= 128 || end_ - ptr + 16 - TagSize(num << 3) - 1 < size)) { return WriteStringMaybeAliasedOutline(num, s, ptr); } ptr = UnsafeVarint((num << 3) | 2, ptr); *ptr++ = static_cast<uint8>(size); std::memcpy(ptr, s.data(), size); return ptr + size; }
pushq %r14 pushq %rbx pushq %rax movq %rcx, %rbx movq 0x8(%rdx), %r14 cmpq $0x7f, %r14 jg 0x9376e movq (%rdi), %rcx leal (,%rsi,8), %eax movl $0x1, %r8d cmpl $0x80, %eax jb 0x936f1 movl $0x2, %r8d cmpl $0x4000, %eax # imm = 0x4000 jb 0x936f1 movl $0x3, %r8d cmpl $0x200000, %eax # imm = 0x200000 jb 0x936f1 cmpl $0x10000000, %eax # imm = 0x10000000 movl $0x5, %r8d sbbq $0x0, %r8 addq %rbx, %r8 notq %r8 addq %r8, %rcx addq $0x10, %rcx cmpq %r14, %rcx jl 0x9376e cmpl $0x7f, %eax ja 0x93711 orb $0x2, %al movb %al, (%rbx) incq %rbx jmp 0x9374d movl %eax, %ecx orb $-0x7e, %cl movb %cl, (%rbx) movl %eax, %ecx shrl $0x7, %ecx cmpl $0x3fff, %eax # imm = 0x3FFF ja 0x9372d movb %cl, 0x1(%rbx) addq $0x2, %rbx jmp 0x9374d addq $0x2, %rbx movl %ecx, %eax movl %eax, %esi orb $-0x80, %sil movb %sil, -0x1(%rbx) shrl $0x7, %ecx incq %rbx cmpl $0x3fff, %eax # imm = 0x3FFF ja 0x93731 movb %cl, -0x1(%rbx) leaq 0x1(%rbx), %rdi movb %r14b, (%rbx) movq (%rdx), %rsi movq %r14, %rdx callq 0x6d5a0 leaq (%rbx,%r14), %rax incq %rax addq $0x8, %rsp popq %rbx popq %r14 retq movq %rbx, %rcx addq $0x8, %rsp popq %rbx popq %r14 jmp 0x18362a nop
/google[P]bloaty/third_party/protobuf/src/google/protobuf/io/coded_stream.h
google::protobuf::internal::InternalMetadataWithArenaBase<google::protobuf::UnknownFieldSet, google::protobuf::internal::InternalMetadataWithArena>::mutable_unknown_fields_slow()
PROTOBUF_NOINLINE T* mutable_unknown_fields_slow() { Arena* my_arena = arena(); Container* container = Arena::Create<Container>(my_arena); // Two-step assignment works around a bug in clang's static analyzer: // https://bugs.llvm.org/show_bug.cgi?id=34198. ptr_ = container; ptr_ = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr_) | kTagContainer); container->arena = my_arena; return &(container->unknown_fields); }
pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx movq (%rdi), %r14 testb $0x1, %r14b jne 0x938f8 testq %r14, %r14 je 0x938ce cmpq $0x0, 0x70(%r14) jne 0x93902 leaq 0x59(%rip), %rdx # 0x93918 movl $0x20, %esi movq %r14, %rdi callq 0x6ec6c jmp 0x938d8 movl $0x20, %edi callq 0x6df30 xorps %xmm0, %xmm0 movups %xmm0, 0x10(%rax) movups %xmm0, (%rax) movq %rax, %rcx orq $0x1, %rcx movq %rcx, (%rbx) movq %r14, 0x18(%rax) addq $0x8, %rsp popq %rbx popq %r14 retq andq $-0x2, %r14 movq 0x18(%r14), %r14 jmp 0x938ac leaq 0x498b5f(%rip), %rsi # 0x52c468 movl $0x20, %edx movq %r14, %rdi callq 0x17cdda jmp 0x938b8
/google[P]bloaty/third_party/protobuf/src/google/protobuf/metadata_lite.h
bloaty::(anonymous namespace)::ElfFile::GetRegion(unsigned long, unsigned long) const
string_view GetRegion(uint64_t start, uint64_t n) const { return StrictSubstr(data_, start, n); }
pushq %rax movq %rdx, %rax movq %rsi, %rdx movq %rax, %rsi addq %rdx, %rsi jb 0x94e61 movq 0x8(%rdi), %rcx cmpq %rcx, %rsi ja 0x94e72 movq %rcx, %rsi subq %rdx, %rsi jb 0x94e83 cmpq %rax, %rsi cmovbq %rsi, %rax addq 0x10(%rdi), %rdx popq %rcx retq leaq 0x1b28cc(%rip), %rdi # 0x247734 movl $0x4b, %esi callq 0x9ed94 leaq 0x1b28a6(%rip), %rdi # 0x24771f movl $0x5d, %esi callq 0x9ed94 leaq 0x1b0973(%rip), %rdi # 0x2457fd leaq 0x1b0952(%rip), %rsi # 0x2457e3 xorl %eax, %eax callq 0x6db70
/google[P]bloaty/src/elf.cc
bloaty::wasm::WebAssemblyObjectFile::GetBuildId[abi:cxx11]() const
std::string GetBuildId() const override { // Use the sourceMappingURL as the build ID to be able to match to the // source map. std::string id; FindSection(file_data().data(), "sourceMappingURL", [&id](Section& section) { uint32_t size = ReadVarUInt32(&section.contents); string_view source_mapping_url = ReadPiece(size, &section.contents); id.assign(source_mapping_url); }); return id; }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x30, %rsp movq %rdi, %rbx leaq 0x10(%rdi), %r12 movq %r12, (%rdi) movq $0x0, 0x8(%rdi) movb $0x0, 0x10(%rdi) movq 0x8(%rsi), %rax movq 0x28(%rax), %r14 movq 0x30(%rax), %r15 leaq 0x18(%rsp), %r13 movq %r13, -0x10(%r13) leaq 0x1a873d(%rip), %rsi # 0x248de4 leaq 0x1a8746(%rip), %rdx # 0x248df4 leaq 0x8(%rsp), %rdi callq 0x7b4ac leaq 0x28(%rsp), %rcx movq %rbx, (%rcx) leaq 0x8(%rsp), %rdx movq %r14, %rdi movq %r15, %rsi callq 0xa08a1 movq 0x8(%rsp), %rdi cmpq %r13, %rdi je 0xa06e7 movq 0x18(%rsp), %rsi incq %rsi callq 0x6d6d0 movq %rbx, %rax addq $0x30, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %r14 movq 0x8(%rsp), %rdi cmpq %r13, %rdi je 0xa0717 movq 0x18(%rsp), %rsi incq %rsi callq 0x6d6d0 jmp 0xa0717 movq %rax, %r14 movq (%rbx), %rdi cmpq %r12, %rdi je 0xa072b movq (%r12), %rsi incq %rsi callq 0x6d6d0 movq %r14, %rdi callq 0x6dd70 nop
/google[P]bloaty/src/webassembly.cc
char const* __cxxabiv1::(anonymous namespace)::parse_block_invoke<__cxxabiv1::(anonymous namespace)::Db>(char const*, char const*, __cxxabiv1::(anonymous namespace)::Db&)
const char* parse_block_invoke(const char* first, const char* last, C& db) { if (last - first >= 13) { const char test[] = "_block_invoke"; const char* t = first; for (int i = 0; i < 13; ++i, ++t) { if (*t != test[i]) return first; } if (t != last) { if (*t == '_') { // must have at least 1 decimal digit if (++t == last || !std::isdigit(*t)) return first; ++t; } // parse zero or more digits while (t != last && isdigit(*t)) ++t; } if (db.names.empty()) return first; db.names.back().first.insert(0, "invocation function for block in "); first = t; } return first; }
movq %rdi, %rax movq %rsi, %rcx subq %rdi, %rcx cmpq $0xd, %rcx jl 0xa24ce pushq %rbx leaq 0xd(%rax), %rbx xorl %ecx, %ecx leaq 0x1a741d(%rip), %rdi # 0x249873 movb (%rax,%rcx), %r8b cmpb (%rcx,%rdi), %r8b jne 0xa24cd incq %rcx cmpq $0xd, %rcx jne 0xa2456 cmpq %rsi, %rbx je 0xa24a6 cmpb $0x5f, (%rbx) jne 0xa248b leaq 0xe(%rax), %rcx cmpq %rsi, %rcx je 0xa24cd movsbl (%rcx), %ecx addl $-0x30, %ecx cmpl $0x9, %ecx ja 0xa24cd leaq 0xf(%rax), %rbx cmpq %rsi, %rbx je 0xa24a6 movsbl (%rbx), %ecx addl $-0x30, %ecx cmpl $0x9, %ecx ja 0xa24a6 incq %rbx cmpq %rsi, %rbx jne 0xa2490 movq %rsi, %rbx movq 0x10(%rdx), %rdi cmpq %rdi, 0x8(%rdx) je 0xa24cd addq $-0x40, %rdi leaq 0x1a73c6(%rip), %rcx # 0x249881 movl $0x21, %r8d xorl %esi, %esi xorl %edx, %edx callq 0xa4ad0 movq %rbx, %rax popq %rbx retq
/google[P]bloaty/third_party/demumble/third_party/libcxxabi/cxa_demangle.cpp
re2::RE2::ReverseProgramSize() const
int RE2::ReverseProgramSize() const { if (prog_ == NULL) return -1; Prog* prog = ReverseProg(); if (prog == NULL) return -1; return prog->size(); }
pushq %rbx movl $0xffffffff, %ebx # imm = 0xFFFFFFFF cmpq $0x0, 0x70(%rdi) je 0xb9306 callq 0xb912c testq %rax, %rax je 0xb9306 movl 0x10(%rax), %ebx movl %ebx, %eax popq %rbx retq
/google[P]bloaty/third_party/re2/re2/re2.cc
re2::RE2::NamedCapturingGroups[abi:cxx11]() const
const std::map<std::string, int>& RE2::NamedCapturingGroups() const { std::call_once(named_groups_once_, [](const RE2* re) { if (re->suffix_regexp_ != NULL) re->named_groups_ = re->suffix_regexp_->NamedCaptures(); if (re->named_groups_ == NULL) re->named_groups_ = empty_named_groups; }, this); return *named_groups_; }
pushq %r15 pushq %r14 pushq %rbx subq $0x20, %rsp movq %rdi, %rbx addq $0xcc, %rdi leaq 0x8(%rsp), %rax movq %rbx, (%rax) leaq 0x7(%rsp), %rcx leaq 0x10(%rsp), %rdx movq %rcx, (%rdx) movq %rax, 0x8(%rdx) movq 0x4c9b57(%rip), %r14 # 0x582f88 movq %rdx, %fs:(%r14) movq 0x4c9b74(%rip), %r15 # 0x582fb0 leaq 0x256b(%rip), %rax # 0xbb9ae movq %rax, %fs:(%r15) movq 0x4c9b7a(%rip), %rsi # 0x582fc8 callq 0x6d660 testl %eax, %eax jne 0xb9472 xorl %eax, %eax movq %rax, %fs:(%r14) movq %rax, %fs:(%r15) movq 0xb8(%rbx), %rax addq $0x20, %rsp popq %rbx popq %r14 popq %r15 retq movl %eax, %edi callq 0x6d470 xorl %ecx, %ecx movq %rcx, %fs:(%r14) movq %rcx, %fs:(%r15) movq %rax, %rdi callq 0x6dd70 nop
/google[P]bloaty/third_party/re2/re2/re2.cc
re2::Compiler::CachedRuneByteSuffix(unsigned char, unsigned char, bool, int)
int Compiler::CachedRuneByteSuffix(uint8_t lo, uint8_t hi, bool foldcase, int next) { uint64_t key = MakeRuneCacheKey(lo, hi, foldcase, next); std::unordered_map<uint64_t, int>::const_iterator it = rune_cache_.find(key); if (it != rune_cache_.end()) return it->second; int id = UncachedRuneByteSuffix(lo, hi, foldcase, next); rune_cache_[key] = id; return id; }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x10, %rsp movl %edx, %r9d movslq %r8d, %rax shlq $0x11, %rax movl %esi, %edx shll $0x9, %edx orq %rax, %rdx movl %r9d, %eax leaq (%rdx,%rax,2), %rax movl %ecx, %r10d orq %rax, %r10 movq %r10, 0x8(%rsp) movq 0x50(%rdi), %rbx movq 0x58(%rdi), %r14 xorl %r12d, %r12d movq %r10, %rax xorl %edx, %edx divq %r14 movq (%rbx,%rdx,8), %rax testq %rax, %rax je 0xc188d movq (%rax), %r15 movq %rax, %r12 cmpq 0x8(%r15), %r10 je 0xc188d movq %rdx, %r11 movq %r15, %r13 movq (%r15), %r15 testq %r15, %r15 je 0xc188a movq 0x8(%r15), %rbx movq %rbx, %rax xorl %edx, %edx divq %r14 movl $0x0, %r12d cmpq %r11, %rdx jne 0xc188d movq %r13, %r12 cmpq %rbx, %r10 jne 0xc185e jmp 0xc188d xorl %r12d, %r12d testq %r12, %r12 je 0xc1898 movq (%r12), %rax jmp 0xc189a xorl %eax, %eax testq %rax, %rax je 0xc18a4 movl 0x10(%rax), %ebx jmp 0xc18c9 leaq 0x50(%rdi), %r14 movzbl %sil, %esi movzbl %r9b, %edx movzbl %cl, %ecx callq 0xc1794 movl %eax, %ebx leaq 0x8(%rsp), %rsi movq %r14, %rdi callq 0xc3332 movl %ebx, (%rax) movl %ebx, %eax addq $0x10, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq nop
/google[P]bloaty/third_party/re2/re2/compile.cc
re2::Compiler::IsCachedRuneByteSuffix(int)
T& operator[](int pos) const { return ptr_[pos]; }
movslq %esi, %rax movq 0x38(%rdi), %rcx movq 0x50(%rdi), %r9 movzbl 0x4(%rcx,%rax,8), %edx movzbl 0x5(%rcx,%rax,8), %esi movzwl 0x6(%rcx,%rax,8), %r8d andl $0x1, %r8d movl (%rcx,%rax,8), %eax shrl $0x4, %eax shlq $0x11, %rax shll $0x9, %edx orq %rax, %rdx leaq (%rdx,%rsi,2), %rcx orq %r8, %rcx movq 0x58(%rdi), %r8 xorl %r10d, %r10d movq %rcx, %rax xorl %edx, %edx divq %r8 movq (%r9,%rdx,8), %rax testq %rax, %rax je 0xc1966 movq (%rax), %r9 movq %rax, %r10 cmpq 0x8(%r9), %rcx je 0xc1966 movq %rdx, %rsi movq %r9, %r11 movq (%r9), %r9 testq %r9, %r9 je 0xc1963 movq 0x8(%r9), %rdi movq %rdi, %rax xorl %edx, %edx divq %r8 movl $0x0, %r10d cmpq %rsi, %rdx jne 0xc1966 movq %r11, %r10 cmpq %rdi, %rcx jne 0xc1937 jmp 0xc1966 xorl %r10d, %r10d testq %r10, %r10 je 0xc1973 cmpq $0x0, (%r10) setne %al retq xorl %eax, %eax retq
/google[P]bloaty/third_party/re2/util/pod_array.h
re2::DFA::AnalyzeSearch(re2::DFA::SearchParams*)
bool DFA::AnalyzeSearch(SearchParams* params) { const StringPiece& text = params->text; const StringPiece& context = params->context; // Sanity check: make sure that text lies within context. if (text.begin() < context.begin() || text.end() > context.end()) { LOG(DFATAL) << "context does not contain text"; params->start = DeadState; return true; } // Determine correct search type. int start; uint32_t flags; if (params->run_forward) { if (text.begin() == context.begin()) { start = kStartBeginText; flags = kEmptyBeginText|kEmptyBeginLine; } else if (text.begin()[-1] == '\n') { start = kStartBeginLine; flags = kEmptyBeginLine; } else if (Prog::IsWordChar(text.begin()[-1] & 0xFF)) { start = kStartAfterWordChar; flags = kFlagLastWord; } else { start = kStartAfterNonWordChar; flags = 0; } } else { if (text.end() == context.end()) { start = kStartBeginText; flags = kEmptyBeginText|kEmptyBeginLine; } else if (text.end()[0] == '\n') { start = kStartBeginLine; flags = kEmptyBeginLine; } else if (Prog::IsWordChar(text.end()[0] & 0xFF)) { start = kStartAfterWordChar; flags = kFlagLastWord; } else { start = kStartAfterNonWordChar; flags = 0; } } if (params->anchored) start |= kStartAnchored; StartInfo* info = &start_[start]; // Try once without cache_lock for writing. // Try again after resetting the cache // (ResetCache will relock cache_lock for writing). if (!AnalyzeSearchHelper(params, info, flags)) { ResetCache(params->cache_lock); if (!AnalyzeSearchHelper(params, info, flags)) { LOG(DFATAL) << "Failed to analyze start state."; params->failed = true; return false; } } if (ExtraDebug) fprintf(stderr, "anchored=%d fwd=%d flags=%#x state=%s first_byte=%d\n", params->anchored, params->run_forward, flags, DumpState(info->start).c_str(), info->first_byte.load()); params->start = info->start; params->first_byte = info->first_byte.load(std::memory_order_acquire); return true; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x188, %rsp # imm = 0x188 movq %rsi, %rbx movq (%rsi), %rax movq 0x10(%rsi), %rcx cmpq %rcx, %rax jb 0xc515f movq 0x8(%rbx), %rdx addq %rax, %rdx movq 0x18(%rbx), %rsi addq %rcx, %rsi cmpq %rsi, %rdx jbe 0xc51a2 leaq 0x185d1d(%rip), %rsi # 0x24ae83 leaq 0x8(%rsp), %rdi movl $0x674, %edx # imm = 0x674 callq 0xbba32 leaq 0x10(%rsp), %rdi leaq 0x185dfa(%rip), %rsi # 0x24af7b movl $0x1d, %edx callq 0x6d7d0 leaq 0x8(%rsp), %rdi callq 0xbbae8 movq $0x1, 0x28(%rbx) jmp 0xc525e movq %rdi, %r14 cmpb $0x1, 0x22(%rbx) jne 0xc51b5 cmpq %rcx, %rax je 0xc51cc movb -0x1(%rax), %al jmp 0xc51bc cmpq %rsi, %rdx je 0xc51cc movb (%rdx), %al cmpb $0xa, %al jne 0xc51d5 movl $0x1, %ebp movl $0x2, %eax jmp 0xc5208 movl $0x5, %ebp xorl %eax, %eax jmp 0xc5208 movl %eax, %ecx andb $-0x21, %cl addb $-0x41, %cl cmpb $0x1a, %cl setb %cl leal -0x30(%rax), %edx cmpb $0xa, %dl setb %dl cmpb $0x5f, %al sete %al orb %dl, %al orb %cl, %al movzbl %al, %ebp xorb $0x1, %al movzbl %al, %eax leaq 0x4(,%rax,2), %rax shll $0x9, %ebp movzbl 0x20(%rbx), %ecx orl %ecx, %eax shll $0x4, %eax leaq (%r14,%rax), %r15 addq $0xe8, %r15 movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx movl %ebp, %ecx callq 0xc52c4 testb %al, %al jne 0xc5250 movq 0x38(%rbx), %rsi movq %r14, %rdi callq 0xc4ebc movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx movl %ebp, %ecx callq 0xc52c4 testb %al, %al je 0xc526e movq (%r15), %rax movq %rax, 0x28(%rbx) movl 0x8(%r15), %eax movl %eax, 0x30(%rbx) movb $0x1, %al addq $0x188, %rsp # imm = 0x188 popq %rbx popq %r14 popq %r15 popq %rbp retq leaq 0x185c0e(%rip), %rsi # 0x24ae83 leaq 0x8(%rsp), %rdi movl $0x6a3, %edx # imm = 0x6A3 callq 0xbba32 leaq 0x10(%rsp), %rdi leaq 0x185d09(%rip), %rsi # 0x24af99 movl $0x1e, %edx callq 0x6d7d0 leaq 0x8(%rsp), %rdi callq 0xbbae8 movb $0x1, 0x40(%rbx) xorl %eax, %eax jmp 0xc5260 jmp 0xc52ae movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0xbbae8 movq %rbx, %rdi callq 0x6dd70 nop
/google[P]bloaty/third_party/re2/re2/dfa.cc
re2::Regexp::ParseState::DoConcatenation()
void Regexp::ParseState::DoConcatenation() { Regexp* r1 = stacktop_; if (r1 == NULL || IsMarker(r1->op())) { // empty concatenation is special case Regexp* re = new Regexp(kRegexpEmptyMatch, flags_); PushRegexp(re); } DoCollapse(kRegexpConcat); }
pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx movq 0x20(%rdi), %rax testq %rax, %rax je 0xcab9f cmpb $0x16, (%rax) jb 0xcabc6 movl $0x28, %edi callq 0x6df30 movq %rax, %r14 movl (%rbx), %edx movq %rax, %rdi movl $0x2, %esi callq 0xbbfb0 movq %rbx, %rdi movq %r14, %rsi callq 0xca154 movq %rbx, %rdi movl $0x5, %esi addq $0x8, %rsp popq %rbx popq %r14 jmp 0xcb9d6 movq %rax, %rbx movq %r14, %rdi callq 0x6d670 movq %rbx, %rdi callq 0x6dd70 nop
/google[P]bloaty/third_party/re2/re2/parse.cc
re2::Regexp::ParseState::DoRightParen()
bool Regexp::ParseState::DoRightParen() { // Finish the current concatenation and alternation. DoAlternation(); // The stack should be: LeftParen regexp // Remove the LeftParen, leaving the regexp, // parenthesized. Regexp* r1; Regexp* r2; if ((r1 = stacktop_) == NULL || (r2 = r1->down_) == NULL || r2->op() != kLeftParen) { status_->set_code(kRegexpMissingParen); status_->set_error_arg(whole_regexp_); return false; } // Pop off r1, r2. Will Decref or reuse below. stacktop_ = r2->down_; // Restore flags from when paren opened. Regexp* re = r2; flags_ = re->parse_flags(); // Rewrite LeftParen as capture if needed. if (re->cap_ > 0) { re->op_ = kRegexpCapture; // re->cap_ is already set re->AllocSub(1); re->sub()[0] = FinishRegexp(r1); re->simple_ = re->ComputeSimple(); } else { re->Decref(); re = r1; } return PushRegexp(re); }
pushq %r15 pushq %r14 pushq %rbx movq %rdi, %rbx callq 0xcac94 movq 0x20(%rbx), %r15 testq %r15, %r15 je 0xcac4a movq 0x10(%r15), %r14 testq %r14, %r14 je 0xcac4a cmpb $0x16, (%r14) jne 0xcac4a movq 0x10(%r14), %rax movq %rax, 0x20(%rbx) movzwl 0x2(%r14), %eax movl %eax, (%rbx) cmpl $0x0, 0x18(%r14) jle 0xcac64 movb $0xb, (%r14) movw $0x1, 0x6(%r14) movq %r15, %rsi callq 0xca0fe cmpw $0x2, 0x6(%r14) jb 0xcac71 movq 0x8(%r14), %rcx jmp 0xcac75 movq 0x18(%rbx), %rax movl $0x6, (%rax) movups 0x8(%rbx), %xmm0 movups %xmm0, 0x8(%rax) xorl %eax, %eax popq %rbx popq %r14 popq %r15 retq movq %r14, %rdi callq 0xbc232 movq %r15, %r14 jmp 0xcac84 leaq 0x8(%r14), %rcx movq %rax, (%rcx) movq %r14, %rdi callq 0xd0dae movb %al, 0x1(%r14) movq %rbx, %rdi movq %r14, %rsi callq 0xca154 movb $0x1, %al jmp 0xcac5e nop
/google[P]bloaty/third_party/re2/re2/parse.cc
re2::Regexp::ParseState::DoAlternation()
void Regexp::ParseState::DoAlternation() { DoVerticalBar(); // Now stack top is kVerticalBar. Regexp* r1 = stacktop_; stacktop_ = r1->down_; r1->Decref(); DoCollapse(kRegexpAlternate); }
pushq %rbx movq %rdi, %rbx callq 0xcaada movq 0x20(%rbx), %rdi movq 0x10(%rdi), %rax movq %rax, 0x20(%rbx) callq 0xbc232 movq %rbx, %rdi movl $0x6, %esi popq %rbx jmp 0xcb9d6
/google[P]bloaty/third_party/re2/re2/parse.cc
re2::Regexp::ParseState::DoFinish()
Regexp* Regexp::ParseState::DoFinish() { DoAlternation(); Regexp* re = stacktop_; if (re != NULL && re->down_ != NULL) { status_->set_code(kRegexpMissingParen); status_->set_error_arg(whole_regexp_); return NULL; } stacktop_ = NULL; return FinishRegexp(re); }
pushq %rbx movq %rdi, %rbx callq 0xcac94 movq 0x20(%rbx), %rsi testq %rsi, %rsi je 0xcaceb cmpq $0x0, 0x10(%rsi) je 0xcaceb movq 0x18(%rbx), %rax movl $0x6, (%rax) movups 0x8(%rbx), %xmm0 movups %xmm0, 0x8(%rax) xorl %eax, %eax popq %rbx retq movq $0x0, 0x20(%rbx) popq %rbx jmp 0xca0fe nop
/google[P]bloaty/third_party/re2/re2/parse.cc
re2::ByteMapBuilder::Mark(int, int)
void ByteMapBuilder::Mark(int lo, int hi) { DCHECK_GE(lo, 0); DCHECK_GE(hi, 0); DCHECK_LE(lo, 255); DCHECK_LE(hi, 255); DCHECK_LE(lo, hi); // Ignore any [0-255] ranges. They cause us to recolor every range, which // has no effect on the eventual result and is therefore a waste of time. if (lo == 0 && hi == 255) return; ranges_.emplace_back(lo, hi); }
pushq %rax movl %esi, 0x4(%rsp) movl %edx, (%rsp) xorl $0xff, %edx orl %esi, %edx je 0xce720 addq $0x440, %rdi # imm = 0x440 leaq 0x4(%rsp), %rsi movq %rsp, %rdx callq 0xcfd96 popq %rax retq
/google[P]bloaty/third_party/re2/re2/prog.cc
cs_open
CAPSTONE_API cs_open(cs_arch arch, cs_mode mode, csh *handle) { cs_err err; struct cs_struct *ud; if (!cs_mem_malloc || !cs_mem_calloc || !cs_mem_realloc || !cs_mem_free || !cs_vsnprintf) // Error: before cs_open(), dynamic memory management must be initialized // with cs_option(CS_OPT_MEM) return CS_ERR_MEMSETUP; if (arch < CS_ARCH_MAX && cs_arch_init[arch]) { // verify if requested mode is valid if (mode & cs_arch_disallowed_mode_mask[arch]) { *handle = 0; return CS_ERR_MODE; } ud = cs_mem_calloc(1, sizeof(*ud)); if (!ud) { // memory insufficient return CS_ERR_MEM; } ud->errnum = CS_ERR_OK; ud->arch = arch; ud->mode = mode; // by default, do not break instruction into details ud->detail = CS_OPT_OFF; // default skipdata setup ud->skipdata_setup.mnemonic = SKIPDATA_MNEM; err = cs_arch_init[ud->arch](ud); if (err) { cs_mem_free(ud); *handle = 0; return err; } *handle = (uintptr_t)ud; return CS_ERR_OK; } else { *handle = 0; return CS_ERR_ARCH; } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq 0x4b08f1(%rip), %rax # 0x5838e0 movq %rax, %xmm0 movq 0x4b08dc(%rip), %xmm1 # 0x5838d8 punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0] movq 0x4b08e8(%rip), %xmm0 # 0x5838f0 movq 0x4b08d8(%rip), %xmm2 # 0x5838e8 punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0] pxor %xmm0, %xmm0 pcmpeqd %xmm0, %xmm2 pcmpeqd %xmm0, %xmm1 movdqa %xmm1, %xmm0 shufps $0xdd, %xmm2, %xmm0 # xmm0 = xmm0[1,3],xmm2[1,3] shufps $0x88, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[0,2] andps %xmm0, %xmm1 movmskps %xmm1, %ecx movl $0x8, %r14d testl %ecx, %ecx jne 0xd3077 cmpq $0x0, 0x4b08b4(%rip) # 0x5838f8 je 0xd3077 movq %rdx, %rbx movl %edi, %r15d cmpl $0xc, %edi ja 0xd306b movl %esi, %ebp movl %r15d, %r13d leaq 0x181c63(%rip), %rcx # 0x254cc0 testl %esi, (%rcx,%r13,4) je 0xd3089 movl $0x5, %r14d jmp 0xd3071 movl $0x2, %r14d xorl %r12d, %r12d movq %r12, (%rbx) movl %r14d, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movl $0x1, %edi movl $0xb8, %esi callq *%rax testq %rax, %rax je 0xd30e2 movq %rax, %r12 xorl %r14d, %r14d movl %r14d, 0x50(%rax) movl %r15d, (%rax) movl %ebp, 0x4(%rax) movl %r14d, 0x60(%rax) leaq 0x181f05(%rip), %rax # 0x254fba movq %rax, 0x88(%r12) leaq 0x45c41c(%rip), %rax # 0x52f4e0 movq %r12, %rdi callq *(%rax,%r13,8) testl %eax, %eax je 0xd3074 movl %eax, %ebp movq %r12, %rdi callq *0x4b0816(%rip) # 0x5838f0 xorl %r12d, %r12d movl %ebp, %r14d jmp 0xd3074 movl $0x1, %r14d jmp 0xd3077
/google[P]bloaty/third_party/capstone/cs.c
cs_close
CAPSTONE_API cs_close(csh *handle) { struct cs_struct *ud; struct insn_mnem *next, *tmp; if (*handle == 0) // invalid handle return CS_ERR_CSH; ud = (struct cs_struct *)(*handle); if (ud->printer_info) cs_mem_free(ud->printer_info); // free the linked list of customized mnemonic tmp = ud->mnem_list; while(tmp) { next = tmp->next; cs_mem_free(tmp); tmp = next; } cs_mem_free(ud->insn_cache); memset(ud, 0, sizeof(*ud)); cs_mem_free(ud); // invalidate this handle by ZERO out its value. // this is to make sure it is unusable after cs_close() *handle = 0; return CS_ERR_OK; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movq (%rdi), %r14 testq %r14, %r14 je 0xd3156 movq %rdi, %rbx movq 0x10(%r14), %rdi testq %rdi, %rdi je 0xd310b callq *0x4b07e5(%rip) # 0x5838f0 movq 0xb0(%r14), %rdi testq %rdi, %rdi je 0xd3129 movq 0x28(%rdi), %r15 callq *0x4b07cf(%rip) # 0x5838f0 movq %r15, %rdi testq %r15, %r15 jne 0xd3117 movq 0x70(%r14), %rdi callq *0x4b07bd(%rip) # 0x5838f0 xorl %ebp, %ebp movl $0xb8, %edx movq %r14, %rdi xorl %esi, %esi callq 0x6d3c0 movq %r14, %rdi callq *0x4b07a3(%rip) # 0x5838f0 movq $0x0, (%rbx) jmp 0xd315b movl $0x4, %ebp movl %ebp, %eax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/google[P]bloaty/third_party/capstone/cs.c
DecodeArmMOVTWInstruction
static DecodeStatus DecodeArmMOVTWInstruction(MCInst *Inst, unsigned Insn, uint64_t Address, const void *Decoder) { DecodeStatus S = MCDisassembler_Success; unsigned Rd = fieldFromInstruction_4(Insn, 12, 4); unsigned pred = fieldFromInstruction_4(Insn, 28, 4); unsigned imm = 0; imm |= (fieldFromInstruction_4(Insn, 0, 12) << 0); imm |= (fieldFromInstruction_4(Insn, 16, 4) << 12); if (MCInst_getOpcode(Inst) == ARM_MOVTi16) if (!Check(&S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder))) return MCDisassembler_Fail; if (!Check(&S, DecodeGPRnopcRegisterClass(Inst, Rd, Address, Decoder))) return MCDisassembler_Fail; MCOperand_CreateImm0(Inst, imm); if (!Check(&S, DecodePredicateOperand(Inst, pred, Address, Decoder))) return MCDisassembler_Fail; return S; }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movl %esi, %ebx movq %rdi, %r14 movl %esi, %r12d shrl $0xc, %r12d andl $0xf, %r12d callq 0xd43b8 movl $0x3, %r15d cmpl $0xdc, %eax jne 0xe2adc movq %r14, %rdi movl %r12d, %esi callq 0xe1cf2 movl %eax, %r15d movb $0x1, %al cmpl $0x3, %r15d je 0xe2ad6 cmpl $0x1, %r15d je 0xe2ad6 testl %r15d, %r15d je 0xe2ad4 movl $0x3, %r15d xorl %eax, %eax xorl %ebp, %ebp testb %al, %al je 0xe2b49 movq %r14, %rdi movl %r12d, %esi callq 0xe1cf2 movl %eax, %ebp testl %eax, %eax je 0xe2b00 movb $0x1, %al cmpl $0x3, %ebp je 0xe2afb cmpl $0x1, %ebp je 0xe2b02 xorl %eax, %eax movl %r15d, %ebp jmp 0xe2b02 xorl %eax, %eax testb %al, %al je 0xe2b47 movl %ebx, %eax shrl $0x4, %eax movl %ebx, %ecx andl $0xfff, %ecx # imm = 0xFFF andl $0xf000, %eax # imm = 0xF000 orl %ecx, %eax shrl $0x1c, %ebx movl %eax, %esi movq %r14, %rdi callq 0xd4468 movq %r14, %rdi movl %ebx, %esi callq 0xe1b78 movl %eax, %ecx leaq 0x186422(%rip), %rdx # 0x268f5c movslq (%rdx,%rcx,4), %rcx addq %rdx, %rcx jmpq *%rcx movl %eax, %ebp jmp 0xe2b49 xorl %ebp, %ebp movl %ebp, %eax popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/google[P]bloaty/third_party/capstone/arch/ARM/ARMDisassembler.c
DecodeSTRPreImm
static DecodeStatus DecodeSTRPreImm(MCInst *Inst, unsigned Insn, uint64_t Address, const void *Decoder) { DecodeStatus S = MCDisassembler_Success; unsigned pred; unsigned Rn = fieldFromInstruction_4(Insn, 16, 4); unsigned Rt = fieldFromInstruction_4(Insn, 12, 4); unsigned imm = fieldFromInstruction_4(Insn, 0, 12); imm |= fieldFromInstruction_4(Insn, 16, 4) << 13; imm |= fieldFromInstruction_4(Insn, 23, 1) << 12; pred = fieldFromInstruction_4(Insn, 28, 4); if (Rn == 0xF || Rn == Rt) S = MCDisassembler_SoftFail; if (!Check(&S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) return MCDisassembler_Fail; if (!Check(&S, DecodeGPRRegisterClass(Inst, Rt, Address, Decoder))) return MCDisassembler_Fail; if (!Check(&S, DecodeAddrModeImm12Operand(Inst, imm, Address, Decoder))) return MCDisassembler_Fail; if (!Check(&S, DecodePredicateOperand(Inst, pred, Address, Decoder))) return MCDisassembler_Fail; return S; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movl %esi, %ebx movq %rdi, %r14 movl %esi, %eax shrl $0x10, %eax andl $0xf, %eax movl %esi, %r12d shrl $0xc, %r12d andl $0xf, %r12d movl %esi, %ecx andl $0xfff, %ecx # imm = 0xFFF movl %eax, %edx shll $0xd, %edx orl %ecx, %edx movl %esi, %ebp shrl $0xb, %ebp andl $0x1000, %ebp # imm = 0x1000 orl %edx, %ebp xorl %ecx, %ecx cmpl %r12d, %eax setne %cl cmpl $0xf, %eax leal 0x1(%rcx,%rcx), %ecx movl $0x1, %r15d cmovnel %ecx, %r15d leaq 0x19523f(%rip), %r13 # 0x278050 movzwl (%r13,%rax,2), %esi callq 0xd443b movzwl (%r13,%r12,2), %esi movq %r14, %rdi callq 0xd443b movq %r14, %rdi movl %ebp, %esi callq 0xe2d61 movl %eax, %ebp testl %eax, %eax je 0xe2e4d movb $0x1, %al cmpl $0x3, %ebp je 0xe2e48 cmpl $0x1, %ebp je 0xe2e4f xorl %eax, %eax movl %r15d, %ebp jmp 0xe2e4f xorl %eax, %eax testb %al, %al je 0xe2e76 shrl $0x1c, %ebx movq %r14, %rdi movl %ebx, %esi callq 0xe1b78 movl %eax, %ecx leaq 0x186123(%rip), %rdx # 0x268f8c movslq (%rdx,%rcx,4), %rcx addq %rdx, %rcx jmpq *%rcx movl %eax, %ebp jmp 0xe2e78 xorl %ebp, %ebp movl %ebp, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/google[P]bloaty/third_party/capstone/arch/ARM/ARMDisassembler.c
DecodeSORegMemOperand
static DecodeStatus DecodeSORegMemOperand(MCInst *Inst, unsigned Val, uint64_t Address, const void *Decoder) { DecodeStatus S = MCDisassembler_Success; ARM_AM_ShiftOpc ShOp; unsigned shift; unsigned Rn = fieldFromInstruction_4(Val, 13, 4); unsigned Rm = fieldFromInstruction_4(Val, 0, 4); unsigned type = fieldFromInstruction_4(Val, 5, 2); unsigned imm = fieldFromInstruction_4(Val, 7, 5); unsigned U = fieldFromInstruction_4(Val, 12, 1); ShOp = ARM_AM_lsl; switch (type) { case 0: ShOp = ARM_AM_lsl; break; case 1: ShOp = ARM_AM_lsr; break; case 2: ShOp = ARM_AM_asr; break; case 3: ShOp = ARM_AM_ror; break; } if (ShOp == ARM_AM_ror && imm == 0) ShOp = ARM_AM_rrx; if (!Check(&S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) return MCDisassembler_Fail; if (!Check(&S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler_Fail; if (U) shift = ARM_AM_getAM2Opc(ARM_AM_add, imm, ShOp, 0); else shift = ARM_AM_getAM2Opc(ARM_AM_sub, imm, ShOp, 0); MCOperand_CreateImm0(Inst, shift); return S; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movl %esi, %ebx movq %rdi, %r14 movl %esi, %eax shrl $0x5, %eax andl $0x3, %eax leaq 0x18602b(%rip), %rcx # 0x268fac movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax movl $0x4000, %eax # imm = 0x4000 jmp 0xe2fa6 movl $0x2000, %eax # imm = 0x2000 jmp 0xe2fa6 movl $0x8000, %eax # imm = 0x8000 movb $0x1, %cl jmp 0xe2fa8 movl $0x6000, %eax # imm = 0x6000 xorl %ecx, %ecx movl %ebx, %ebp shrl $0x7, %ebp andl $0x1f, %ebp movl %ebx, %r12d andl $0xf, %r12d testb %cl, %cl movl $0xa000, %r15d # imm = 0xA000 cmovel %eax, %r15d testl %ebp, %ebp cmovnel %eax, %r15d movl %ebx, %eax shrl $0xc, %eax andl $0x1e, %eax leaq 0x195078(%rip), %r13 # 0x278050 movzwl (%rax,%r13), %esi movq %r14, %rdi callq 0xd443b movzwl (%r13,%r12,2), %esi movq %r14, %rdi callq 0xd443b movl $0x1000, %eax # imm = 0x1000 andl %eax, %ebx orl %ebp, %ebx orl %r15d, %ebx xorl %eax, %ebx movq %r14, %rdi movq %rbx, %rsi callq 0xd4468 movl $0x3, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/google[P]bloaty/third_party/capstone/arch/ARM/ARMDisassembler.c