name
string
code
string
asm
string
file
string
bitset_container_select
/* Select the element with global rank `rank` from this bitset container.
 * `*start_rank` is the number of elements in preceding containers.  On
 * success writes the selected value to *element and returns true; if the
 * sought rank is beyond this container, advances *start_rank by the
 * container's cardinality and returns false. */
bool bitset_container_select(const bitset_container_t *container, uint32_t *start_rank, uint32_t rank, uint32_t *element) {
    int card = bitset_container_cardinality(container);
    if(rank >= *start_rank + card) {
        // Sought rank lies entirely past this container: skip it wholesale.
        *start_rank += card;
        return false;
    }
    const uint64_t *words = container->words;
    int32_t size;
    for (int i = 0; i < BITSET_CONTAINER_SIZE_IN_WORDS; i += 1) {
        size = roaring_hamming(words[i]);
        if(rank <= *start_rank + size) {
            // The answer is inside this 64-bit word: walk its set bits.
            uint64_t w = container->words[i];
            uint16_t base = i*64;
            while (w != 0) {
                uint64_t t = w & (~w + 1);        // isolate lowest set bit
                int r = roaring_trailing_zeroes(w);
                if(*start_rank == rank) {
                    *element = r+base;
                    return true;
                }
                w ^= t;                           // clear lowest set bit
                *start_rank += 1;
            }
        } else
            // Whole word precedes the target rank.
            *start_rank += size;
    }
    // Unreachable: the early cardinality check guarantees a hit above.
    assert(false);
    roaring_unreachable;
}
movl (%rsi), %r9d movl (%rdi), %eax addl %r9d, %eax cmpl %edx, %eax jbe 0x98f1 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq 0x8(%rdi), %r8 xorl %edi, %edi movabsq $0x5555555555555555, %r10 # imm = 0x5555555555555555 movabsq $0x3333333333333333, %r11 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %rbx # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %r14 # imm = 0x101010101010101 movq (%r8,%rdi,8), %r15 movq %r15, %r12 shrq %r12 andq %r10, %r12 movq %r15, %r13 subq %r12, %r13 movq %r13, %r12 andq %r11, %r12 shrq $0x2, %r13 andq %r11, %r13 addq %r12, %r13 movq %r13, %r12 shrq $0x4, %r12 addq %r13, %r12 andq %rbx, %r12 imulq %r14, %r12 shrq $0x38, %r12 addl %r9d, %r12d cmpl %edx, %r12d jae 0x98cd movl %r12d, (%rsi) jmp 0x98e9 testq %r15, %r15 je 0x98e6 cmpl %r9d, %edx je 0x98f5 leaq -0x1(%r15), %r12 andq %r12, %r15 incl %r9d movl %r9d, (%rsi) jmp 0x98cd movl %r9d, %r12d incq %rdi movl %r12d, %r9d jmp 0x9888 movl %eax, (%rsi) jmp 0x9909 shll $0x6, %edi bsfq %r15, %rsi orl %edi, %esi movl %esi, (%rcx) popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 cmpl %edx, %eax seta %al retq
/RoaringBitmap[P]CRoaring/src/containers/bitset.c
bitset_container_minimum
/* Smallest value present in the bitset container, or UINT16_MAX when the
 * container is empty. */
uint16_t bitset_container_minimum(const bitset_container_t *container) {
    int32_t idx = 0;
    while (idx < BITSET_CONTAINER_SIZE_IN_WORDS) {
        const uint64_t word = container->words[idx];
        if (word != 0) {
            // Lowest set bit of the first non-empty word, offset by the
            // word's base position.
            return roaring_trailing_zeroes(word) + idx * 64;
        }
        ++idx;
    }
    return UINT16_MAX;  // no bit set anywhere
}
movq 0x8(%rdi), %rax xorl %ecx, %ecx cmpq $-0x10000, %rcx # imm = 0xFFFF0000 je 0x9930 movq (%rax), %rdx testq %rdx, %rdx jne 0x9935 addq $-0x40, %rcx addq $0x8, %rax jmp 0x9915 movw $0xffff, %ax # imm = 0xFFFF retq bsfq %rdx, %rax subl %ecx, %eax retq
/RoaringBitmap[P]CRoaring/src/containers/bitset.c
bitset_container_get_index
/* Report whether value `pos` is present in the bitset. */
inline bool bitset_container_get(const bitset_container_t *bitset, uint16_t pos) {
    const uint64_t w = bitset->words[pos / 64];
    return (w & (UINT64_C(1) << (pos % 64))) != 0;
}
movq 0x8(%rdi), %rcx movl %esi, %r11d shrl $0x6, %r11d movq (%rcx,%r11,8), %rax btq %rsi, %rax jae 0x9c5f pushq %r15 pushq %r14 pushq %rbx andl $0x3f, %esi xorl %ebx, %ebx movabsq $0x5555555555555555, %r10 # imm = 0x5555555555555555 movabsq $0x3333333333333333, %r9 # imm = 0x3333333333333333 movabsq $0xf0f0f0f0f0f0f0f, %r8 # imm = 0xF0F0F0F0F0F0F0F movabsq $0x101010101010101, %rdi # imm = 0x101010101010101 xorl %edx, %edx cmpq %rbx, %r11 je 0x9c0e movq (%rcx,%rbx,8), %r14 movq %r14, %r15 shrq %r15 andq %r10, %r15 subq %r15, %r14 movq %r14, %r15 andq %r9, %r15 shrq $0x2, %r14 andq %r9, %r14 addq %r15, %r14 movq %r14, %r15 shrq $0x4, %r15 addq %r14, %r15 andq %r8, %r15 imulq %rdi, %r15 shrq $0x38, %r15 addl %r15d, %edx incq %rbx jmp 0x9bcc movabsq $0x2, %r11 movl %esi, %ecx shlq %cl, %r11 decq %r11 andq %r11, %rax movq %rax, %rcx shrq %rcx andq %r10, %rcx subq %rcx, %rax movq %rax, %rcx andq %r9, %rcx shrq $0x2, %rax andq %r9, %rax addq %rcx, %rax movq %rax, %rcx shrq $0x4, %rcx addq %rax, %rcx andq %r8, %rcx imulq %rdi, %rcx shrq $0x38, %rcx leal (%rdx,%rcx), %eax decl %eax popq %rbx popq %r14 popq %r15 retq movl $0xffffffff, %eax # imm = 0xFFFFFFFF retq
/RoaringBitmap[P]CRoaring/include/roaring/containers/bitset.h
bitset_container_from_array
/* Build a new bitset container holding exactly the values of the array
 * container `ac`.  Caller owns the returned container. */
bitset_container_t *bitset_container_from_array(const array_container_t *ac) {
    bitset_container_t *result = bitset_container_create();
    const int n = array_container_cardinality(ac);
    for (int k = 0; k < n; ++k) {
        bitset_container_set(result, ac->array[k]);
    }
    return result;
}
pushq %rbx movq %rdi, %rbx callq 0x34ce movl (%rbx), %edx xorl %esi, %esi testl %edx, %edx cmovlel %esi, %edx cmpq %rsi, %rdx je 0x9ceb movq 0x8(%rbx), %rcx movq 0x8(%rax), %rdi movzwl (%rcx,%rsi,2), %ecx movl %ecx, %r8d shrl $0x6, %r8d movq (%rdi,%r8,8), %r9 movq %r9, %r10 btsq %rcx, %r10 xorq %r10, %r9 shrq %cl, %r9 addl %r9d, (%rax) movq %r10, (%rdi,%r8,8) incq %rsi jmp 0x9cb6 popq %rbx retq
/RoaringBitmap[P]CRoaring/src/containers/convert.c
bitset_container_from_run
bitset_container_t *bitset_container_from_run(const run_container_t *arr) { int card = run_container_cardinality(arr); bitset_container_t *answer = bitset_container_create(); for (int rlepos = 0; rlepos < arr->n_runs; ++rlepos) { rle16_t vl = arr->runs[rlepos]; bitset_set_lenrange(answer->words, vl.value, vl.length); } answer->cardinality = card; return answer; }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx callq 0xb25b movl %eax, %ebp xorl %r15d, %r15d callq 0x34ce movq %rax, %r14 movslq (%rbx), %rax cmpq %rax, %r15 jge 0x9d2e movq 0x8(%rbx), %rax movzwl (%rax,%r15,4), %esi movzwl 0x2(%rax,%r15,4), %edx movq 0x8(%r14), %rdi callq 0x9d3f incq %r15 jmp 0x9d09 movl %ebp, (%r14) movq %r14, %rax addq $0x8, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/convert.c
bitset_set_lenrange
/* Set bits [start, start + lenminusone] (inclusive) in `words`.
 * `lenminusone` is the run length minus one, so 0 sets a single bit. */
static inline void bitset_set_lenrange(uint64_t *words, uint32_t start, uint32_t lenminusone) {
    const uint32_t first = start / 64;
    const uint32_t last = (start + lenminusone) / 64;
    const uint64_t all = ~UINT64_C(0);
    if (first == last) {
        // Run fits in a single word: build a mask of lenminusone+1 bits
        // and shift it into position.
        words[first] |= (all >> ((63 - lenminusone) % 64)) << (start % 64);
        return;
    }
    // Head word: every bit from the start position upward.
    words[first] |= all << (start % 64);
    // Interior words are set wholesale.
    for (uint32_t i = first + 1; i < last; ++i) {
        words[i] = all;
    }
    // Tail word: the low ((start + lenminusone) % 64) + 1 bits.
    words[last] |= all >> (63 - ((start + lenminusone) % 64));
}
movl %esi, %r8d shrl $0x6, %r8d leal (%rdx,%rsi), %eax movl %eax, %ecx shrl $0x6, %ecx cmpl %ecx, %r8d jne 0x9d6a movb $0x3f, %cl subb %dl, %cl pushq $-0x1 popq %rax shrq %cl, %rax movl %esi, %ecx shlq %cl, %rax movl %r8d, %ecx orq %rax, (%rdi,%rcx,8) retq movl %ecx, %edx movq (%rdi,%rdx,8), %r9 pushq $-0x1 popq %r10 movq %r10, %r11 movl %esi, %ecx shlq %cl, %r11 movl %r8d, %ecx orq %r11, (%rdi,%rcx,8) incl %r8d pcmpeqd %xmm0, %xmm0 cmpq %rdx, %r8 jae 0x9d9b movdqu %xmm0, (%rdi,%r8,8) addq $0x2, %r8 jmp 0x9d8a notb %al movl %eax, %ecx shrq %cl, %r10 orq %r10, %r9 movq %r9, (%rdi,%rdx,8) retq
/RoaringBitmap[P]CRoaring/include/roaring/bitset_util.h
array_container_from_bitset
/* Convert a bitset container into an array container.  The result's
 * capacity and cardinality are both taken from bits->cardinality, so the
 * caller must ensure the cardinality field is up to date. */
array_container_t *array_container_from_bitset(const bitset_container_t *bits) {
    array_container_t *result =
        array_container_create_given_capacity(bits->cardinality);
    result->cardinality = bits->cardinality;
#if CROARING_IS_X64
#if CROARING_COMPILER_SUPPORTS_AVX512
    // Runtime CPU dispatch: only use AVX-512 extraction when supported.
    if (croaring_hardware_support() & ROARING_SUPPORTS_AVX512) {
        bitset_extract_setbits_avx512_uint16(
            bits->words, BITSET_CONTAINER_SIZE_IN_WORDS, result->array,
            bits->cardinality, 0);
    } else
#endif
    {
        // sse version ends up being slower here
        // (bitset_extract_setbits_sse_uint16)
        // because of the sparsity of the data
        bitset_extract_setbits_uint16(
            bits->words, BITSET_CONTAINER_SIZE_IN_WORDS, result->array, 0);
    }
#else
    // If the system is not x64, then we have no accelerated function.
    bitset_extract_setbits_uint16(bits->words, BITSET_CONTAINER_SIZE_IN_WORDS,
                                  result->array, 0);
#endif
    return result;
}
pushq %r14 pushq %rbx pushq %rax movq %rdi, %r14 movl (%rdi), %edi callq 0x29ea movq %rax, %rbx movl (%r14), %eax movl %eax, (%rbx) callq 0xbc68 movq 0x8(%r14), %rdi movq 0x8(%rbx), %rdx testb $0x2, %al jne 0x9e35 movl $0x400, %esi # imm = 0x400 xorl %ecx, %ecx callq 0xdf27 jmp 0x9e45 movslq (%r14), %rcx movl $0x400, %esi # imm = 0x400 xorl %r8d, %r8d callq 0xdba6 movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 retq
/RoaringBitmap[P]CRoaring/src/containers/convert.c
run_container_from_array
/* Convert a sorted array container into a run container by detecting
 * maximal runs of consecutive values.  Caller owns the result. */
run_container_t *run_container_from_array(const array_container_t *c) {
    int32_t n_runs = array_container_number_of_runs(c);
    run_container_t *answer = run_container_create_given_capacity(n_runs);
    int prev = -2;       // -2 guarantees the first value starts a new run
    int run_start = -1;  // -1 means "no run in progress yet"
    int32_t card = c->cardinality;
    if (card == 0) return answer;
    for (int i = 0; i < card; ++i) {
        const uint16_t cur_val = c->array[i];
        if (cur_val != prev + 1) {
            // new run starts; flush old one, if any
            if (run_start != -1) add_run(answer, run_start, prev);
            run_start = cur_val;
        }
        prev = c->array[i];
    }
    // now prev is the last seen value
    add_run(answer, run_start, prev);
    // assert(run_container_cardinality(answer) == c->cardinality);
    return answer;
}
pushq %rbx movq %rdi, %rbx callq 0x32e9 movl %eax, %edi callq 0xa749 movl (%rbx), %ecx testl %ecx, %ecx je 0x9eda xorl %edx, %edx testl %ecx, %ecx cmovlel %edx, %ecx pushq $-0x2 popq %rsi pushq $-0x1 popq %rdi cmpq %rdx, %rcx je 0x9ec3 movq 0x8(%rbx), %r9 movzwl (%r9,%rdx,2), %r8d leal 0x1(%rsi), %r10d cmpl %r8d, %r10d jne 0x9e92 movl %r8d, %esi movl %edi, %r8d jmp 0x9ebb cmpl $-0x1, %edi je 0x9eb8 movq 0x8(%rax), %r10 movslq (%rax), %r11 movw %di, (%r10,%r11,4) subl %edi, %esi movw %si, 0x2(%r10,%r11,4) leal 0x1(%r11), %esi movl %esi, (%rax) movzwl (%r9,%rdx,2), %esi jmp 0x9ebb movl %r8d, %esi incq %rdx movl %r8d, %edi jmp 0x9e73 movq 0x8(%rax), %rcx movslq (%rax), %rdx movw %di, (%rcx,%rdx,4) subl %edi, %esi movw %si, 0x2(%rcx,%rdx,4) leal 0x1(%rdx), %ecx movl %ecx, (%rax) popq %rbx retq
/RoaringBitmap[P]CRoaring/src/containers/convert.c
run_container_create_given_capacity
run_container_t *run_container_create_given_capacity(int32_t size) { run_container_t *run; /* Allocate the run container itself. */ if ((run = (run_container_t *)roaring_malloc(sizeof(run_container_t))) == NULL) { return NULL; } if (size <= 0) { // we don't want to rely on malloc(0) run->runs = NULL; } else if ((run->runs = (rle16_t *)roaring_malloc(sizeof(rle16_t) * size)) == NULL) { roaring_free(run); return NULL; } run->capacity = size; run->n_runs = 0; return run; }
pushq %rbp pushq %rbx pushq %rax movl %edi, %ebp pushq $0x10 popq %rdi callq 0xbc19 testq %rax, %rax je 0xa77e movq %rax, %rbx testl %ebp, %ebp jle 0xa782 movl %ebp, %edi shlq $0x2, %rdi callq 0xbc19 movq %rax, 0x8(%rbx) testq %rax, %rax jne 0xa787 movq %rbx, %rdi callq 0xbc31 xorl %ebx, %ebx jmp 0xa78d andq $0x0, 0x8(%rbx) movl %ebp, 0x4(%rbx) andl $0x0, (%rbx) movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_offset
/* Add `offset` to every value in `c`, splitting the result into the part
 * that stays below 2^16 (*loc) and the part that wraps past it (*hic).
 * Either output pointer may be NULL to skip that half.  A run straddling
 * the wrap boundary is duplicated into both halves and patched at the end. */
void run_container_offset(const run_container_t *c, container_t **loc,
                          container_t **hic, uint16_t offset) {
    run_container_t *lo = NULL, *hi = NULL;

    bool split;
    int lo_cap, hi_cap;
    int top, pivot;

    top = (1 << 16) - offset;  // first value that would wrap past 65535
    pivot = run_container_index_equalorlarger(c, top);

    if (pivot == -1) {
        // No run reaches `top`: everything lands in the low half.
        split = false;
        lo_cap = c->n_runs;
        hi_cap = 0;
    } else {
        // A run that starts before `top` but extends to it must be split
        // between the two halves.
        split = c->runs[pivot].value < top;
        lo_cap = pivot + (split ? 1 : 0);
        hi_cap = c->n_runs - pivot;
    }

    if (loc && lo_cap) {
        lo = run_container_create_given_capacity(lo_cap);
        memcpy(lo->runs, c->runs, lo_cap * sizeof(rle16_t));
        lo->n_runs = lo_cap;
        for (int i = 0; i < lo_cap; ++i) {
            lo->runs[i].value += offset;
        }
        *loc = (container_t *)lo;
    }

    if (hic && hi_cap) {
        hi = run_container_create_given_capacity(hi_cap);
        memcpy(hi->runs, c->runs + pivot, hi_cap * sizeof(rle16_t));
        hi->n_runs = hi_cap;
        for (int i = 0; i < hi_cap; ++i) {
            // The uint16_t addition wraps, landing these runs near zero.
            hi->runs[i].value += offset;
        }
        *hic = (container_t *)hi;
    }

    // Fix the split.
    if (split) {
        if (lo != NULL) {
            // Add the missing run to 'lo', exhausting length.
            lo->runs[lo->n_runs - 1].length =
                (1 << 16) - lo->runs[lo->n_runs - 1].value - 1;
        }

        if (hi != NULL) {
            // Fix the first run in 'hi'.
            hi->runs[0].length -= UINT16_MAX - hi->runs[0].value + 1;
            hi->runs[0].value = 0;
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %ecx, %ebp movq %rdx, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq %rdi, %r14 movl $0x10000, %ebx # imm = 0x10000 subl %ecx, %ebx movzwl %bx, %esi callq 0xa592 movl %eax, %r13d cmpl $-0x1, %eax je 0xa86d movq 0x8(%r14), %rax movl %r13d, %ecx movzwl (%rax,%rcx,4), %eax xorl %ecx, %ecx cmpl %eax, %ebx seta %cl movl %ecx, %ebx addl %r13d, %ebx movl (%r14), %r12d subl %r13d, %r12d jmp 0xa875 movl (%r14), %ebx xorl %ecx, %ecx xorl %r12d, %r12d xorl %r15d, %r15d cmpq $0x0, 0x8(%rsp) movl %ecx, 0x4(%rsp) je 0xa8ce testl %ebx, %ebx je 0xa8ce movl %ebx, %edi callq 0xa749 movq %rax, %r15 movq 0x8(%rax), %rdi movq 0x8(%r14), %rsi movslq %ebx, %rdx shlq $0x2, %rdx callq 0x10a0 movl %ebx, (%r15) xorl %eax, %eax testl %ebx, %ebx cmovlel %eax, %ebx cmpq %rax, %rbx je 0xa8c2 movq 0x8(%r15), %rcx addw %bp, (%rcx,%rax,4) incq %rax jmp 0xa8b0 movq 0x8(%rsp), %rax movq %r15, (%rax) movl 0x4(%rsp), %ecx xorl %ebx, %ebx cmpq $0x0, 0x10(%rsp) je 0xa92d testl %r12d, %r12d je 0xa92d movl %r12d, %edi callq 0xa749 movq %rax, %rbx movq 0x8(%rax), %rdi movslq %r13d, %rsi shlq $0x2, %rsi addq 0x8(%r14), %rsi movslq %r12d, %rdx shlq $0x2, %rdx callq 0x10a0 movl %r12d, (%rbx) xorl %eax, %eax testl %r12d, %r12d cmovlel %eax, %r12d cmpq %rax, %r12 je 0xa921 movq 0x8(%rbx), %rcx addw %bp, (%rcx,%rax,4) incq %rax jmp 0xa90f movq 0x10(%rsp), %rax movq %rbx, (%rax) movl 0x4(%rsp), %ecx testb %cl, %cl je 0xa95d testq %r15, %r15 je 0xa949 movq 0x8(%r15), %rax movslq (%r15), %rcx movzwl -0x4(%rax,%rcx,4), %edx notl %edx movw %dx, -0x2(%rax,%rcx,4) testq %rbx, %rbx je 0xa95d movq 0x8(%rbx), %rax movzwl (%rax), %ecx addw %cx, 0x2(%rax) andw $0x0, (%rax) addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_union
/* Compute the union of two run containers into `dst`.  Runs from both
 * sources are merged in sorted order; adjacent and overlapping runs are
 * coalesced by run_container_append. */
void run_container_union(const run_container_t *src_1,
                         const run_container_t *src_2, run_container_t *dst) {
    // TODO: this could be a lot more efficient

    // we start out with inexpensive checks
    const bool if1 = run_container_is_full(src_1);
    const bool if2 = run_container_is_full(src_2);
    if (if1 || if2) {
        // A full container absorbs everything: just copy it.
        if (if1) {
            run_container_copy(src_1, dst);
            return;
        }
        if (if2) {
            run_container_copy(src_2, dst);
            return;
        }
    }
    const int32_t neededcapacity = src_1->n_runs + src_2->n_runs;
    if (dst->capacity < neededcapacity)
        run_container_grow(dst, neededcapacity, false);
    dst->n_runs = 0;
    int32_t rlepos = 0;
    int32_t xrlepos = 0;

    rle16_t previousrle;
    // Seed the output with whichever source starts first.
    if (src_1->runs[rlepos].value <= src_2->runs[xrlepos].value) {
        previousrle = run_container_append_first(dst, src_1->runs[rlepos]);
        rlepos++;
    } else {
        previousrle = run_container_append_first(dst, src_2->runs[xrlepos]);
        xrlepos++;
    }

    // Standard two-way merge over the remaining runs.
    while ((xrlepos < src_2->n_runs) && (rlepos < src_1->n_runs)) {
        rle16_t newrl;
        if (src_1->runs[rlepos].value <= src_2->runs[xrlepos].value) {
            newrl = src_1->runs[rlepos];
            rlepos++;
        } else {
            newrl = src_2->runs[xrlepos];
            xrlepos++;
        }
        run_container_append(dst, newrl, &previousrle);
    }
    // Drain whichever source still has runs left.
    while (xrlepos < src_2->n_runs) {
        run_container_append(dst, src_2->runs[xrlepos], &previousrle);
        xrlepos++;
    }
    while (rlepos < src_1->n_runs) {
        run_container_append(dst, src_1->runs[rlepos], &previousrle);
        rlepos++;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdx, %rbx movq %rsi, %r15 movq %rdi, %r14 callq 0xa5df movl %eax, %ebp movq %r15, %rdi callq 0xa5df movl %ebp, %ecx orb %al, %cl testb $0x1, %cl je 0xaaa7 testb %bpl, %bpl je 0xaa8a movq %r14, %rdi jmp 0xaa91 testb $0x1, %al je 0xaaa7 movq %r15, %rdi movq %rbx, %rsi addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0xaa0d movl (%r15), %esi addl (%r14), %esi cmpl %esi, 0x4(%rbx) jge 0xaabc movq %rbx, %rdi xorl %edx, %edx callq 0xa988 andl $0x0, (%rbx) movq 0x8(%r14), %rax movzwl (%rax), %ecx movq 0x8(%r15), %rdx xorl %ebp, %ebp xorl %r13d, %r13d cmpw (%rdx), %cx setbe %bpl seta %r13b movq 0x8(%rbx), %rcx cmovaq %rdx, %rax movl (%rax), %eax movl %eax, (%rcx) movq %rbx, 0x10(%rsp) incl (%rbx) leaq 0xc(%rsp), %rcx movl %eax, (%rcx) movl (%r15), %eax movl %ebp, %r12d movl %r13d, %ebx cmpl %eax, %r13d jge 0xab45 cmpl (%r14), %ebp jge 0xab45 movq 0x8(%r14), %rax leaq (%rax,%r12,4), %rax movzwl (%rax), %ecx movq 0x8(%r15), %rdx leaq (%rdx,%rbx,4), %rdx xorl %esi, %esi xorl %edi, %edi cmpw (%rdx), %cx setbe %sil seta %dil cmovaq %rdx, %rax addl %edi, %r13d addl %esi, %ebp movl (%rax), %esi movq 0x10(%rsp), %rdi leaq 0xc(%rsp), %rdx callq 0xaba4 jmp 0xaaf4 leaq 0xc(%rsp), %r13 movq 0x10(%rsp), %rbp cltq cmpq %rax, %rbx jge 0xab70 movq 0x8(%r15), %rax movl (%rax,%rbx,4), %esi movq %rbp, %rdi movq %r13, %rdx callq 0xaba4 incq %rbx movl (%r15), %eax jmp 0xab4f leaq 0xc(%rsp), %r15 movslq (%r14), %rax cmpq %rax, %r12 jge 0xab95 movq 0x8(%r14), %rax movl (%rax,%r12,4), %esi movq %rbp, %rdi movq %r15, %rdx callq 0xaba4 incq %r12 jmp 0xab75 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_append
/* Append run `vl` to `run`, merging it into *previousrl when the two
 * touch or overlap.  `previousrl` must describe the last run currently
 * stored in `run`; it is kept up to date in place.  `vl.value` must be
 * >= previousrl->value (runs arrive in sorted order). */
static inline void run_container_append(run_container_t *run, rle16_t vl,
                                        rle16_t *previousrl) {
    const uint32_t previousend = previousrl->value + previousrl->length;
    if (vl.value > previousend + 1) {  // we add a new one
        run->runs[run->n_runs] = vl;
        run->n_runs++;
        *previousrl = vl;
    } else {
        uint32_t newend = vl.value + vl.length + UINT32_C(1);
        if (newend > previousend) {  // we merge
            previousrl->length = (uint16_t)(newend - 1 - previousrl->value);
            run->runs[run->n_runs - 1] = *previousrl;
        }
        // else: vl is fully contained in the previous run; nothing to do.
    }
}
movl %esi, %eax shrl $0x10, %eax movzwl (%rdx), %ecx movzwl 0x2(%rdx), %r9d movzwl %si, %r8d leal (%r9,%rcx), %r10d incl %r10d cmpl %r10d, %r8d jbe 0xabe1 movq 0x8(%rdi), %rcx movslq (%rdi), %r8 movw %si, (%rcx,%r8,4) movw %ax, 0x2(%rcx,%r8,4) leal 0x1(%r8), %ecx movl %ecx, (%rdi) movw %si, (%rdx) movw %ax, 0x2(%rdx) retq addl %ecx, %r9d addl %eax, %r8d cmpl %r9d, %r8d jb 0xac01 subl %ecx, %r8d movw %r8w, 0x2(%rdx) movq 0x8(%rdi), %rax movslq (%rdi), %rcx movl (%rdx), %edx movl %edx, -0x4(%rax,%rcx,4) retq
/RoaringBitmap[P]CRoaring/include/roaring/containers/run.h
run_container_smart_append_exclusive
/* XOR-append the run [start, start + length] onto `src`, where `start`
 * is assumed to be >= the value of the last stored run.  Any overlap
 * with that last run is resolved as a symmetric difference, which may
 * extend, shrink, replace, or entirely delete it. */
void run_container_smart_append_exclusive(run_container_t *src,
                                          const uint16_t start,
                                          const uint16_t length) {
    int old_end;
    rle16_t *last_run = src->n_runs ? src->runs + (src->n_runs - 1) : NULL;
    rle16_t *appended_last_run = src->runs + src->n_runs;

    if (!src->n_runs ||
        (start > (old_end = last_run->value + last_run->length + 1))) {
        // Disjoint from the last run (or container empty): plain append.
        *appended_last_run = CROARING_MAKE_RLE16(start, length);
        src->n_runs++;
        return;
    }
    if (old_end == start) {
        // we merge
        last_run->length += (length + 1);
        return;
    }
    int new_end = start + length + 1;  // exclusive end of the new run

    if (start == last_run->value) {
        // wipe out previous
        if (new_end < old_end) {
            // New run is a prefix of the old one: keep only the tail.
            *last_run = CROARING_MAKE_RLE16(new_end, old_end - new_end - 1);
            return;
        } else if (new_end > old_end) {
            // Old run is a prefix of the new one: keep only the new tail.
            *last_run = CROARING_MAKE_RLE16(old_end, new_end - old_end - 1);
            return;
        } else {
            // Exact cancellation: drop the last run entirely.
            src->n_runs--;
            return;
        }
    }
    // Partial overlap: truncate the last run just before `start` ...
    last_run->length = start - last_run->value - 1;
    // ... then emit whichever tail survives the symmetric difference.
    if (new_end < old_end) {
        *appended_last_run =
            CROARING_MAKE_RLE16(new_end, old_end - new_end - 1);
        src->n_runs++;
    } else if (new_end > old_end) {
        *appended_last_run =
            CROARING_MAKE_RLE16(old_end, new_end - old_end - 1);
        src->n_runs++;
    }
}
pushq %rbx movslq (%rdi), %rcx movq 0x8(%rdi), %rax testq %rcx, %rcx je 0xae77 leaq (%rax,%rcx,4), %rax movzwl %si, %ebx movzwl -0x4(%rax), %r11d movzwl -0x2(%rax), %r10d leal (%r10,%r11), %r8d leal (%r10,%r11), %r9d incl %r9d cmpl %ebx, %r9d jae 0xae84 movw %si, (%rax) movw %dx, 0x2(%rax) incl %ecx movl %ecx, (%rdi) popq %rbx retq jne 0xae92 leal (%rdx,%r10), %ecx incl %ecx movw %cx, -0x2(%rax) jmp 0xae82 movzwl %dx, %edx leal (%rdx,%rbx), %r10d incl %r10d addl %ebx, %edx cmpw %bx, %r11w jne 0xaeba cmpl %r8d, %edx jae 0xaed9 notl %edx addl %edx, %r8d movw %r10w, -0x4(%rax) movw %r8w, -0x2(%rax) jmp 0xae82 notl %r11d addl %r11d, %esi movw %si, -0x2(%rax) cmpl %r8d, %edx jae 0xaeec notl %edx addl %edx, %r8d movw %r10w, (%rax) movw %r8w, 0x2(%rax) jmp 0xae7e jbe 0xaefe notl %r8d addl %r8d, %edx movw %r9w, -0x4(%rax) movw %dx, -0x2(%rax) jmp 0xae82 jbe 0xae82 notl %r8d addl %r8d, %edx movw %r9w, (%rax) movw %dx, 0x2(%rax) jmp 0xae7e decl %ecx jmp 0xae80
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_intersection
/* Compute the intersection of two run containers into `dst` (which must
 * be distinct from both sources).  Performs an interval sweep: both run
 * lists are scanned in order and each overlapping segment is emitted. */
void run_container_intersection(const run_container_t *src_1,
                               const run_container_t *src_2,
                               run_container_t *dst) {
    const bool if1 = run_container_is_full(src_1);
    const bool if2 = run_container_is_full(src_2);
    if (if1 || if2) {
        // Intersecting with a full container yields the other container.
        if (if1) {
            run_container_copy(src_2, dst);
            return;
        }
        if (if2) {
            run_container_copy(src_1, dst);
            return;
        }
    }
    // TODO: this could be a lot more efficient, could use SIMD optimizations
    const int32_t neededcapacity = src_1->n_runs + src_2->n_runs;
    if (dst->capacity < neededcapacity)
        run_container_grow(dst, neededcapacity, false);
    dst->n_runs = 0;
    int32_t rlepos = 0;
    int32_t xrlepos = 0;
    int32_t start = src_1->runs[rlepos].value;
    int32_t end = start + src_1->runs[rlepos].length + 1;    // exclusive end
    int32_t xstart = src_2->runs[xrlepos].value;
    int32_t xend = xstart + src_2->runs[xrlepos].length + 1;  // exclusive end
    while ((rlepos < src_1->n_runs) && (xrlepos < src_2->n_runs)) {
        if (end <= xstart) {
            // src_1's run ends before src_2's begins: advance src_1.
            ++rlepos;
            if (rlepos < src_1->n_runs) {
                start = src_1->runs[rlepos].value;
                end = start + src_1->runs[rlepos].length + 1;
            }
        } else if (xend <= start) {
            // src_2's run ends before src_1's begins: advance src_2.
            ++xrlepos;
            if (xrlepos < src_2->n_runs) {
                xstart = src_2->runs[xrlepos].value;
                xend = xstart + src_2->runs[xrlepos].length + 1;
            }
        } else {  // they overlap
            const int32_t lateststart = start > xstart ? start : xstart;
            int32_t earliestend;
            if (end == xend) {  // improbable
                earliestend = end;
                rlepos++;
                xrlepos++;
                if (rlepos < src_1->n_runs) {
                    start = src_1->runs[rlepos].value;
                    end = start + src_1->runs[rlepos].length + 1;
                }
                if (xrlepos < src_2->n_runs) {
                    xstart = src_2->runs[xrlepos].value;
                    xend = xstart + src_2->runs[xrlepos].length + 1;
                }
            } else if (end < xend) {
                earliestend = end;
                rlepos++;
                if (rlepos < src_1->n_runs) {
                    start = src_1->runs[rlepos].value;
                    end = start + src_1->runs[rlepos].length + 1;
                }
            } else {  // end > xend
                earliestend = xend;
                xrlepos++;
                if (xrlepos < src_2->n_runs) {
                    xstart = src_2->runs[xrlepos].value;
                    xend = xstart + src_2->runs[xrlepos].length + 1;
                }
            }
            // Emit the overlapping segment [lateststart, earliestend).
            dst->runs[dst->n_runs].value = (uint16_t)lateststart;
            dst->runs[dst->n_runs].length =
                (uint16_t)(earliestend - lateststart - 1);
            dst->n_runs++;
        }
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 callq 0xa5df movl %eax, %ebp movq %r14, %rdi callq 0xa5df orb %bpl, %al testb $0x1, %al je 0xaf52 testb %bpl, %bpl cmovneq %r14, %r15 movq %r15, %rdi movq %rbx, %rsi addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0xaa0d movl (%r14), %esi addl (%r15), %esi cmpl %esi, 0x4(%rbx) jge 0xaf67 movq %rbx, %rdi xorl %edx, %edx callq 0xa988 andl $0x0, (%rbx) movq 0x8(%r15), %rax movzwl (%rax), %edx movq %rax, 0x10(%rsp) movzwl 0x2(%rax), %ecx leal 0x1(%rdx,%rcx), %r12d movq 0x8(%r14), %rcx movzwl (%rcx), %edi movzwl 0x2(%rcx), %esi addl %edi, %esi incl %esi xorl %ebp, %ebp xorl %r8d, %r8d xorl %r9d, %r9d movl (%r15), %r13d movl %r12d, %r10d movl %esi, %r11d cmpl %r13d, %r8d jge 0xb0c9 movl (%r14), %esi cmpl %esi, %r9d jge 0xb0c9 cmpl %edi, %r10d jle 0xafdb cmpl %edx, %r11d jg 0xaffd incl %r9d cmpl %esi, %r9d jge 0xaf9f movslq %r9d, %rsi movzwl (%rcx,%rsi,4), %edi movzwl 0x2(%rcx,%rsi,4), %esi leal (%rdi,%rsi), %r11d incl %r11d jmp 0xaf9f incl %r8d cmpl %r13d, %r8d jge 0xaf9f movslq %r8d, %rsi movq 0x10(%rsp), %rax movzwl (%rax,%rsi,4), %edx movzwl 0x2(%rax,%rsi,4), %esi leal (%rdx,%rsi), %r10d incl %r10d jmp 0xaf9f movl %ebp, 0xc(%rsp) cmpl %edi, %edx movl %edi, %eax cmovgl %edx, %eax cmpl %r11d, %r10d jne 0xb04c incl %r8d movl %r10d, %r12d cmpl %r13d, %r8d jge 0xb032 movslq %r8d, %r11 movq 0x10(%rsp), %r12 movzwl (%r12,%r11,4), %edx movzwl 0x2(%r12,%r11,4), %r11d leal (%rdx,%r11), %r12d incl %r12d incl %r9d cmpl %esi, %r9d jge 0xb072 movslq %r9d, %rsi movzwl (%rcx,%rsi,4), %edi movzwl 0x2(%rcx,%rsi,4), %esi addl %edi, %esi incl %esi jmp 0xb0a5 jge 0xb077 incl %r8d cmpl %r13d, %r8d jge 0xb094 movslq %r8d, %rsi movq 0x10(%rsp), %r12 movzwl (%r12,%rsi,4), %edx movzwl 0x2(%r12,%rsi,4), %esi leal (%rdx,%rsi), %r12d incl %r12d jmp 0xb097 movl %r10d, %esi jmp 0xb0a5 incl %r9d cmpl %esi, %r9d jge 0xb09c movslq %r9d, 
%rsi movzwl (%rcx,%rsi,4), %edi movzwl 0x2(%rcx,%rsi,4), %esi addl %edi, %esi incl %esi movl %r10d, %r12d jmp 0xb0a2 movl %r10d, %r12d movl %r11d, %esi jmp 0xb0a5 movl %r10d, %r12d movl %r11d, %esi movl %r11d, %r10d movl 0xc(%rsp), %ebp movq 0x8(%rbx), %r11 movl %ebp, %r13d movw %ax, (%r11,%r13,4) notl %eax addl %r10d, %eax movw %ax, 0x2(%r11,%r13,4) incl %ebp movl %ebp, (%rbx) jmp 0xaf96 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_intersection_cardinality
/* Cardinality of the intersection of two run containers, computed with
 * the same interval sweep as run_container_intersection but without
 * materializing the result. */
int run_container_intersection_cardinality(const run_container_t *src_1,
                                           const run_container_t *src_2) {
    const bool if1 = run_container_is_full(src_1);
    const bool if2 = run_container_is_full(src_2);
    if (if1 || if2) {
        // A full container intersected with X has X's cardinality.
        if (if1) {
            return run_container_cardinality(src_2);
        }
        if (if2) {
            return run_container_cardinality(src_1);
        }
    }
    int answer = 0;
    int32_t rlepos = 0;
    int32_t xrlepos = 0;
    int32_t start = src_1->runs[rlepos].value;
    int32_t end = start + src_1->runs[rlepos].length + 1;    // exclusive end
    int32_t xstart = src_2->runs[xrlepos].value;
    int32_t xend = xstart + src_2->runs[xrlepos].length + 1;  // exclusive end
    while ((rlepos < src_1->n_runs) && (xrlepos < src_2->n_runs)) {
        if (end <= xstart) {
            // src_1's run is wholly before src_2's: advance src_1.
            ++rlepos;
            if (rlepos < src_1->n_runs) {
                start = src_1->runs[rlepos].value;
                end = start + src_1->runs[rlepos].length + 1;
            }
        } else if (xend <= start) {
            // src_2's run is wholly before src_1's: advance src_2.
            ++xrlepos;
            if (xrlepos < src_2->n_runs) {
                xstart = src_2->runs[xrlepos].value;
                xend = xstart + src_2->runs[xrlepos].length + 1;
            }
        } else {  // they overlap
            const int32_t lateststart = start > xstart ? start : xstart;
            int32_t earliestend;
            if (end == xend) {  // improbable
                earliestend = end;
                rlepos++;
                xrlepos++;
                if (rlepos < src_1->n_runs) {
                    start = src_1->runs[rlepos].value;
                    end = start + src_1->runs[rlepos].length + 1;
                }
                if (xrlepos < src_2->n_runs) {
                    xstart = src_2->runs[xrlepos].value;
                    xend = xstart + src_2->runs[xrlepos].length + 1;
                }
            } else if (end < xend) {
                earliestend = end;
                rlepos++;
                if (rlepos < src_1->n_runs) {
                    start = src_1->runs[rlepos].value;
                    end = start + src_1->runs[rlepos].length + 1;
                }
            } else {  // end > xend
                earliestend = xend;
                xrlepos++;
                if (xrlepos < src_2->n_runs) {
                    xstart = src_2->runs[xrlepos].value;
                    xend = xstart + src_2->runs[xrlepos].length + 1;
                }
            }
            // Count the overlap [lateststart, earliestend).
            answer += earliestend - lateststart;
        }
    }
    return answer;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq %rsi, %rbx movq %rdi, %r14 callq 0xa5df movl %eax, %ebp movq %rbx, %rdi callq 0xa5df orb %bpl, %al testb $0x1, %al je 0xb113 testb %bpl, %bpl cmovneq %rbx, %r14 movq %r14, %rdi popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp jmp 0xb25b movq 0x8(%r14), %rcx movzwl (%rcx), %esi movzwl 0x2(%rcx), %eax leal (%rsi,%rax), %r15d incl %r15d movq 0x8(%rbx), %rdx movzwl (%rdx), %r8d movzwl 0x2(%rdx), %eax leal (%r8,%rax), %r12d incl %r12d movl (%r14), %edi xorl %eax, %eax xorl %r9d, %r9d xorl %r10d, %r10d movl %r15d, %r11d movl %r12d, %ebp cmpl %edi, %r9d jge 0xb252 movl (%rbx), %r12d cmpl %r12d, %r10d jge 0xb252 cmpl %r8d, %r11d jle 0xb185 cmpl %esi, %ebp jg 0xb1a3 incl %r10d cmpl %r12d, %r10d jge 0xb149 movslq %r10d, %r14 movzwl (%rdx,%r14,4), %r8d movzwl 0x2(%rdx,%r14,4), %r14d leal (%r8,%r14), %ebp incl %ebp jmp 0xb149 incl %r9d cmpl %edi, %r9d jge 0xb149 movslq %r9d, %r11 movzwl (%rcx,%r11,4), %esi movzwl 0x2(%rcx,%r11,4), %r11d addl %esi, %r11d incl %r11d jmp 0xb149 cmpl %r8d, %esi movl %r8d, %r14d cmovgl %esi, %r14d cmpl %ebp, %r11d jne 0xb1ef incl %r9d movl %r11d, %r15d cmpl %edi, %r9d jge 0xb1d1 movslq %r9d, %r15 movzwl (%rcx,%r15,4), %esi movzwl 0x2(%rcx,%r15,4), %r15d addl %esi, %r15d incl %r15d incl %r10d cmpl %r12d, %r10d jge 0xb20f movslq %r10d, %r12 movzwl (%rdx,%r12,4), %r8d movzwl 0x2(%rdx,%r12,4), %r12d addl %r8d, %r12d incl %r12d jmp 0xb247 jge 0xb214 incl %r9d cmpl %edi, %r9d jge 0xb236 movslq %r9d, %r15 movzwl (%rcx,%r15,4), %esi movzwl 0x2(%rcx,%r15,4), %r15d addl %esi, %r15d incl %r15d jmp 0xb239 movl %r11d, %r12d jmp 0xb247 incl %r10d cmpl %r12d, %r10d jge 0xb23e movslq %r10d, %r15 movzwl (%rdx,%r15,4), %r8d movzwl 0x2(%rdx,%r15,4), %r15d leal (%r8,%r15), %r12d incl %r12d movl %r11d, %r15d jmp 0xb244 movl %r11d, %r15d movl %ebp, %r12d jmp 0xb247 movl %r11d, %r15d movl %ebp, %r12d movl %ebp, %r11d subl %r14d, %eax addl %r11d, %eax jmp 0xb143 popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp 
retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
run_container_rank
/* Rank of x: the number of stored values that are <= x. */
int run_container_rank(const run_container_t *container, uint16_t x) {
    const uint32_t target = x;
    int preceding = 0;
    for (int k = 0; k < container->n_runs; ++k) {
        const uint32_t lo = container->runs[k].value;
        const uint32_t hi = lo + container->runs[k].length;  // inclusive end
        if (target > hi) {
            // The whole run precedes x; count it and keep scanning.
            preceding += (int)(hi - lo) + 1;
            continue;
        }
        if (target >= lo) {
            // x falls inside this run.
            preceding += (int)(target - lo) + 1;
        }
        // Runs are sorted, so no later run can contain values <= x.
        return preceding;
    }
    return preceding;
}
movl (%rdi), %ecx xorl %edx, %edx testl %ecx, %ecx cmovlel %edx, %ecx xorl %eax, %eax cmpq %rdx, %rcx je 0xb8cf movq 0x8(%rdi), %r9 movzwl (%r9,%rdx,4), %r8d movzwl 0x2(%r9,%rdx,4), %r9d leal (%r9,%r8), %r10d cmpl %esi, %r10d jae 0xb8c0 addl %r9d, %eax incl %eax incq %rdx jmp 0xb899 cmpw %si, %r8w ja 0xb8cf addl %eax, %esi subl %r8d, %esi incl %esi movl %esi, %eax retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
avx2_run_container_to_uint32_array
/* Expand a run container into an array of uint32 values at `vout`,
 * adding `base` (the container's high bits) to every value.  Returns the
 * number of values written.  Runs of length >= 8 are emitted eight at a
 * time with AVX2; short runs and tails are emitted scalar. */
ALLOW_UNALIGNED
int _avx2_run_container_to_uint32_array(void *vout, const run_container_t *cont,
                                        uint32_t base) {
    int outpos = 0;
    uint32_t *out = (uint32_t *)vout;
    for (int i = 0; i < cont->n_runs; ++i) {
        uint32_t run_start = base + cont->runs[i].value;
        uint16_t le = cont->runs[i].length;
        if (le < 8) {
            // Short run: scalar emit of le+1 consecutive values.
            for (int j = 0; j <= le; ++j) {
                uint32_t val = run_start + j;
                memcpy(out + outpos, &val,
                       sizeof(uint32_t));  // should be compiled as a MOV on x64
                outpos++;
            }
        } else {
            int j = 0;
            __m256i run_start_v = _mm256_set1_epi32(run_start);
            // [8,8,8,8....]
            __m256i inc = _mm256_set1_epi32(8);
            // used for generate sequence:
            // [0, 1, 2, 3...], [8, 9, 10,...]
            __m256i delta = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
            for (j = 0; j + 8 <= le; j += 8) {
                __m256i val_v = _mm256_add_epi32(run_start_v, delta);
                _mm256_storeu_si256((__m256i *)(out + outpos), val_v);
                delta = _mm256_add_epi32(inc, delta);
                outpos += 8;
            }
            // Scalar tail: the remaining (le + 1) % 8 values of the run.
            for (; j <= le; ++j) {
                uint32_t val = run_start + j;
                memcpy(out + outpos, &val,
                       sizeof(uint32_t));  // should be compiled as a MOV on x64
                outpos++;
            }
        }
    }
    return outpos;
}
pushq %rbx xorl %ecx, %ecx vpmovsxbd 0x3c71(%rip), %ymm0 # 0xf664 movl $0x8, %r8d vpbroadcastd 0x3c5e(%rip), %ymm1 # 0xf660 xorl %eax, %eax movslq (%rsi), %r9 cmpq %r9, %rcx jge 0xba88 movq 0x8(%rsi), %r10 movzwl (%r10,%rcx,4), %r9d addl %edx, %r9d movzwl 0x2(%r10,%rcx,4), %r10d cltq cmpl $0x7, %r10d ja 0xba3b incl %r10d subl $0x1, %r10d jb 0xba80 movl %r9d, (%rdi,%rax,4) incq %rax incl %r9d jmp 0xba29 vmovd %r9d, %xmm2 vpbroadcastd %xmm2, %ymm2 movl %r10d, %r11d andl $-0x8, %r11d movl %r8d, %ebx vmovdqa %ymm0, %ymm3 cmpl %r10d, %ebx ja 0xba7b vpaddd %ymm2, %ymm3, %ymm4 vmovdqu %ymm4, (%rdi,%rax,4) vpaddd %ymm1, %ymm3, %ymm3 addq $0x8, %rax addl $0x8, %ebx jmp 0xba53 leal (%r9,%r11), %ebx movl %ebx, (%rdi,%rax,4) incq %rax incl %r11d cmpl %r10d, %r11d jbe 0xba6e incq %rcx jmp 0xba04 popq %rbx vzeroupper retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
avx512_run_container_cardinality
/*
 * AVX-512 cardinality of a run container.
 *
 * Each rle16_t pair {value, length} represents length+1 values, so the sum
 * starts at n_runs (the implicit +1 of every pair) and then accumulates the
 * length fields. Sixteen runs (one 64-byte vector) are processed per
 * iteration: a logical right shift by 16 within each 32-bit lane isolates
 * the length halfword of each pair.
 */
static inline int _avx512_run_container_cardinality(
    const run_container_t *run) {
    const int32_t n_runs = run->n_runs;
    const rle16_t *runs = run->runs;

    /* by initializing with n_runs, we omit counting the +1 for each pair. */
    int sum = n_runs;
    int32_t k = 0;
    const int32_t step = sizeof(__m512i) / sizeof(rle16_t);
    if (n_runs > step) {
        __m512i total = _mm512_setzero_si512();
        for (; k + step <= n_runs; k += step) {
            __m512i ymm1 = _mm512_loadu_si512((const __m512i *)(runs + k));
            /* keep only the length field of each {value, length} lane */
            __m512i justlengths = _mm512_srli_epi32(ymm1, 16);
            total = _mm512_add_epi32(total, justlengths);
        }
        /* horizontal reduction of the 16 per-lane partial sums */
        __m256i lo = _mm512_extracti32x8_epi32(total, 0);
        __m256i hi = _mm512_extracti32x8_epi32(total, 1);
        // a store might be faster than extract?
        uint32_t buffer[sizeof(__m256i) / sizeof(rle16_t)];
        _mm256_storeu_si256((__m256i *)buffer, lo);
        sum += (buffer[0] + buffer[1]) + (buffer[2] + buffer[3]) +
               (buffer[4] + buffer[5]) + (buffer[6] + buffer[7]);
        _mm256_storeu_si256((__m256i *)buffer, hi);
        sum += (buffer[0] + buffer[1]) + (buffer[2] + buffer[3]) +
               (buffer[4] + buffer[5]) + (buffer[6] + buffer[7]);
    }
    /* scalar tail: fewer than `step` runs remain */
    for (; k < n_runs; ++k) {
        sum += runs[k].length;
    }
    return sum;
}
movl (%rdi), %ecx movq 0x8(%rdi), %rdx cmpl $0x11, %ecx jl 0xbac1 movl %ecx, %esi andl $0x7ffffff0, %esi # imm = 0x7FFFFFF0 vpxor %xmm0, %xmm0, %xmm0 pushq $0x10 popq %rax cmpq %rcx, %rax ja 0xbac7 vpsrld $0x10, -0x40(%rdx,%rax,4), %zmm1 vpaddd %zmm0, %zmm1, %zmm0 addq $0x10, %rax jmp 0xbaa7 xorl %esi, %esi movl %ecx, %eax jmp 0xbb02 vextracti64x4 $0x1, %zmm0, %ymm1 vpaddd %zmm1, %zmm0, %zmm0 vextracti128 $0x1, %ymm0, %xmm1 vpaddd %xmm1, %xmm0, %xmm0 vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3] vpaddd %xmm1, %xmm0, %xmm0 vpshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1] vpaddd %xmm1, %xmm0, %xmm0 vmovd %xmm0, %eax addl %ecx, %eax jmp 0xbb02 movzwl 0x2(%rdx,%rsi,4), %edi addl %edi, %eax incq %rsi cmpl %esi, %ecx jg 0xbaf8 vzeroupper retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
avx2_run_container_cardinality
/*
 * Cardinality of a run container (scalar path).
 *
 * A run {value, length} covers length + 1 consecutive values, so the total
 * is n_runs (one implicit element per run) plus the sum of all length
 * fields.
 */
ALLOW_UNALIGNED
int run_container_cardinality(const run_container_t *run) {
    const int32_t count = run->n_runs;
    const rle16_t *cursor = run->runs;
    const rle16_t *stop = cursor + count;

    int total = count; /* accounts for the +1 of every pair */
    while (cursor != stop) {
        total += cursor->length;
        ++cursor;
    }
    return total;
}
movl (%rdi), %ecx movq 0x8(%rdi), %rdx cmpl $0x9, %ecx jl 0xbb43 movl %ecx, %esi andl $0x7ffffff8, %esi # imm = 0x7FFFFFF8 vpxor %xmm0, %xmm0, %xmm0 pushq $0x8 popq %rax movq %rdx, %rdi cmpq %rcx, %rax ja 0xbb49 vlddqu (%rdi), %ymm1 vpsrld $0x10, %ymm1, %ymm1 vpaddd %ymm0, %ymm1, %ymm0 addq $0x20, %rdi addq $0x8, %rax jmp 0xbb27 xorl %esi, %esi movl %ecx, %eax jmp 0xbb70 vextracti128 $0x1, %ymm0, %xmm1 vphaddd %xmm0, %xmm1, %xmm0 vphaddd %xmm0, %xmm0, %xmm0 vphaddd %xmm0, %xmm0, %xmm0 vmovd %xmm0, %eax addl %ecx, %eax jmp 0xbb70 movzwl 0x2(%rdx,%rsi,4), %edi addl %edi, %eax incq %rsi cmpl %esi, %ecx jg 0xbb66 vzeroupper retq
/RoaringBitmap[P]CRoaring/src/containers/run.c
roaring_bitmap_aligned_malloc
/*
 * Allocate `size` bytes aligned to `alignment`.
 *
 * Returns NULL on failure. The returned block must be released with
 * roaring_bitmap_aligned_free (the platform-specific allocators below are
 * not interchangeable with plain free on Windows/MinGW).
 */
static void* roaring_bitmap_aligned_malloc(size_t alignment, size_t size) {
#ifdef _MSC_VER
    return _aligned_malloc(size, alignment);
#elif defined(__MINGW32__) || defined(__MINGW64__)
    return __mingw_aligned_malloc(size, alignment);
#else
    // somehow, if this is used before including "x86intrin.h", it creates an
    // implicit defined warning.
    void* p = NULL;
    if (posix_memalign(&p, alignment, size) != 0) {
        return NULL;  /* posix_memalign leaves *p unspecified on failure */
    }
    return p;
#endif
}
pushq %rax movq %rsi, %rdx movq %rdi, %rsi movq %rsp, %rdi callq 0x10f0 testl %eax, %eax jne 0xbc5f movq (%rsp), %rax jmp 0xbc61 xorl %eax, %eax popq %rcx retq
/RoaringBitmap[P]CRoaring/src/memory.c
roaring_bitmap_aligned_free
/*
 * Release a block obtained from roaring_bitmap_aligned_malloc.
 *
 * Dispatches to the allocator matching the platform-specific aligned
 * allocation used above; passing NULL is harmless.
 */
static void roaring_bitmap_aligned_free(void* memblock) {
#ifdef _MSC_VER
    _aligned_free(memblock);
#elif defined(__MINGW32__) || defined(__MINGW64__)
    __mingw_aligned_free(memblock);
#else
    free(memblock);  /* plain POSIX path: posix_memalign pairs with free */
#endif
}
jmp 0x1030
/RoaringBitmap[P]CRoaring/src/memory.c
croaring_hardware_support
/*
 * Report which vector instruction sets this build may use at runtime.
 *
 * The probe result is computed once and cached in the static `support`
 * variable; the sentinel 0xFFFFFFF means "not probed yet". When the C11
 * atomics implementation is selected, the cache is declared _Atomic.
 * Returns a bitmask of ROARING_SUPPORTS_AVX2 / ROARING_SUPPORTS_AVX512.
 */
int croaring_hardware_support(void) {
    static
#if CROARING_ATOMIC_IMPL == CROARING_ATOMIC_IMPL_C
        _Atomic
#endif
        int support = 0xFFFFFFF;
    if (support == 0xFFFFFFF) {
        bool has_avx2 = (croaring_detect_supported_architectures() &
                         CROARING_AVX2) == CROARING_AVX2;
        bool has_avx512 = false;
#if CROARING_COMPILER_SUPPORTS_AVX512
        /* AVX-512 needs every feature bit in CROARING_AVX512_REQUIRED */
        has_avx512 = (croaring_detect_supported_architectures() &
                      CROARING_AVX512_REQUIRED) == CROARING_AVX512_REQUIRED;
#endif  // CROARING_COMPILER_SUPPORTS_AVX512
        support = (has_avx2 ? ROARING_SUPPORTS_AVX2 : 0) |
                  (has_avx512 ? ROARING_SUPPORTS_AVX512 : 0);
    }
    return support;
}
movl 0xd45e(%rip), %eax # 0x190cc cmpl $0xfffffff, %eax # imm = 0xFFFFFFF jne 0xbc9f pushq %rbx callq 0xbca6 movl %eax, %ebx callq 0xbca6 notl %eax xorl %ecx, %ecx testl %eax, 0xd43c(%rip) # 0x190c8 sete %cl shrl $0x2, %ebx andl $0x1, %ebx leal (%rbx,%rcx,2), %eax xchgl %eax, 0xd42e(%rip) # 0x190cc popq %rbx movl 0xd427(%rip), %eax # 0x190cc retq
/RoaringBitmap[P]CRoaring/src/isadetection.c
intersect_vector16_cardinality
/*
 * SSE4.2 cardinality of the intersection of two sorted uint16 arrays.
 *
 * Uses the PCMPxSTRM string-compare instructions to match 8 values of A
 * against 8 values of B at a time; the popcount of the resulting bit mask
 * counts matches. The implicit-length form (_mm_cmpistrm) treats a zero
 * element as a terminator, so while either current block contains a zero
 * value the explicit-length form (_mm_cmpestrm) is used instead.
 * Whichever block has the smaller maximum is advanced (both, on a tie).
 * Leftover elements past the last full vector are handled by a scalar
 * merge at the end.
 */
int32_t intersect_vector16_cardinality(const uint16_t *__restrict__ A,
                                       size_t s_a,
                                       const uint16_t *__restrict__ B,
                                       size_t s_b) {
    size_t count = 0;
    size_t i_a = 0, i_b = 0;
    const int vectorlength = sizeof(__m128i) / sizeof(uint16_t);
    /* st_a / st_b: sizes rounded down to a multiple of 8 */
    const size_t st_a = (s_a / vectorlength) * vectorlength;
    const size_t st_b = (s_b / vectorlength) * vectorlength;
    __m128i v_a, v_b;
    if ((i_a < st_a) && (i_b < st_b)) {
        v_a = _mm_lddqu_si128((__m128i *)&A[i_a]);
        v_b = _mm_lddqu_si128((__m128i *)&B[i_b]);
        /* explicit-length loop: safe while a zero value is present */
        while ((A[i_a] == 0) || (B[i_b] == 0)) {
            const __m128i res_v = _mm_cmpestrm(
                v_b, vectorlength, v_a, vectorlength,
                _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK);
            const int r = _mm_extract_epi32(res_v, 0);
            count += _mm_popcnt_u32(r);
            const uint16_t a_max = A[i_a + vectorlength - 1];
            const uint16_t b_max = B[i_b + vectorlength - 1];
            if (a_max <= b_max) {
                i_a += vectorlength;
                if (i_a == st_a) break;
                v_a = _mm_lddqu_si128((__m128i *)&A[i_a]);
            }
            if (b_max <= a_max) {
                i_b += vectorlength;
                if (i_b == st_b) break;
                v_b = _mm_lddqu_si128((__m128i *)&B[i_b]);
            }
        }
        /* implicit-length fast loop: no zero values remain in the blocks */
        if ((i_a < st_a) && (i_b < st_b))
            while (true) {
                const __m128i res_v = _mm_cmpistrm(
                    v_b, v_a,
                    _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK);
                const int r = _mm_extract_epi32(res_v, 0);
                count += _mm_popcnt_u32(r);
                const uint16_t a_max = A[i_a + vectorlength - 1];
                const uint16_t b_max = B[i_b + vectorlength - 1];
                if (a_max <= b_max) {
                    i_a += vectorlength;
                    if (i_a == st_a) break;
                    v_a = _mm_lddqu_si128((__m128i *)&A[i_a]);
                }
                if (b_max <= a_max) {
                    i_b += vectorlength;
                    if (i_b == st_b) break;
                    v_b = _mm_lddqu_si128((__m128i *)&B[i_b]);
                }
            }
    }
    // intersect the tail using scalar intersection
    while (i_a < s_a && i_b < s_b) {
        uint16_t a = A[i_a];
        uint16_t b = B[i_b];
        if (a < b) {
            i_a++;
        } else if (b < a) {
            i_b++;
        } else {
            count++;
            i_a++;
            i_b++;
        }
    }
    return (int32_t)count;
}
pushq %rbp pushq %r14 pushq %rbx movq %rdx, %r8 movq %rsi, %rbx andq $-0x8, %rbx setne %al movq %rcx, %r14 andq $-0x8, %r14 setne %dl testb %dl, %al jne 0xc182 xorl %r10d, %r10d xorl %r11d, %r11d xorl %r9d, %r9d jmp 0xc255 vlddqu (%rdi), %xmm1 vlddqu (%r8), %xmm2 xorl %r10d, %r10d movl $0x8, %eax xorl %r11d, %r11d xorl %r9d, %r9d cmpw $0x0, (%rdi,%r11,2) je 0xc1a9 cmpw $0x0, (%r8,%r10,2) jne 0xc1f6 movl %eax, %edx vpcmpestrm $0x1, %xmm1, %xmm2 vmovd %xmm0, %edx popcntl %edx, %edx addl %edx, %r9d movzwl 0xe(%rdi,%r11,2), %edx movzwl 0xe(%r8,%r10,2), %ebp cmpw %bp, %dx ja 0xc1dc addq $0x8, %r11 cmpq %rbx, %r11 je 0xc252 vlddqu (%rdi,%r11,2), %xmm1 cmpw %dx, %bp ja 0xc199 addq $0x8, %r10 cmpq %r14, %r10 je 0xc28a vlddqu (%r8,%r10,2), %xmm2 jmp 0xc199 cmpq %rbx, %r11 setae %al cmpq %r14, %r10 setae %dl orb %al, %dl jne 0xc255 movzwl 0xe(%r8,%r10,2), %eax vpcmpistrm $0x1, %xmm1, %xmm2 movl %r9d, %edx vmovd %xmm0, %r9d popcntl %r9d, %r9d addl %edx, %r9d movzwl 0xe(%rdi,%r11,2), %edx cmpw %ax, %dx ja 0xc23c addq $0x8, %r11 cmpq %rbx, %r11 je 0xc252 vlddqu (%rdi,%r11,2), %xmm1 cmpw %dx, %ax ja 0xc20c addq $0x8, %r10 cmpq %r14, %r10 je 0xc28a vlddqu (%r8,%r10,2), %xmm2 jmp 0xc206 movq %rbx, %r11 cmpq %rsi, %r11 jae 0xc282 cmpq %rcx, %r10 jae 0xc282 movzwl (%rdi,%r11,2), %eax movzwl (%r8,%r10,2), %edx cmpw %dx, %ax jae 0xc273 incq %r11 jmp 0xc255 jbe 0xc27a incq %r10 jmp 0xc255 incl %r9d incq %r11 jmp 0xc275 movl %r9d, %eax popq %rbx popq %r14 popq %rbp retq movq %r14, %r10 jmp 0xc255
/RoaringBitmap[P]CRoaring/src/array_util.c
difference_vector16
/*
 * SSE4.2 difference C = A \ B of two sorted uint16 arrays.
 *
 * Writes the elements of A absent from B into C (C may alias A) and
 * returns the number written. Eight elements of A are matched against
 * eight of B at a time with _mm_cmpistrm; because that implicit-length
 * instruction treats a zero element as a terminator, leading zeroes are
 * stripped up front. A running bit mask accumulates which lanes of the
 * current A-vector were seen in any B-vector; once the A-vector can be
 * retired, the complement of that mask selects the surviving lanes via a
 * shuffle table (shuffle_mask16) and a compacted store.
 */
int32_t difference_vector16(const uint16_t *__restrict__ A, size_t s_a,
                            const uint16_t *__restrict__ B, size_t s_b,
                            uint16_t *C) {
    // we handle the degenerate case
    if (s_a == 0) return 0;
    if (s_b == 0) {
        if (A != C) memcpy(C, A, sizeof(uint16_t) * s_a);
        return (int32_t)s_a;
    }
    // handle the leading zeroes, it is messy but it allows us to use the fast
    // _mm_cmpistrm instrinsic safely
    int32_t count = 0;
    if ((A[0] == 0) || (B[0] == 0)) {
        if ((A[0] == 0) && (B[0] == 0)) {
            /* zero is in both: not part of the difference */
            A++;
            s_a--;
            B++;
            s_b--;
        } else if (A[0] == 0) {
            /* zero only in A: it survives into the output */
            C[count++] = 0;
            A++;
            s_a--;
        } else {
            B++;
            s_b--;
        }
    }
    // at this point, we have two non-empty arrays, made of non-zero
    // increasing values.
    size_t i_a = 0, i_b = 0;
    const size_t vectorlength = sizeof(__m128i) / sizeof(uint16_t);
    const size_t st_a = (s_a / vectorlength) * vectorlength;
    const size_t st_b = (s_b / vectorlength) * vectorlength;
    if ((i_a < st_a) && (i_b < st_b)) {
        // this is the vectorized code path
        __m128i v_a, v_b;  //, v_bmax;
        // we load a vector from A and a vector from B
        v_a = _mm_lddqu_si128((__m128i *)&A[i_a]);
        v_b = _mm_lddqu_si128((__m128i *)&B[i_b]);
        // we have a runningmask which indicates which values from A have been
        // spotted in B, these don't get written out.
        __m128i runningmask_a_found_in_b = _mm_setzero_si128();
        /****
         * start of the main vectorized loop
         *****/
        while (true) {
            // afoundinb will contain a mask indicate for each entry in A
            // whether it is seen
            // in B
            const __m128i a_found_in_b =
                _mm_cmpistrm(v_b, v_a,
                             _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY |
                                 _SIDD_BIT_MASK);
            runningmask_a_found_in_b =
                _mm_or_si128(runningmask_a_found_in_b, a_found_in_b);
            // we always compare the last values of A and B
            const uint16_t a_max = A[i_a + vectorlength - 1];
            const uint16_t b_max = B[i_b + vectorlength - 1];
            if (a_max <= b_max) {
                // Ok. In this code path, we are ready to write our v_a
                // because there is no need to read more from B, they will
                // all be large values.
                const int bitmask_belongs_to_difference =
                    _mm_extract_epi32(runningmask_a_found_in_b, 0) ^ 0xFF;
                /*** next few lines are probably expensive *****/
                __m128i sm16 = _mm_loadu_si128((const __m128i *)shuffle_mask16 +
                                               bitmask_belongs_to_difference);
                __m128i p = _mm_shuffle_epi8(v_a, sm16);
                _mm_storeu_si128((__m128i *)&C[count], p);  // can overflow
                count += _mm_popcnt_u32(bitmask_belongs_to_difference);
                // we advance a
                i_a += vectorlength;
                if (i_a == st_a)  // no more
                    break;
                runningmask_a_found_in_b = _mm_setzero_si128();
                v_a = _mm_lddqu_si128((__m128i *)&A[i_a]);
            }
            if (b_max <= a_max) {
                // in this code path, the current v_b has become useless
                i_b += vectorlength;
                if (i_b == st_b) break;
                v_b = _mm_lddqu_si128((__m128i *)&B[i_b]);
            }
        }
        // at this point, either we have i_a == st_a, which is the end of the
        // vectorized processing,
        // or we have i_b == st_b, and we are not done processing the vector...
        // so we need to finish it off.
        if (i_a < st_a) {        // we have unfinished business...
            uint16_t buffer[8];  // buffer to do a masked load
            /* zero-padded copy of B's remaining tail (< 8 values) */
            memset(buffer, 0, 8 * sizeof(uint16_t));
            memcpy(buffer, B + i_b, (s_b - i_b) * sizeof(uint16_t));
            v_b = _mm_lddqu_si128((__m128i *)buffer);
            const __m128i a_found_in_b =
                _mm_cmpistrm(v_b, v_a,
                             _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY |
                                 _SIDD_BIT_MASK);
            runningmask_a_found_in_b =
                _mm_or_si128(runningmask_a_found_in_b, a_found_in_b);
            const int bitmask_belongs_to_difference =
                _mm_extract_epi32(runningmask_a_found_in_b, 0) ^ 0xFF;
            __m128i sm16 = _mm_loadu_si128((const __m128i *)shuffle_mask16 +
                                           bitmask_belongs_to_difference);
            __m128i p = _mm_shuffle_epi8(v_a, sm16);
            _mm_storeu_si128((__m128i *)&C[count], p);  // can overflow
            count += _mm_popcnt_u32(bitmask_belongs_to_difference);
            i_a += vectorlength;
        }
        // at this point we should have i_a == st_a and i_b == st_b
    }
    // do the tail using scalar code
    while (i_a < s_a && i_b < s_b) {
        uint16_t a = A[i_a];
        uint16_t b = B[i_b];
        if (b < a) {
            i_b++;
        } else if (a < b) {
            C[count] = a;
            count++;
            i_a++;
        } else {  //==
            i_a++;
            i_b++;
        }
    }
    if (i_a < s_a) {
        /* B is exhausted: the rest of A survives verbatim */
        if (C == A) {
            assert((size_t)count <= i_a);
            if ((size_t)count < i_a) {
                /* in-place: regions may overlap, hence memmove */
                memmove(C + count, A + i_a, sizeof(uint16_t) * (s_a - i_a));
            }
        } else {
            for (size_t i = 0; i < (s_a - i_a); i++) {
                C[count + i] = A[i + i_a];
            }
        }
        count += (int32_t)(s_a - i_a);
    }
    return count;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x48, %rsp testq %rsi, %rsi je 0xc2ce movq %rsi, %rbx movq %rdi, %r15 testq %rcx, %rcx je 0xc2d5 movq %rdx, %r13 movq %rcx, %r12 cmpw $0x0, (%r15) movzwl (%rdx), %eax je 0xc2f0 xorl %r14d, %r14d testw %ax, %ax jne 0xc318 addq $0x2, %r13 decq %r12 jmp 0xc318 xorl %ebp, %ebp jmp 0xc49c cmpq %r8, %r15 je 0xc2e9 leaq (%rbx,%rbx), %rdx movq %r8, %rdi movq %r15, %rsi callq 0x10a0 movl %ebx, %ebp jmp 0xc49c testw %ax, %ax je 0xc307 andw $0x0, (%r8) addq $0x2, %r15 decq %rbx pushq $0x1 popq %r14 jmp 0xc318 addq $0x2, %r15 decq %rbx addq $0x2, %r13 decq %r12 xorl %r14d, %r14d movq %rbx, %rax andq $-0x8, %rax sete %cl movq %r12, 0x8(%rsp) andq $-0x8, %r12 sete %dl orb %cl, %dl movq %r8, (%rsp) je 0xc340 xorl %r12d, %r12d xorl %eax, %eax jmp 0xc3d3 vlddqu (%r15), %xmm2 vpxor %xmm3, %xmm3, %xmm3 xorl %ecx, %ecx leaq 0x3cae(%rip), %r10 # 0x10000 movq %r13, %rdx xorl %ebp, %ebp vlddqu (%rdx), %xmm1 movzwl 0xe(%r13,%rcx,2), %edx vpcmpistrm $0x1, %xmm2, %xmm1 vpor %xmm0, %xmm3, %xmm3 movzwl 0xe(%r15,%rbp,2), %esi cmpw %dx, %si ja 0xc3b1 addq $0x8, %rbp vmovd %xmm3, %edi xorl $0xff, %edi movslq %edi, %rdi popcntl %edi, %r9d shlq $0x4, %rdi vpshufb (%rdi,%r10), %xmm2, %xmm0 movslq %r14d, %r14 vmovdqu %xmm0, (%r8,%r14,2) addl %r9d, %r14d cmpq %rax, %rbp je 0xc3d0 vlddqu (%r15,%rbp,2), %xmm2 vpxor %xmm3, %xmm3, %xmm3 cmpw %si, %dx ja 0xc361 addq $0x8, %rcx cmpq %r12, %rcx je 0xc4ad leaq (,%rcx,2), %rdx addq %r13, %rdx jmp 0xc357 movq %rcx, %r12 leaq (%rbx,%rbx), %rdx movq %rdx, 0x10(%rsp) subq %rax, %rdx subq %rax, %rdx leaq (%r15,%rax,2), %rcx movslq %r14d, %rdi leaq (%r8,%rdi,2), %rsi xorl %ebp, %ebp movq 0x8(%rsp), %r8 leaq (%rax,%rbp), %r9 cmpq %rbx, %r9 jae 0xc43e cmpq %r8, %r12 jae 0xc43e movzwl (%rcx,%rbp,2), %r10d movzwl (%r13,%r12,2), %r11d cmpw %r10w, %r11w jb 0xc42a jbe 0xc423 movw %r10w, (%rsi,%rbp,2) incq %rbp addq $-0x2, %rdx jmp 0xc3f4 leaq (%rax,%rbp), %r9 incq %r9 incq %r12 addl %ebp, %r14d movq 
%r9, %rax movq (%rsp), %r8 movq 0x10(%rsp), %rdx jmp 0xc3dc cmpq %rbx, %r9 jae 0xc467 cmpq (%rsp), %r15 je 0xc471 subq %rax, %rbx movq %rbx, %rax subq %rbp, %rax movq %rbp, %rdx cmpq %rdx, %rbx je 0xc497 movzwl (%rcx,%rdx,2), %edi movw %di, (%rsi,%rdx,2) incq %rdx jmp 0xc455 movl %r14d, %eax addq %rbp, %rax movl %eax, %ebp jmp 0xc49c addq %rbp, %rdi cmpq %rdi, %r9 jbe 0xc48e leaq (%rsi,%rbp,2), %rdi leaq (%rcx,%rbp,2), %rsi subq %rax, %rbx subq %rbp, %rbx callq 0x10a0 jmp 0xc494 subq %rax, %rbx subq %rbp, %rbx movq %rbx, %rax addl %r14d, %eax addl %eax, %ebp movl %ebp, %eax addq $0x48, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq cmpq %rax, %rbp jae 0xc539 vpxor %xmm0, %xmm0, %xmm0 leaq 0x30(%rsp), %rdi vmovdqa %xmm0, (%rdi) leaq (,%r12,2), %rsi addq %r13, %rsi movq 0x8(%rsp), %rax leal (%rax,%rax), %edx andl $0xe, %edx vmovaps %xmm2, 0x20(%rsp) vmovdqa %xmm3, 0x10(%rsp) callq 0x10a0 movq (%rsp), %r8 leaq 0x30(%rsp), %rax vlddqu (%rax), %xmm0 vmovdqa 0x20(%rsp), %xmm1 vpcmpistrm $0x1, %xmm1, %xmm0 vpor 0x10(%rsp), %xmm0, %xmm0 vmovd %xmm0, %eax xorl $0xff, %eax cltq popcntl %eax, %ecx shlq $0x4, %rax leaq 0x3add(%rip), %rdx # 0x10000 vpshufb (%rax,%rdx), %xmm1, %xmm0 movslq %r14d, %r14 vmovdqu %xmm0, (%r8,%r14,2) addl %ecx, %r14d addq $0x8, %rbp movq %rbp, %rax jmp 0xc3d3
/RoaringBitmap[P]CRoaring/src/array_util.c
intersect_skewed_uint16_cardinality
/*
 * Cardinality of the intersection of a small sorted uint16 array with a
 * much larger one.
 *
 * Walks the small array linearly and locates each candidate in the large
 * array with advanceUntil's galloping search, which is efficient when
 * size_s << size_l. Returns the number of common values.
 */
int32_t intersect_skewed_uint16_cardinality(const uint16_t *small,
                                            size_t size_s,
                                            const uint16_t *large,
                                            size_t size_l) {
    size_t pos = 0, idx_l = 0, idx_s = 0;

    // Fix: guard BOTH inputs. The original only checked size_s, so an empty
    // `large` array led to an out-of-bounds read of large[0] below.
    if (0 == size_s || 0 == size_l) {
        return 0;
    }

    uint16_t val_l = large[idx_l], val_s = small[idx_s];

    while (true) {
        if (val_l < val_s) {
            /* gallop forward in `large` to the first value >= val_s */
            idx_l = advanceUntil(large, (int32_t)idx_l, (int32_t)size_l, val_s);
            if (idx_l == size_l) break;
            val_l = large[idx_l];
        } else if (val_s < val_l) {
            idx_s++;
            if (idx_s == size_s) break;
            val_s = small[idx_s];
        } else {
            /* match: count it and advance both sides */
            pos++;
            idx_s++;
            if (idx_s == size_s) break;
            val_s = small[idx_s];
            idx_l = advanceUntil(large, (int32_t)idx_l, (int32_t)size_l, val_s);
            if (idx_l == size_l) break;
            val_l = large[idx_l];
        }
    }

    return (int32_t)pos;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rcx, 0x10(%rsp) testq %rsi, %rsi je 0xc8dd movq %rdx, %rbx movq %rsi, %r12 movq %rdi, %r13 movzwl (%rdi), %r14d xorl %eax, %eax xorl %esi, %esi xorl %ebp, %ebp movq %rdx, %rcx movzwl (%rcx), %ecx incq %rbp cmpw %r14w, %cx jb 0xc87e jbe 0xc8a4 cmpq %rbp, %r12 je 0xc8df movzwl (%r13,%rbp,2), %r14d jmp 0xc866 movq %rax, 0x8(%rsp) movzwl %r14w, %ecx movq %rbx, %rdi movq 0x10(%rsp), %r15 movl %r15d, %edx callq 0xc8f5 movslq %eax, %rsi cmpq %r15, %rsi je 0xc8ee decq %rbp jmp 0xc8d2 incl %eax cmpq %rbp, %r12 je 0xc8df movq %rax, 0x8(%rsp) movzwl (%r13,%rbp,2), %r14d movzwl %r14w, %ecx movq %rbx, %rdi movq 0x10(%rsp), %r15 movl %r15d, %edx callq 0xc8f5 movslq %eax, %rsi cmpq %r15, %rsi je 0xc8ee leaq (%rbx,%rsi,2), %rcx movq 0x8(%rsp), %rax jmp 0xc863 xorl %eax, %eax addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq 0x8(%rsp), %rax jmp 0xc8df
/RoaringBitmap[P]CRoaring/src/array_util.c
advanceUntil
/*
 * Galloping (exponential) search in a sorted uint16 array.
 *
 * Returns the smallest index strictly greater than `pos` whose value is
 * >= `min`, or `length` when no such element exists. The span is doubled
 * until it overshoots, then a binary search narrows the bracket.
 */
static inline int32_t advanceUntil(const uint16_t *array, int32_t pos,
                                   int32_t length, uint16_t min) {
    int32_t lo = pos + 1;

    /* trivial: already past the end, or the very next element qualifies */
    if (lo >= length || array[lo] >= min) {
        return lo;
    }

    /* gallop: double the span while it stays in range and undershoots */
    int32_t span = 1;
    while (lo + span < length && array[lo + span] < min) {
        span *= 2;
    }
    int32_t hi = (lo + span < length) ? lo + span : length - 1;

    if (array[hi] == min) {
        return hi;
    }
    if (array[hi] < min) {
        /* even the last candidate is too small: nothing >= min */
        return length;
    }

    /* the previous (half-size) span was known too small: tighten `lo` */
    lo += span / 2;

    /* classic binary search over the open interval (lo, hi) */
    while (lo + 1 != hi) {
        const int32_t mid = (lo + hi) / 2;
        if (array[mid] == min) {
            return mid;
        }
        if (array[mid] < min) {
            lo = mid;
        } else {
            hi = mid;
        }
    }
    return hi;
}
incl %esi cmpl %edx, %esi jge 0xc924 movl %edx, %eax movslq %esi, %rdx cmpw %cx, (%rdi,%rdx,2) jae 0xc924 pushq $0x1 popq %rdx leal (%rdx,%rsi), %r8d cmpl %eax, %r8d jge 0xc927 movslq %r8d, %r9 movzwl (%rdi,%r9,2), %r9d cmpw %cx, %r9w jae 0xc934 addl %edx, %edx jmp 0xc909 movl %esi, %eax retq leal -0x1(%rax), %r8d movslq %eax, %r9 movzwl -0x2(%rdi,%r9,2), %r9d cmpw %cx, %r9w jne 0xc93e movl %r8d, %eax retq jb 0xc926 sarl %edx addl %esi, %edx leal 0x1(%rdx), %eax cmpl %r8d, %eax je 0xc93a leal (%r8,%rdx), %eax sarl %eax movslq %eax, %rsi cmpw %cx, (%rdi,%rsi,2) je 0xc926 jae 0xc962 movl %eax, %edx movl %r8d, %eax movl %eax, %r8d jmp 0xc944
/RoaringBitmap[P]CRoaring/include/roaring/array_util.h
intersect_uint16_cardinality
/*
 * Number of values common to two sorted uint16 arrays.
 *
 * Plain linear merge: advance whichever side holds the smaller value,
 * count and advance both on a match.
 */
int32_t intersect_uint16_cardinality(const uint16_t *A, const size_t lenA,
                                     const uint16_t *B, const size_t lenB) {
    if (lenA == 0 || lenB == 0) return 0;

    const uint16_t *curA = A;
    const uint16_t *curB = B;
    const uint16_t *stopA = A + lenA;
    const uint16_t *stopB = B + lenB;
    int32_t matches = 0;

    while (curA < stopA && curB < stopB) {
        if (*curA < *curB) {
            ++curA;
        } else if (*curB < *curA) {
            ++curB;
        } else {
            ++matches;
            ++curA;
            ++curB;
        }
    }
    return matches;
}
testq %rsi, %rsi sete %r8b testq %rcx, %rcx sete %r9b xorl %eax, %eax orb %r8b, %r9b je 0xca61 retq leaq (%rdi,%rsi,2), %rsi leaq (%rdx,%rcx,2), %rcx xorl %eax, %eax movzwl (%rdx), %r8d movzwl (%rdi), %r9d cmpw %r8w, %r9w jb 0xca90 movzwl (%rdx), %r8d cmpw %r8w, %r9w jbe 0xca8e addq $0x2, %rdx cmpq %rcx, %rdx jne 0xca79 jmp 0xca60 je 0xca9b addq $0x2, %rdi cmpq %rsi, %rdi jne 0xca6f jmp 0xca60 incl %eax addq $0x2, %rdi cmpq %rsi, %rdi je 0xca60 addq $0x2, %rdx cmpq %rcx, %rdx jne 0xca6b jmp 0xca60
/RoaringBitmap[P]CRoaring/src/array_util.c
intersect_uint16_nonempty
/*
 * True if two sorted uint16 arrays share at least one value.
 *
 * Linear merge that bails out on the first match; false when either side
 * is exhausted (or empty) without one.
 */
bool intersect_uint16_nonempty(const uint16_t *A, const size_t lenA,
                               const uint16_t *B, const size_t lenB) {
    if (lenA == 0 || lenB == 0) return false;

    const uint16_t *pa = A;
    const uint16_t *pb = B;
    const uint16_t *stopA = A + lenA;
    const uint16_t *stopB = B + lenB;

    while (pa < stopA && pb < stopB) {
        if (*pa < *pb) {
            ++pa;
        } else if (*pb < *pa) {
            ++pb;
        } else {
            return true; /* first common value found */
        }
    }
    return false;
}
testq %rsi, %rsi sete %al testq %rcx, %rcx sete %r8b orb %al, %r8b je 0xcac6 xorl %eax, %eax retq leaq (%rdi,%rsi,2), %rax leaq (%rdx,%rcx,2), %rcx movzwl (%rdx), %esi movzwl (%rdi), %r8d cmpw %si, %r8w jb 0xcaf1 movzwl (%rdx), %esi cmpw %si, %r8w jbe 0xcaef addq $0x2, %rdx cmpq %rcx, %rdx jne 0xcadb jmp 0xcac3 je 0xcafc addq $0x2, %rdi cmpq %rax, %rdi jne 0xcad1 jmp 0xcac3 movb $0x1, %al retq
/RoaringBitmap[P]CRoaring/src/array_util.c
intersection_uint32
/*
 * Intersection of two sorted uint32 arrays, written to `out`.
 *
 * Linear merge; `out` receives every value present in both inputs, in
 * order. Returns the number of values written.
 */
size_t intersection_uint32(const uint32_t *A, const size_t lenA,
                           const uint32_t *B, const size_t lenB,
                           uint32_t *out) {
    if (lenA == 0 || lenB == 0) return 0;

    const uint32_t *stopA = A + lenA;
    const uint32_t *stopB = B + lenB;
    uint32_t *cursor = out;

    while (A < stopA && B < stopB) {
        if (*A < *B) {
            ++A;
        } else if (*B < *A) {
            ++B;
        } else {
            *cursor++ = *A;
            ++A;
            ++B;
        }
    }
    return (size_t)(cursor - out);
}
testq %rsi, %rsi sete %al testq %rcx, %rcx sete %r9b orb %al, %r9b je 0xcb14 xorl %eax, %eax retq leaq (%rdi,%rsi,4), %rsi leaq (%rdx,%rcx,4), %rcx movq %r8, %rax movl (%rdx), %r9d movl (%rdi), %r10d cmpl %r9d, %r10d jb 0xcb3f movl (%rdx), %r9d cmpl %r9d, %r10d jbe 0xcb3d addq $0x4, %rdx cmpq %rcx, %rdx jne 0xcb2a jmp 0xcb63 je 0xcb4a addq $0x4, %rdi cmpq %rsi, %rdi jne 0xcb22 jmp 0xcb63 movl %r10d, (%rax) addq $0x4, %rax addq $0x4, %rdi cmpq %rsi, %rdi je 0xcb63 addq $0x4, %rdx cmpq %rcx, %rdx jne 0xcb1f subq %r8, %rax sarq $0x2, %rax retq
/RoaringBitmap[P]CRoaring/src/array_util.c
intersection_uint32_card
/*
 * Cardinality of the intersection of two sorted uint32 arrays.
 *
 * Same linear merge as intersection_uint32, but only counting matches
 * instead of materializing them.
 */
size_t intersection_uint32_card(const uint32_t *A, const size_t lenA,
                                const uint32_t *B, const size_t lenB) {
    if (lenA == 0 || lenB == 0) return 0;

    const uint32_t *pa = A;
    const uint32_t *pb = B;
    const uint32_t *stopA = A + lenA;
    const uint32_t *stopB = B + lenB;
    size_t card = 0;

    while (pa < stopA && pb < stopB) {
        if (*pa < *pb) {
            ++pa;
        } else if (*pb < *pa) {
            ++pb;
        } else {
            ++card;
            ++pa;
            ++pb;
        }
    }
    return card;
}
testq %rsi, %rsi sete %al testq %rcx, %rcx sete %r8b orb %al, %r8b je 0xcb80 xorl %eax, %eax retq leaq (%rdi,%rsi,4), %rsi leaq (%rdx,%rcx,4), %rcx xorl %eax, %eax movl (%rdx), %r8d movl (%rdi), %r9d cmpl %r8d, %r9d jb 0xcbaa movl (%rdx), %r8d cmpl %r8d, %r9d jbe 0xcba8 addq $0x4, %rdx cmpq %rcx, %rdx jne 0xcb95 jmp 0xcb7f je 0xcbb5 addq $0x4, %rdi cmpq %rsi, %rdi jne 0xcb8d jmp 0xcb7f incq %rax addq $0x4, %rdi cmpq %rsi, %rdi je 0xcb7f addq $0x4, %rdx cmpq %rcx, %rdx jne 0xcb8a jmp 0xcb7f
/RoaringBitmap[P]CRoaring/src/array_util.c
difference_uint16
/*
 * Difference a1 \ a2 of two sorted uint16 arrays, written to a_out.
 *
 * a_out may alias a1 (every write lands at or before the read position,
 * and the tail copy uses memmove). Returns the number of values written.
 */
int difference_uint16(const uint16_t *a1, int length1, const uint16_t *a2,
                      int length2, uint16_t *a_out) {
    if (length1 == 0) return 0;
    if (length2 == 0) {
        /* nothing to subtract: a1 survives whole */
        if (a1 != a_out) memcpy(a_out, a1, sizeof(uint16_t) * length1);
        return length1;
    }

    int out_card = 0;
    int k1 = 0, k2 = 0;
    while (k1 < length1 && k2 < length2) {
        const uint16_t v1 = a1[k1];
        const uint16_t v2 = a2[k2];
        if (v1 < v2) {
            a_out[out_card++] = v1; /* only in a1: keep it */
            k1++;
        } else if (v1 > v2) {
            k2++;
        } else {
            /* common value: drop it, advance both */
            k1++;
            k2++;
        }
    }
    if (k1 < length1) {
        /* a2 exhausted: the remaining a1 suffix survives verbatim */
        memmove(a_out + out_card, a1 + k1, sizeof(uint16_t) * (length1 - k1));
        out_card += length1 - k1;
    }
    return out_card;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movl %esi, %ebx testl %esi, %esi je 0xcd45 movq %rdi, %rsi testl %ecx, %ecx je 0xcd6d movzwl (%rdx), %edi xorl %ebp, %ebp xorl %r14d, %r14d xorl %eax, %eax movq %rsi, %r10 movzwl (%r10), %r10d leal 0x1(%rax), %r9d movslq %r9d, %r9 leaq (%rdx,%r9,2), %r9 movl %ebp, %r11d cmpw %di, %r10w jb 0xcd0b je 0xcd23 incl %eax cmpl %ecx, %eax jge 0xcd49 movzwl (%r9), %edi addq $0x2, %r9 jmp 0xccf0 incl %ebp movw %r10w, (%r8,%r11,2) incl %r14d cmpl %ebx, %r14d jge 0xcd69 movslq %r14d, %r9 leaq (%rsi,%r9,2), %r10 jmp 0xcce1 leal 0x1(%r14), %r15d movl %ebx, %r10d subl %r15d, %r10d jle 0xcd69 incl %eax movslq %r15d, %r12 cmpl %ecx, %eax jge 0xcd8b leaq (%rsi,%r12,2), %r10 movzwl (%r9), %edi movl %r15d, %r14d jmp 0xcce1 xorl %ebx, %ebx jmp 0xcd80 leaq (%r8,%r11,2), %rdi movslq %r14d, %r14 leaq (%rsi,%r14,2), %rsi movl %ebx, %eax subl %r14d, %eax movslq %eax, %rdx addq %rdx, %rdx callq 0x10e0 addl %ebx, %ebp subl %r14d, %ebp movl %ebp, %ebx jmp 0xcd80 cmpq %r8, %rsi je 0xcd80 movslq %ebx, %rdx addq %rdx, %rdx movq %r8, %rdi callq 0x10a0 movl %ebx, %eax popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq leaq (%r8,%r11,2), %rdi leaq (%rsi,%r12,2), %rsi movslq %r10d, %rdx addq %rdx, %rdx callq 0x10e0 addl %ebx, %ebp notl %r14d addl %ebp, %r14d movl %r14d, %ebx jmp 0xcd80
/RoaringBitmap[P]CRoaring/src/array_util.c
xor_uint16
/*
 * Symmetric difference (xor) of two sorted uint16 arrays, written to out.
 *
 * Emits every value present in exactly one of the inputs, in order, then
 * bulk-copies whichever tail remains. Returns the number written.
 */
int32_t xor_uint16(const uint16_t *array_1, int32_t card_1,
                   const uint16_t *array_2, int32_t card_2, uint16_t *out) {
    int32_t i = 0, j = 0, written = 0;

    while (i < card_1 && j < card_2) {
        const uint16_t a = array_1[i];
        const uint16_t b = array_2[j];
        if (a == b) {
            /* in both: excluded from the xor */
            ++i;
            ++j;
        } else if (a < b) {
            out[written++] = a;
            ++i;
        } else {
            out[written++] = b;
            ++j;
        }
    }

    /* at most one of the two tails is non-empty */
    if (i < card_1) {
        const size_t rest = (size_t)(card_1 - i);
        memcpy(out + written, array_1 + i, rest * sizeof(uint16_t));
        written += (int32_t)rest;
    } else if (j < card_2) {
        const size_t rest = (size_t)(card_2 - j);
        memcpy(out + written, array_2 + j, rest * sizeof(uint16_t));
        written += (int32_t)rest;
    }
    return written;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx xorl %ebx, %ebx xorl %r11d, %r11d xorl %r9d, %r9d movl %r11d, %eax movslq %r11d, %r10 leaq (%rdx,%r10,2), %r14 movl %r9d, %r10d movslq %r9d, %r9 leaq (%rdi,%r9,2), %r15 xorl %r9d, %r9d leal (%rax,%r9), %ebp leal (%r10,%r9), %r12d movl %ebx, %r11d cmpl %esi, %r12d jge 0xce26 cmpl %ecx, %ebp jge 0xce26 movzwl (%r15,%r9,2), %ebp movzwl (%r14,%r9,2), %r12d cmpw %r12w, %bp jne 0xcdfb incq %r9 jmp 0xcdd2 jae 0xce10 addq %r9, %rax movw %bp, (%r8,%r11,2) addl %r10d, %r9d incl %r9d movl %eax, %r11d jmp 0xce22 addq %r9, %r10 movw %r12w, (%r8,%r11,2) leal (%rax,%r9), %r11d incl %r11d movl %r10d, %r9d incl %ebx jmp 0xcdbb cmpl %esi, %r12d jge 0xce37 leaq (%r10,%r9), %r14 subl %r10d, %esi subl %r9d, %esi jmp 0xce49 cmpl %ecx, %ebp jge 0xce69 leaq (%rax,%r9), %r14 subl %eax, %ecx subl %r9d, %ecx movl %ecx, %esi movq %rdx, %rdi movslq %esi, %r15 leaq (%r8,%r11,2), %rax movslq %r14d, %rcx leaq (%rdi,%rcx,2), %rsi leaq (%r15,%r15), %rdx movq %rax, %rdi callq 0x10a0 addl %ebx, %r15d movl %r15d, %ebx movl %ebx, %eax popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/array_util.c
union_vector16
CROARING_TARGET_AVX2
// a one-pass SSE union algorithm
// This function may not be safe if array1 == output or array2 == output.
/*
 * Union of two sorted uint16 arrays into `output`, returning the number of
 * distinct values written. Small inputs (< 8 values) fall back to the
 * scalar union_uint16. Otherwise a streaming bitonic-style merge runs:
 * sse_merge sorts two 8-value registers into a (min, max) pair, and
 * store_unique writes the min register deduplicated against the previously
 * stored register (`laststore`). Leftover partial vectors are merged with
 * a small sorted+deduplicated stack buffer via the scalar routine.
 */
uint32_t union_vector16(const uint16_t *__restrict__ array1, uint32_t length1,
                        const uint16_t *__restrict__ array2, uint32_t length2,
                        uint16_t *__restrict__ output) {
    if ((length1 < 8) || (length2 < 8)) {
        return (uint32_t)union_uint16(array1, length1, array2, length2,
                                      output);
    }
    __m128i vA, vB, V, vecMin, vecMax;
    __m128i laststore;
    uint16_t *initoutput = output;
    /* number of full 8-value vectors in each input */
    uint32_t len1 = length1 / 8;
    uint32_t len2 = length2 / 8;
    uint32_t pos1 = 0;
    uint32_t pos2 = 0;
    // we start the machine
    vA = _mm_lddqu_si128((const __m128i *)array1 + pos1);
    pos1++;
    vB = _mm_lddqu_si128((const __m128i *)array2 + pos2);
    pos2++;
    sse_merge(&vA, &vB, &vecMin, &vecMax);
    /* all-ones sentinel: nothing stored yet, so nothing to dedup against */
    laststore = _mm_set1_epi16(-1);
    output += store_unique(laststore, vecMin, output);
    laststore = vecMin;
    if ((pos1 < len1) && (pos2 < len2)) {
        uint16_t curA, curB;
        curA = array1[8 * pos1];
        curB = array2[8 * pos2];
        while (true) {
            /* pull the next vector from whichever side starts lower */
            if (curA <= curB) {
                V = _mm_lddqu_si128((const __m128i *)array1 + pos1);
                pos1++;
                if (pos1 < len1) {
                    curA = array1[8 * pos1];
                } else {
                    break;
                }
            } else {
                V = _mm_lddqu_si128((const __m128i *)array2 + pos2);
                pos2++;
                if (pos2 < len2) {
                    curB = array2[8 * pos2];
                } else {
                    break;
                }
            }
            sse_merge(&V, &vecMax, &vecMin, &vecMax);
            output += store_unique(laststore, vecMin, output);
            laststore = vecMin;
        }
        /* flush the vector loaded just before the break */
        sse_merge(&V, &vecMax, &vecMin, &vecMax);
        output += store_unique(laststore, vecMin, output);
        laststore = vecMin;
    }
    // we finish the rest off using a scalar algorithm
    // could be improved?
    //
    // copy the small end on a tmp buffer
    uint32_t len = (uint32_t)(output - initoutput);
    uint16_t buffer[16];
    uint32_t leftoversize = store_unique(laststore, vecMax, buffer);
    if (pos1 == len1) {
        /* array1's vectors are exhausted: append its scalar tail, sort and
         * dedup the buffer, then merge with the rest of array2 */
        memcpy(buffer + leftoversize, array1 + 8 * pos1,
               (length1 - 8 * len1) * sizeof(uint16_t));
        leftoversize += length1 - 8 * len1;
        qsort(buffer, leftoversize, sizeof(uint16_t), uint16_compare);
        leftoversize = unique(buffer, leftoversize);
        len += (uint32_t)union_uint16(buffer, leftoversize, array2 + 8 * pos2,
                                      length2 - 8 * pos2, output);
    } else {
        /* symmetric case: array2's vectors ran out first */
        memcpy(buffer + leftoversize, array2 + 8 * pos2,
               (length2 - 8 * len2) * sizeof(uint16_t));
        leftoversize += length2 - 8 * len2;
        qsort(buffer, leftoversize, sizeof(uint16_t), uint16_compare);
        leftoversize = unique(buffer, leftoversize);
        len += (uint32_t)union_uint16(buffer, leftoversize, array1 + 8 * pos1,
                                      length1 - 8 * pos1, output);
    }
    return len;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0xe8, %rsp movq %r8, %r15 movl %ecx, %r12d movq %rdx, %r14 movl %esi, %r13d movq %rdi, %rbp cmpl $0x8, %esi setae %al cmpl $0x8, %ecx setae %cl testb %cl, %al jne 0xcebd movl %r13d, %esi movl %r12d, %ecx movq %rbp, %rdi movq %r14, %rdx movq %r15, %r8 callq 0xcbcc jmp 0xd17e movl %r13d, %eax shrl $0x3, %eax movl %eax, 0xc(%rsp) vlddqu (%rbp), %xmm0 leaq 0xd0(%rsp), %rdi vmovapd %xmm0, (%rdi) vlddqu (%r14), %xmm0 leaq 0xc0(%rsp), %rsi vmovapd %xmm0, (%rsi) leaq 0x60(%rsp), %rbx leaq 0x50(%rsp), %rcx movq %rbx, %rdx callq 0xd190 vmovaps (%rbx), %xmm1 vpcmpeqd %xmm0, %xmm0, %xmm0 vmovaps %xmm1, 0x10(%rsp) movq %r15, %rdi callq 0xd275 cltq movq %r15, 0x98(%rsp) leaq (%r15,%rax,2), %r15 cmpl $0x10, %r13d setb %al cmpl $0x10, %r12d setb %cl orb %al, %cl movq %r14, 0x30(%rsp) movq %rbp, 0x28(%rsp) jne 0xd019 movq %r13, 0x88(%rsp) movq %r12, 0x90(%rsp) movl %r12d, %eax shrl $0x3, %eax movl %eax, 0x3c(%rsp) movzwl 0x10(%rbp), %r13d movzwl 0x10(%r14), %r12d pushq $0x1 popq %rbx leaq 0x50(%rsp), %r14 movl %ebx, %ebp vmovaps 0x10(%rsp), %xmm0 vmovaps %xmm0, 0x10(%rsp) cmpw %r12w, %r13w jbe 0xcfaf movl %ebp, %eax shlq $0x4, %rax movq 0x30(%rsp), %rcx vlddqu (%rcx,%rax), %xmm0 vmovapd %xmm0, 0x70(%rsp) incl %ebp cmpl 0x3c(%rsp), %ebp jae 0xd027 leal (,%rbp,8), %eax movzwl (%rcx,%rax,2), %r12d jmp 0xcfd9 movl %ebx, %eax shlq $0x4, %rax movq 0x28(%rsp), %rcx vlddqu (%rcx,%rax), %xmm0 vmovapd %xmm0, 0x70(%rsp) incl %ebx cmpl 0xc(%rsp), %ebx jae 0xd027 leal (,%rbx,8), %eax movzwl (%rcx,%rax,2), %r13d leaq 0x70(%rsp), %rdi movq %r14, %rsi leaq 0x60(%rsp), %rdx movq %r14, %rcx callq 0xd190 vmovaps 0x60(%rsp), %xmm1 vmovaps %xmm1, 0x40(%rsp) vmovaps 0x10(%rsp), %xmm0 movq %r15, %rdi callq 0xd275 cltq leaq (%r15,%rax,2), %r15 vmovaps 0x40(%rsp), %xmm0 jmp 0xcf73 pushq $0x8 popq %rbp pushq $0x1 popq %rbx vmovaps 0x10(%rsp), %xmm0 jmp 0xd079 leaq 0x70(%rsp), %rdi leaq 0x60(%rsp), %r14 leaq 0x50(%rsp), %rcx movq %rcx, 
%rsi movq %r14, %rdx callq 0xd190 vmovaps (%r14), %xmm1 vmovaps %xmm1, 0x40(%rsp) vmovaps 0x10(%rsp), %xmm0 movq %r15, %rdi callq 0xd275 cltq leaq (%r15,%rax,2), %r15 shll $0x3, %ebp vmovaps 0x40(%rsp), %xmm0 movq 0x90(%rsp), %r12 movq 0x88(%rsp), %r13 movq %r15, %rax subq 0x98(%rsp), %rax shrq %rax movq %rax, 0x10(%rsp) vmovaps 0x50(%rsp), %xmm1 leaq 0xa0(%rsp), %rdi callq 0xd275 movl %eax, %r14d movl %eax, %eax leaq (%rsp,%rax,2), %rdi addq $0xa0, %rdi cmpl 0xc(%rsp), %ebx jne 0xd118 movl %r13d, %eax andl $-0x8, %eax movq 0x28(%rsp), %rcx leaq (%rcx,%rax,2), %rsi andl $0x7, %r13d leal (,%r13,2), %edx callq 0x10a0 addl %r13d, %r14d leaq 0x1ce(%rip), %rcx # 0xd2ad leaq 0xa0(%rsp), %rbx pushq $0x2 popq %rdx movq %rbx, %rdi movq %r14, %rsi callq 0x1060 movq %rbx, %rdi movl %r14d, %esi callq 0xd2b6 movl %eax, %esi movl %ebp, %eax movq 0x30(%rsp), %rcx leaq (%rcx,%rax,2), %rdx subl %ebp, %r12d movq %rbx, %rdi movq %r12, %rcx jmp 0xd172 movl %ebp, %eax movq 0x30(%rsp), %rcx leaq (%rcx,%rax,2), %rsi andl $0x7, %r12d leal (%r12,%r12), %edx callq 0x10a0 addl %r12d, %r14d leaq 0x173(%rip), %rcx # 0xd2ad leaq 0xa0(%rsp), %r12 pushq $0x2 popq %rdx movq %r12, %rdi movq %r14, %rsi callq 0x1060 movq %r12, %rdi movl %r14d, %esi callq 0xd2b6 movl %eax, %esi shll $0x3, %ebx movq 0x28(%rsp), %rax leaq (%rax,%rbx,2), %rdx subl %ebx, %r13d movq %r12, %rdi movq %r13, %rcx movq %r15, %r8 callq 0xcbcc addl 0x10(%rsp), %eax addq $0xe8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/array_util.c
store_unique
CROARING_TARGET_AVX2 // write vector new, while omitting repeated values assuming that previously // written vector was "old" static inline int store_unique(__m128i old, __m128i newval, uint16_t *output) { __m128i vecTmp = _mm_alignr_epi8(newval, old, 16 - 2); // lots of high latency instructions follow (optimize?) int M = _mm_movemask_epi8( _mm_packs_epi16(_mm_cmpeq_epi16(vecTmp, newval), _mm_setzero_si128())); int numberofnewvalues = 8 - _mm_popcnt_u32(M); __m128i key = _mm_lddqu_si128((const __m128i *)uniqshuf + M); __m128i val = _mm_shuffle_epi8(newval, key); _mm_storeu_si128((__m128i *)output, val); return numberofnewvalues; }
vpalignr $0xe, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13] vpcmpeqw %xmm1, %xmm0, %xmm0 vpxor %xmm2, %xmm2, %xmm2 vpacksswb %xmm2, %xmm0, %xmm0 vpmovmskb %xmm0, %ecx popcntl %ecx, %edx pushq $0x8 popq %rax subl %edx, %eax shll $0x4, %ecx leaq 0xbe42(%rip), %rdx # 0x190e0 vlddqu (%rcx,%rdx), %xmm0 vpshufb %xmm0, %xmm1, %xmm0 vmovdqu %xmm0, (%rdi) retq
/RoaringBitmap[P]CRoaring/src/array_util.c
xor_vector16
CROARING_TARGET_AVX2
// a one-pass SSE xor algorithm: emits the symmetric difference of two sorted
// uint16 arrays. Mirrors union_vector16 but uses store_unique_xor, which
// drops values occurring twice in a row instead of keeping one copy.
uint32_t xor_vector16(const uint16_t *__restrict__ array1, uint32_t length1,
                      const uint16_t *__restrict__ array2, uint32_t length2,
                      uint16_t *__restrict__ output) {
    // Vector path needs at least one full vector from each input.
    if ((length1 < 8) || (length2 < 8)) {
        return xor_uint16(array1, length1, array2, length2, output);
    }
    __m128i vA, vB, V, vecMin, vecMax;
    __m128i laststore;
    uint16_t *initoutput = output;
    uint32_t len1 = length1 / 8;  // whole vectors in array1
    uint32_t len2 = length2 / 8;  // whole vectors in array2
    uint32_t pos1 = 0;
    uint32_t pos2 = 0;
    // we start the machine
    vA = _mm_lddqu_si128((const __m128i *)array1 + pos1);
    pos1++;
    vB = _mm_lddqu_si128((const __m128i *)array2 + pos2);
    pos2++;
    sse_merge(&vA, &vB, &vecMin, &vecMax);
    // All-ones sentinel so the first store cannot match a real value.
    laststore = _mm_set1_epi16(-1);
    // 17 slots: 16 for a full vector's worth of leftovers plus one for the
    // possibly-carried last lane of vecMax (see vec7 below).
    uint16_t buffer[17];
    output += store_unique_xor(laststore, vecMin, output);
    laststore = vecMin;
    if ((pos1 < len1) && (pos2 < len2)) {
        uint16_t curA, curB;
        curA = array1[8 * pos1];
        curB = array2[8 * pos2];
        while (true) {
            // Feed the machine from whichever input has the smaller upcoming
            // value; stop when that input runs out of whole vectors.
            if (curA <= curB) {
                V = _mm_lddqu_si128((const __m128i *)array1 + pos1);
                pos1++;
                if (pos1 < len1) {
                    curA = array1[8 * pos1];
                } else {
                    break;
                }
            } else {
                V = _mm_lddqu_si128((const __m128i *)array2 + pos2);
                pos2++;
                if (pos2 < len2) {
                    curB = array2[8 * pos2];
                } else {
                    break;
                }
            }
            sse_merge(&V, &vecMax, &vecMin, &vecMax);
            // conditionally stores the last value of laststore as well as all
            // but the last value of vecMin
            output += store_unique_xor(laststore, vecMin, output);
            laststore = vecMin;
        }
        // Flush the vector loaded on the iteration that broke out above.
        sse_merge(&V, &vecMax, &vecMin, &vecMax);
        // conditionally stores the last value of laststore as well as all but
        // the last value of vecMin
        output += store_unique_xor(laststore, vecMin, output);
        laststore = vecMin;
    }
    uint32_t len = (uint32_t)(output - initoutput);
    // we finish the rest off using a scalar algorithm
    // could be improved?
    //
    // conditionally stores the last value of laststore as well as all but the
    // last value of vecMax, we store to "buffer"
    int leftoversize = store_unique_xor(laststore, vecMax, buffer);
    // store_unique_xor never emits the final lane of vecMax (it has no right
    // neighbor yet); append it here unless it duplicates lane 6.
    uint16_t vec7 = (uint16_t)_mm_extract_epi16(vecMax, 7);
    uint16_t vec6 = (uint16_t)_mm_extract_epi16(vecMax, 6);
    if (vec7 != vec6) buffer[leftoversize++] = vec7;
    if (pos1 == len1) {
        // array1 ran out of whole vectors first; append its scalar tail.
        memcpy(buffer + leftoversize, array1 + 8 * pos1,
               (length1 - 8 * len1) * sizeof(uint16_t));
        leftoversize += length1 - 8 * len1;
        if (leftoversize == 0) {  // trivial case
            memcpy(output, array2 + 8 * pos2,
                   (length2 - 8 * pos2) * sizeof(uint16_t));
            len += (length2 - 8 * pos2);
        } else {
            qsort(buffer, leftoversize, sizeof(uint16_t), uint16_compare);
            leftoversize = unique_xor(buffer, leftoversize);
            len += xor_uint16(buffer, leftoversize, array2 + 8 * pos2,
                              length2 - 8 * pos2, output);
        }
    } else {
        // array2 ran out first; symmetric case.
        memcpy(buffer + leftoversize, array2 + 8 * pos2,
               (length2 - 8 * len2) * sizeof(uint16_t));
        leftoversize += length2 - 8 * len2;
        if (leftoversize == 0) {  // trivial case
            memcpy(output, array1 + 8 * pos1,
                   (length1 - 8 * pos1) * sizeof(uint16_t));
            len += (length1 - 8 * pos1);
        } else {
            qsort(buffer, leftoversize, sizeof(uint16_t), uint16_compare);
            leftoversize = unique_xor(buffer, leftoversize);
            len += xor_uint16(buffer, leftoversize, array1 + 8 * pos1,
                              length1 - 8 * pos1, output);
        }
    }
    return len;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0xe8, %rsp movq %r8, %r15 movl %ecx, %r13d movq %rdx, %r12 movl %esi, %r14d movq %rdi, %rbp cmpl $0x8, %esi setae %al cmpl $0x8, %ecx setae %cl testb %cl, %al jne 0xd333 movq %rbp, %rdi movl %r14d, %esi movq %r12, %rdx movl %r13d, %ecx movq %r15, %r8 addq $0xe8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0xcdab movl %r14d, %eax shrl $0x3, %eax movl %eax, 0xc(%rsp) vlddqu (%rbp), %xmm0 leaq 0xd0(%rsp), %rdi vmovapd %xmm0, (%rdi) vlddqu (%r12), %xmm0 leaq 0xc0(%rsp), %rsi vmovapd %xmm0, (%rsi) leaq 0x60(%rsp), %rbx leaq 0x50(%rsp), %rcx movq %rbx, %rdx callq 0xd190 vmovaps (%rbx), %xmm1 vpcmpeqd %xmm0, %xmm0, %xmm0 vmovaps %xmm1, 0x20(%rsp) movq %r15, %rdi callq 0xd688 cltq movq %r15, 0x88(%rsp) leaq (%r15,%rax,2), %r15 cmpl $0x10, %r14d setb %al cmpl $0x10, %r13d setb %cl orb %al, %cl movq %r12, 0x18(%rsp) movq %rbp, 0x10(%rsp) movq %r14, 0x30(%rsp) jne 0xd493 movq %r13, 0x80(%rsp) movl %r13d, %eax shrl $0x3, %eax movl %eax, 0x3c(%rsp) movzwl 0x10(%rbp), %r14d movzwl 0x10(%r12), %r13d pushq $0x1 popq %rbp leaq 0x50(%rsp), %rbx movl %ebp, %r12d vmovaps 0x20(%rsp), %xmm0 vmovaps %xmm0, 0x20(%rsp) cmpw %r13w, %r14w jbe 0xd429 movl %r12d, %eax shlq $0x4, %rax movq 0x18(%rsp), %rcx vlddqu (%rcx,%rax), %xmm0 vmovapd %xmm0, 0x70(%rsp) incl %r12d cmpl 0x3c(%rsp), %r12d jae 0xd4a2 leal (,%r12,8), %eax movzwl (%rcx,%rax,2), %r13d jmp 0xd453 movl %ebp, %eax shlq $0x4, %rax movq 0x10(%rsp), %rcx vlddqu (%rcx,%rax), %xmm0 vmovapd %xmm0, 0x70(%rsp) incl %ebp cmpl 0xc(%rsp), %ebp jae 0xd4a2 leal (,%rbp,8), %eax movzwl (%rcx,%rax,2), %r14d leaq 0x70(%rsp), %rdi movq %rbx, %rsi leaq 0x60(%rsp), %rdx movq %rbx, %rcx callq 0xd190 vmovaps 0x60(%rsp), %xmm1 vmovaps %xmm1, 0x40(%rsp) vmovaps 0x20(%rsp), %xmm0 movq %r15, %rdi callq 0xd688 cltq leaq (%r15,%rax,2), %r15 vmovaps 0x40(%rsp), %xmm0 jmp 0xd3e9 pushq $0x8 popq %r12 pushq $0x1 popq %rbp vmovaps 0x20(%rsp), %xmm0 jmp 0xd4ec leaq 
0x70(%rsp), %rdi leaq 0x60(%rsp), %rbx leaq 0x50(%rsp), %rcx movq %rcx, %rsi movq %rbx, %rdx callq 0xd190 vmovaps (%rbx), %xmm1 vmovaps %xmm1, 0x40(%rsp) vmovaps 0x20(%rsp), %xmm0 movq %r15, %rdi callq 0xd688 cltq leaq (%r15,%rax,2), %r15 shll $0x3, %r12d vmovaps 0x40(%rsp), %xmm0 movq 0x80(%rsp), %r13 movq %r15, %r14 subq 0x88(%rsp), %r14 shrq %r14 vmovaps 0x50(%rsp), %xmm1 vmovaps %xmm1, 0x20(%rsp) leaq 0x90(%rsp), %rdi callq 0xd688 vmovdqa 0x20(%rsp), %xmm0 vpextrw $0x7, %xmm0, %ecx movl %eax, %ebx vpextrw $0x6, %xmm0, %eax cmpw %ax, %cx je 0xd537 movslq %ebx, %rax incl %ebx movw %cx, 0x90(%rsp,%rax,2) movslq %ebx, %rax leaq (%rsp,%rax,2), %rdi addq $0x90, %rdi cmpl 0xc(%rsp), %ebp jne 0xd5ba movq 0x30(%rsp), %rbp movl %ebp, %eax andl $-0x8, %eax movq 0x10(%rsp), %rcx leaq (%rcx,%rax,2), %rsi andl $0x7, %ebp leal (,%rbp,2), %edx callq 0x10a0 addl %ebp, %ebx je 0xd62b movslq %ebx, %rbx leaq -0x2d2(%rip), %rcx # 0xd2ad movq %r13, %rbp leaq 0x90(%rsp), %r13 pushq $0x2 popq %rdx movq %r13, %rdi movq %rbx, %rsi callq 0x1060 movq %r13, %rdi movl %ebx, %esi callq 0xd6ce movl %r12d, %ecx movq 0x18(%rsp), %rdx leaq (%rdx,%rcx,2), %rdx subl %r12d, %ebp movq %r13, %rdi movl %eax, %esi movl %ebp, %ecx jmp 0xd61e movl %r12d, %eax movq 0x18(%rsp), %rcx leaq (%rcx,%rax,2), %rsi andl $0x7, %r13d leal (,%r13,2), %edx callq 0x10a0 addl %r13d, %ebx je 0xd64c movslq %ebx, %rbx leaq -0x339(%rip), %rcx # 0xd2ad leaq 0x90(%rsp), %r12 pushq $0x2 popq %rdx movq %r12, %rdi movq %rbx, %rsi callq 0x1060 movq %r12, %rdi movl %ebx, %esi callq 0xd6ce shll $0x3, %ebp movq 0x10(%rsp), %rcx leaq (%rcx,%rbp,2), %rdx movq 0x30(%rsp), %rcx subl %ebp, %ecx movq %r12, %rdi movl %eax, %esi movq %r15, %r8 callq 0xcdab movl %eax, %r13d jmp 0xd670 movl %r12d, %eax movq 0x18(%rsp), %rcx leaq (%rcx,%rax,2), %rsi subl %r12d, %r13d leaq (,%r13,2), %rdx movq %r15, %rdi callq 0x10a0 jmp 0xd670 shll $0x3, %ebp movq 0x10(%rsp), %rax leaq (%rax,%rbp,2), %rsi movq 0x30(%rsp), %r13 subl %ebp, %r13d leaq (,%r13,2), 
%rdx movq %r15, %rdi callq 0x10a0 addl %r14d, %r13d movl %r13d, %eax addq $0xe8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/array_util.c
store_unique_xor
static inline int store_unique_xor(__m128i old, __m128i newval, uint16_t *output) { __m128i vecTmp1 = _mm_alignr_epi8(newval, old, 16 - 4); __m128i vecTmp2 = _mm_alignr_epi8(newval, old, 16 - 2); __m128i equalleft = _mm_cmpeq_epi16(vecTmp2, vecTmp1); __m128i equalright = _mm_cmpeq_epi16(vecTmp2, newval); __m128i equalleftoright = _mm_or_si128(equalleft, equalright); int M = _mm_movemask_epi8( _mm_packs_epi16(equalleftoright, _mm_setzero_si128())); int numberofnewvalues = 8 - _mm_popcnt_u32(M); __m128i key = _mm_lddqu_si128((const __m128i *)uniqshuf + M); __m128i val = _mm_shuffle_epi8(vecTmp2, key); _mm_storeu_si128((__m128i *)output, val); return numberofnewvalues; }
vpalignr $0xc, %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11] vpalignr $0xe, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13] vpcmpeqw %xmm2, %xmm0, %xmm2 vpcmpeqw %xmm1, %xmm0, %xmm1 vpor %xmm1, %xmm2, %xmm1 vpxor %xmm2, %xmm2, %xmm2 vpacksswb %xmm2, %xmm1, %xmm1 vpmovmskb %xmm1, %ecx popcntl %ecx, %edx pushq $0x8 popq %rax subl %edx, %eax shll $0x4, %ecx leaq 0xba21(%rip), %rdx # 0x190e0 vlddqu (%rcx,%rdx), %xmm1 vpshufb %xmm1, %xmm0, %xmm0 vmovdqu %xmm0, (%rdi) retq
/RoaringBitmap[P]CRoaring/src/array_util.c
unique_xor
CROARING_UNTARGET_AVX2 // working in-place, this function overwrites the repeated values // could be avoided? Warning: assumes len > 0 static inline uint32_t unique_xor(uint16_t *out, uint32_t len) { uint32_t pos = 1; for (uint32_t i = 1; i < len; ++i) { if (out[i] != out[i - 1]) { out[pos++] = out[i]; } else pos--; // if it is identical to previous, delete it } return pos; }
movl %esi, %ecx pushq $0x1 popq %rax pushq $0x1 popq %rdx cmpq %rcx, %rdx jae 0xd6f9 movzwl (%rdi,%rdx,2), %esi cmpw -0x2(%rdi,%rdx,2), %si jne 0xd6ea decl %eax jmp 0xd6f4 movl %eax, %r8d incl %eax movw %si, (%rdi,%r8,2) incq %rdx jmp 0xd6d6 retq
/RoaringBitmap[P]CRoaring/src/array_util.c
union_uint32
size_t union_uint32(const uint32_t *set_1, size_t size_1, const uint32_t *set_2, size_t size_2, uint32_t *buffer) { size_t pos = 0, idx_1 = 0, idx_2 = 0; if (0 == size_2) { memmove(buffer, set_1, size_1 * sizeof(uint32_t)); return size_1; } if (0 == size_1) { memmove(buffer, set_2, size_2 * sizeof(uint32_t)); return size_2; } uint32_t val_1 = set_1[idx_1], val_2 = set_2[idx_2]; while (true) { if (val_1 < val_2) { buffer[pos++] = val_1; ++idx_1; if (idx_1 >= size_1) break; val_1 = set_1[idx_1]; } else if (val_2 < val_1) { buffer[pos++] = val_2; ++idx_2; if (idx_2 >= size_2) break; val_2 = set_2[idx_2]; } else { buffer[pos++] = val_1; ++idx_1; ++idx_2; if (idx_1 >= size_1 || idx_2 >= size_2) break; val_1 = set_1[idx_1]; val_2 = set_2[idx_2]; } } if (idx_1 < size_1) { const size_t n_elems = size_1 - idx_1; memmove(buffer + pos, set_1 + idx_1, n_elems * sizeof(uint32_t)); pos += n_elems; } else if (idx_2 < size_2) { const size_t n_elems = size_2 - idx_2; memmove(buffer + pos, set_2 + idx_2, n_elems * sizeof(uint32_t)); pos += n_elems; } return pos; }
pushq %r15 pushq %r14 pushq %rbx movq %rsi, %rbx movq %rdi, %rsi testq %rcx, %rcx je 0xd79b movq %rcx, %r14 testq %rbx, %rbx je 0xd7ad movl (%rdx), %edi xorl %eax, %eax xorl %ecx, %ecx xorl %r15d, %r15d movl (%rsi,%rax,4), %r9d incq %r15 cmpl %edi, %r9d jb 0xd74b jbe 0xd75a movl %edi, -0x4(%r8,%r15,4) leaq 0x1(%rcx), %r10 cmpq %r14, %r10 jae 0xd779 movl (%rdx,%r10,4), %edi incq %r15 movq %r10, %rcx jmp 0xd72a movl %r9d, -0x4(%r8,%r15,4) incq %rax cmpq %rbx, %rax jb 0xd723 jmp 0xd77c movl %r9d, -0x4(%r8,%r15,4) incq %rax cmpq %rbx, %rax jae 0xd779 leaq 0x1(%rcx), %r9 cmpq %r14, %r9 jae 0xd779 movl 0x4(%rdx,%rcx,4), %edi movq %r9, %rcx jmp 0xd723 incq %rcx subq %rax, %rbx jbe 0xd7c8 leaq (%r8,%r15,4), %rdi leaq (%rsi,%rax,4), %rsi leaq (,%rbx,4), %rdx callq 0x10e0 addq %r15, %rbx jmp 0xd7e8 leaq (,%rbx,4), %rdx movq %r8, %rdi callq 0x10e0 jmp 0xd7e8 leaq (,%r14,4), %rax movq %r8, %rdi movq %rdx, %rsi movq %rax, %rdx callq 0x10e0 movq %r14, %rbx jmp 0xd7e8 subq %rcx, %r14 jbe 0xd7e5 leaq (%r8,%r15,4), %rdi leaq (%rdx,%rcx,4), %rsi leaq (,%r14,4), %rdx callq 0x10e0 addq %r14, %r15 movq %r15, %rbx movq %rbx, %rax popq %rbx popq %r14 popq %r15 retq
/RoaringBitmap[P]CRoaring/src/array_util.c
union_uint32_card
size_t union_uint32_card(const uint32_t *set_1, size_t size_1, const uint32_t *set_2, size_t size_2) { size_t pos = 0, idx_1 = 0, idx_2 = 0; if (0 == size_2) { return size_1; } if (0 == size_1) { return size_2; } uint32_t val_1 = set_1[idx_1], val_2 = set_2[idx_2]; while (true) { if (val_1 < val_2) { ++idx_1; ++pos; if (idx_1 >= size_1) break; val_1 = set_1[idx_1]; } else if (val_2 < val_1) { ++idx_2; ++pos; if (idx_2 >= size_2) break; val_2 = set_2[idx_2]; } else { ++idx_1; ++idx_2; ++pos; if (idx_1 >= size_1 || idx_2 >= size_2) break; val_1 = set_1[idx_1]; val_2 = set_2[idx_2]; } } if (idx_1 < size_1) { const size_t n_elems = size_1 - idx_1; pos += n_elems; } else if (idx_2 < size_2) { const size_t n_elems = size_2 - idx_2; pos += n_elems; } return pos; }
movq %rsi, %rax testq %rcx, %rcx je 0xd862 testq %rax, %rax je 0xd85f pushq %rbx movl (%rdx), %r10d xorl %r8d, %r8d xorl %r9d, %r9d xorl %esi, %esi movl (%rdi,%r8,4), %r11d incq %rsi cmpl %r10d, %r11d jb 0xd82d jbe 0xd837 leaq 0x1(%r9), %rbx cmpq %rcx, %rbx jae 0xd852 movl (%rdx,%rbx,4), %r10d incq %rsi movq %rbx, %r9 jmp 0xd811 incq %r8 cmpq %rax, %r8 jb 0xd80a jmp 0xd855 incq %r8 cmpq %rax, %r8 jae 0xd852 leaq 0x1(%r9), %r11 cmpq %rcx, %r11 jae 0xd852 movl 0x4(%rdx,%r9,4), %r10d movq %r11, %r9 jmp 0xd80a incq %r9 subq %r8, %rax jbe 0xd863 addq %rsi, %rax jmp 0xd873 movq %rcx, %rax retq subq %r9, %rcx jbe 0xd870 addq %rsi, %rcx movq %rcx, %rax jmp 0xd873 movq %rsi, %rax popq %rbx retq
/RoaringBitmap[P]CRoaring/src/array_util.c
memequals
bool memequals(const void *s1, const void *s2, size_t n) { if (n == 0) { return true; } #if CROARING_IS_X64 int support = croaring_hardware_support(); #if CROARING_COMPILER_SUPPORTS_AVX512 if (support & ROARING_SUPPORTS_AVX512) { return _avx512_memequals(s1, s2, n); } else #endif // CROARING_COMPILER_SUPPORTS_AVX512 if (support & ROARING_SUPPORTS_AVX2) { return _avx2_memequals(s1, s2, n); } else { return memcmp(s1, s2, n) == 0; } #else return memcmp(s1, s2, n) == 0; #endif }
pushq %r15 pushq %r14 pushq %rbx testq %rdx, %rdx je 0xd92e movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 callq 0xbc68 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx testb $0x2, %al jne 0xd936 testb $0x1, %al jne 0xd93d callq 0x10d0 testl %eax, %eax sete %al jmp 0xd930 movb $0x1, %al popq %rbx popq %r14 popq %r15 retq popq %rbx popq %r14 popq %r15 jmp 0xd947 popq %rbx popq %r14 popq %r15 jmp 0xd9de
/RoaringBitmap[P]CRoaring/src/array_util.c
avx2_memequals
CROARING_TARGET_AVX2 static inline bool _avx2_memequals(const void *s1, const void *s2, size_t n) { const uint8_t *ptr1 = (const uint8_t *)s1; const uint8_t *ptr2 = (const uint8_t *)s2; const uint8_t *end1 = ptr1 + n; const uint8_t *end8 = ptr1 + n / 8 * 8; const uint8_t *end32 = ptr1 + n / 32 * 32; while (ptr1 < end32) { __m256i r1 = _mm256_loadu_si256((const __m256i *)ptr1); __m256i r2 = _mm256_loadu_si256((const __m256i *)ptr2); int mask = _mm256_movemask_epi8(_mm256_cmpeq_epi8(r1, r2)); if ((uint32_t)mask != UINT32_MAX) { return false; } ptr1 += 32; ptr2 += 32; } while (ptr1 < end8) { uint64_t v1, v2; memcpy(&v1, ptr1, sizeof(uint64_t)); memcpy(&v2, ptr2, sizeof(uint64_t)); if (v1 != v2) { return false; } ptr1 += 8; ptr2 += 8; } while (ptr1 < end1) { if (*ptr1 != *ptr2) { return false; } ptr1++; ptr2++; } return true; }
leaq (%rdi,%rdx), %rcx movq %rdx, %rax andq $-0x8, %rax addq %rdi, %rax andq $-0x20, %rdx addq %rdi, %rdx cmpq %rdx, %rdi jae 0xda28 vmovdqu (%rdi), %ymm0 vpxor (%rsi), %ymm0, %ymm0 addq $0x20, %rdi addq $0x20, %rsi vptest %ymm0, %ymm0 je 0xd9f3 xorl %eax, %eax vzeroupper retq movq (%rdi), %rdx addq $0x8, %rdi leaq 0x8(%rsi), %r8 cmpq (%rsi), %rdx movq %r8, %rsi jne 0xda0f cmpq %rax, %rdi jb 0xda15 xorl %edx, %edx leaq (%rdi,%rdx), %r8 cmpq %rcx, %r8 setae %al jae 0xda11 movb (%r8), %r8b cmpb (%rsi,%rdx), %r8b jne 0xda11 incq %rdx jmp 0xda2f
/RoaringBitmap[P]CRoaring/src/array_util.c
bitset_extract_setbits_avx512
size_t bitset_extract_setbits_avx512(const uint64_t *words, size_t length, uint32_t *vout, size_t outcapacity, uint32_t base) { uint32_t *out = (uint32_t *)vout; uint32_t *initout = out; uint32_t *safeout = out + outcapacity; __m512i base_v = _mm512_set1_epi32(base); __m512i index_table = _mm512_loadu_si512(vbmi2_table); size_t i = 0; for (; (i < length) && ((out + 64) < safeout); i += 1) { uint64_t v = words[i]; __m512i vec = _mm512_maskz_compress_epi8(v, index_table); uint8_t advance = (uint8_t)roaring_hamming(v); __m512i vbase = _mm512_add_epi32(base_v, _mm512_set1_epi32((int)(i * 64))); __m512i r1 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 0)); __m512i r2 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 1)); __m512i r3 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 2)); __m512i r4 = _mm512_cvtepi8_epi32(_mm512_extracti32x4_epi32(vec, 3)); r1 = _mm512_add_epi32(r1, vbase); r2 = _mm512_add_epi32(r2, vbase); r3 = _mm512_add_epi32(r3, vbase); r4 = _mm512_add_epi32(r4, vbase); _mm512_storeu_si512((__m512i *)out, r1); _mm512_storeu_si512((__m512i *)(out + 16), r2); _mm512_storeu_si512((__m512i *)(out + 32), r3); _mm512_storeu_si512((__m512i *)(out + 48), r4); out += advance; } base += i * 64; for (; (i < length) && (out < safeout); ++i) { uint64_t w = words[i]; while ((w != 0) && (out < safeout)) { int r = roaring_trailing_zeroes(w); // on x64, should compile to TZCNT uint32_t val = r + base; memcpy(out, &val, sizeof(uint32_t)); // should be compiled as a MOV on x64 out++; w &= (w - 1); } base += 64; } return out - initout; }
leaq (%rdx,%rcx,4), %rcx vmovdqa64 0x35c2(%rip), %zmm0 # 0x11080 xorl %r9d, %r9d movq %rdx, %rax cmpq %rsi, %r9 jae 0xdb96 leaq 0x100(%rax), %r10 cmpq %rcx, %r10 jae 0xdb96 movq (%rdi,%r9,8), %r10 popcntq %r10, %r11 kmovq %r10, %k1 vpcompressb %zmm0, %zmm1 {%k1} {z} vpbroadcastd %r8d, %zmm2 vpmovsxbd %xmm1, %zmm3 vextracti128 $0x1, %ymm1, %xmm4 vpmovsxbd %xmm4, %zmm4 vextracti32x4 $0x2, %zmm1, %xmm5 vpmovsxbd %xmm5, %zmm5 vextracti32x4 $0x3, %zmm1, %xmm1 vpmovsxbd %xmm1, %zmm1 vpaddd %zmm3, %zmm2, %zmm3 vpaddd %zmm4, %zmm2, %zmm4 vpaddd %zmm5, %zmm2, %zmm5 vpaddd %zmm1, %zmm2, %zmm1 vmovdqu64 %zmm3, (%rax) vmovdqu64 %zmm4, 0x40(%rax) vmovdqu64 %zmm5, 0x80(%rax) vmovdqu64 %zmm1, 0xc0(%rax) leaq (%rax,%r11,4), %rax incq %r9 addl $0x40, %r8d jmp 0xdac4 cmpq %rcx, %rax jae 0xdb9b movq (%rdi,%r9,8), %r10 testq %r10, %r10 je 0xdb8f cmpq %rcx, %rax jae 0xdb8f tzcntq %r10, %r11 addl %r8d, %r11d movl %r11d, (%rax) addq $0x4, %rax blsrq %r10, %r10 jmp 0xdb6f addl $0x40, %r8d incq %r9 cmpq %rsi, %r9 jb 0xdb66 subq %rdx, %rax sarq $0x2, %rax vzeroupper retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_extract_setbits_avx512_uint16
size_t bitset_extract_setbits_avx512_uint16(const uint64_t *array, size_t length, uint16_t *vout, size_t capacity, uint16_t base) { uint16_t *out = (uint16_t *)vout; uint16_t *initout = out; uint16_t *safeout = vout + capacity; __m512i base_v = _mm512_set1_epi16(base); __m512i index_table = _mm512_loadu_si512(vbmi2_table); size_t i = 0; for (; (i < length) && ((out + 64) < safeout); i++) { uint64_t v = array[i]; __m512i vec = _mm512_maskz_compress_epi8(v, index_table); uint8_t advance = (uint8_t)roaring_hamming(v); __m512i vbase = _mm512_add_epi16(base_v, _mm512_set1_epi16((short)(i * 64))); __m512i r1 = _mm512_cvtepi8_epi16(_mm512_extracti32x8_epi32(vec, 0)); __m512i r2 = _mm512_cvtepi8_epi16(_mm512_extracti32x8_epi32(vec, 1)); r1 = _mm512_add_epi16(r1, vbase); r2 = _mm512_add_epi16(r2, vbase); _mm512_storeu_si512((__m512i *)out, r1); _mm512_storeu_si512((__m512i *)(out + 32), r2); out += advance; } base += i * 64; for (; (i < length) && (out < safeout); ++i) { uint64_t w = array[i]; while ((w != 0) && (out < safeout)) { int r = roaring_trailing_zeroes(w); // on x64, should compile to TZCNT uint32_t val = r + base; memcpy(out, &val, sizeof(uint16_t)); out++; w &= (w - 1); } base += 64; } return out - initout; }
leaq (%rdx,%rcx,2), %rcx vmovdqa64 0x34cc(%rip), %zmm0 # 0x11080 xorl %r9d, %r9d movq %rdx, %rax cmpq %rsi, %r9 jae 0xdc57 leaq 0x80(%rax), %r10 cmpq %rcx, %r10 jae 0xdc57 movq (%rdi,%r9,8), %r10 popcntq %r10, %r11 kmovq %r10, %k1 vpcompressb %zmm0, %zmm1 {%k1} {z} vpbroadcastw %r8d, %zmm2 vpmovsxbw %ymm1, %zmm3 vextracti64x4 $0x1, %zmm1, %ymm1 vpmovsxbw %ymm1, %zmm1 vpaddw %zmm3, %zmm2, %zmm3 vpaddw %zmm1, %zmm2, %zmm1 vmovdqu64 %zmm3, (%rax) vmovdqu64 %zmm1, 0x40(%rax) leaq (%rax,%r11,2), %rax incq %r9 addl $0x40, %r8d jmp 0xdbba cmpq %rcx, %rax jae 0xdc5c movq (%rdi,%r9,8), %r10 testq %r10, %r10 je 0xdc50 cmpq %rcx, %rax jae 0xdc50 tzcntq %r10, %r11 addl %r8d, %r11d movw %r11w, (%rax) addq $0x2, %rax blsrq %r10, %r10 jmp 0xdc2f addl $0x40, %r8d incq %r9 cmpq %rsi, %r9 jb 0xdc26 subq %rdx, %rax sarq %rax vzeroupper retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_extract_setbits
size_t bitset_extract_setbits(const uint64_t *words, size_t length, uint32_t *out, uint32_t base) { int outpos = 0; for (size_t i = 0; i < length; ++i) { uint64_t w = words[i]; while (w != 0) { int r = roaring_trailing_zeroes(w); // on x64, should compile to TZCNT uint32_t val = r + base; memcpy(out + outpos, &val, sizeof(uint32_t)); // should be compiled as a MOV on x64 outpos++; w &= (w - 1); } base += 64; } return outpos; }
xorl %r8d, %r8d xorl %r9d, %r9d movslq %r9d, %rax cmpq %rsi, %r8 je 0xddc3 movq (%rdi,%r8,8), %r10 leaq (%rdx,%rax,4), %rax testq %r10, %r10 je 0xddbb bsfq %r10, %r11 addl %ecx, %r11d movl %r11d, (%rax) leaq -0x1(%r10), %r11 andq %r11, %r10 incl %r9d addq $0x4, %rax jmp 0xdd9c addl $0x40, %ecx incq %r8 jmp 0xdd8c retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_extract_intersection_setbits_uint16
size_t bitset_extract_intersection_setbits_uint16( const uint64_t *__restrict__ words1, const uint64_t *__restrict__ words2, size_t length, uint16_t *out, uint16_t base) { int outpos = 0; for (size_t i = 0; i < length; ++i) { uint64_t w = words1[i] & words2[i]; while (w != 0) { int r = roaring_trailing_zeroes(w); out[outpos++] = (uint16_t)(r + base); w &= (w - 1); } base += 64; } return outpos; }
pushq %rbx xorl %r9d, %r9d xorl %r10d, %r10d movslq %r9d, %rax cmpq %rdx, %r10 je 0xde07 movq (%rsi,%r10,8), %r11 andq (%rdi,%r10,8), %r11 leaq (%rcx,%rax,2), %rax testq %r11, %r11 je 0xddfe bsfq %r11, %rbx addl %r8d, %ebx movw %bx, (%rax) leaq -0x1(%r11), %rbx andq %rbx, %r11 incl %r9d addq $0x2, %rax jmp 0xdddf addl $0x40, %r8d incq %r10 jmp 0xddcb popq %rbx retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_extract_setbits_sse_uint16
CROARING_TARGET_AVX2 size_t bitset_extract_setbits_sse_uint16(const uint64_t *words, size_t length, uint16_t *out, size_t outcapacity, uint16_t base) { uint16_t *initout = out; __m128i baseVec = _mm_set1_epi16(base - 1); __m128i incVec = _mm_set1_epi16(64); __m128i add8 = _mm_set1_epi16(8); uint16_t *safeout = out + outcapacity; const int numberofbytes = 2; // process two bytes at a time size_t i = 0; for (; (i < length) && (out + numberofbytes * 8 <= safeout); ++i) { uint64_t w = words[i]; if (w == 0) { baseVec = _mm_add_epi16(baseVec, incVec); } else { for (int k = 0; k < 4; ++k) { uint8_t byteA = (uint8_t)w; uint8_t byteB = (uint8_t)(w >> 8); w >>= 16; __m128i vecA = _mm_loadu_si128( (const __m128i *)vecDecodeTable_uint16[byteA]); __m128i vecB = _mm_loadu_si128( (const __m128i *)vecDecodeTable_uint16[byteB]); uint8_t advanceA = lengthTable[byteA]; uint8_t advanceB = lengthTable[byteB]; vecA = _mm_add_epi16(baseVec, vecA); baseVec = _mm_add_epi16(baseVec, add8); vecB = _mm_add_epi16(baseVec, vecB); baseVec = _mm_add_epi16(baseVec, add8); _mm_storeu_si128((__m128i *)out, vecA); out += advanceA; _mm_storeu_si128((__m128i *)out, vecB); out += advanceB; } } } base += (uint16_t)(i * 64); for (; (i < length) && (out < safeout); ++i) { uint64_t w = words[i]; while ((w != 0) && (out < safeout)) { int r = roaring_trailing_zeroes(w); *out = (uint16_t)(r + base); out++; w &= (w - 1); } base += 64; } return out - initout; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %r8, -0x10(%rsp) movl %r8d, %eax decl %eax vmovd %eax, %xmm0 vpbroadcastw %xmm0, %xmm0 leaq (%rdx,%rcx,2), %r12 xorl %r9d, %r9d vpbroadcastw 0x320e(%rip), %xmm1 # 0x11044 movl $0x4, %r8d leaq 0x537d(%rip), %r11 # 0x131c0 vpbroadcastw 0x31f4(%rip), %xmm2 # 0x11040 leaq 0x526d(%rip), %r14 # 0x130c0 vpbroadcastw 0x31e6(%rip), %xmm3 # 0x11042 movq %rdx, -0x8(%rsp) movq %rdx, %rax cmpq %rsi, %r9 jae 0xdece leaq 0x20(%rax), %rbx cmpq %r12, %rbx ja 0xdece movq (%rdi,%r9,8), %rbx testq %rbx, %rbx je 0xdec5 movl %r8d, %ebp subl $0x1, %ebp jb 0xdec9 movzbl %bl, %r15d movzbl %bh, %ecx movzbl (%r15,%r14), %r13d shll $0x4, %r15d movzbl (%rcx,%r14), %r10d shll $0x4, %ecx vpaddw (%r15,%r11), %xmm0, %xmm4 vpaddw (%rcx,%r11), %xmm0, %xmm5 shrq $0x10, %rbx vpaddw %xmm2, %xmm5, %xmm5 vpaddw %xmm3, %xmm0, %xmm0 vmovdqu %xmm4, (%rax) leaq (%rax,%r13,2), %rax vmovdqu %xmm5, (%rax) leaq (%rax,%r10,2), %rax jmp 0xde7e vpaddw %xmm1, %xmm0, %xmm0 incq %r9 jmp 0xde64 movl %r9d, %ecx shll $0x6, %ecx movq -0x10(%rsp), %rdx leal (%rdx,%rcx), %r8d cmpq %rsi, %r9 jae 0xdf14 cmpq %r12, %rax jae 0xdf14 movq (%rdi,%r9,8), %r10 testq %r10, %r10 je 0xdf0b cmpq %r12, %rax jae 0xdf0b tzcntq %r10, %rcx addl %r8d, %ecx movw %cx, (%rax) addq $0x2, %rax blsrq %r10, %r10 jmp 0xdeeb addl $0x40, %r8d incq %r9 jmp 0xdedd subq -0x8(%rsp), %rax sarq %rax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_extract_setbits_uint16
size_t bitset_extract_setbits_uint16(const uint64_t *words, size_t length, uint16_t *out, uint16_t base) { int outpos = 0; for (size_t i = 0; i < length; ++i) { uint64_t w = words[i]; while (w != 0) { int r = roaring_trailing_zeroes(w); out[outpos++] = (uint16_t)(r + base); w &= (w - 1); } base += 64; } return outpos; }
xorl %r8d, %r8d xorl %r9d, %r9d movslq %r8d, %rax cmpq %rsi, %r9 je 0xdf65 movq (%rdi,%r9,8), %r10 leaq (%rdx,%rax,2), %rax testq %r10, %r10 je 0xdf5d bsfq %r10, %r11 addl %ecx, %r11d movw %r11w, (%rax) leaq -0x1(%r10), %r11 andq %r11, %r10 incl %r8d addq $0x2, %rax jmp 0xdf3d addl $0x40, %ecx incq %r9 jmp 0xdf2d retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_set_list_withcard
uint64_t bitset_set_list_withcard(uint64_t *words, uint64_t card, const uint16_t *list, uint64_t length) { if (croaring_hardware_support() & ROARING_SUPPORTS_AVX2) { return _asm_bitset_set_list_withcard(words, card, list, length); } else { return _scalar_bitset_set_list_withcard(words, card, list, length); } }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rcx, %r12 movq %rdx, %r14 movq %rsi, %rbx movq %rdi, %r15 callq 0xbc68 testb $0x1, %al jne 0xe040 addq %r12, %r12 xorl %eax, %eax cmpq %rax, %r12 je 0xe06e movzwl (%r14,%rax), %ecx movl %ecx, %edx shrl $0x6, %edx movq (%r15,%rdx,8), %rsi movq %rsi, %rdi btsq %rcx, %rdi xorq %rdi, %rsi shrq %cl, %rsi addq %rsi, %rbx movq %rdi, (%r15,%rdx,8) addq $0x2, %rax jmp 0xe013 testq %r12, %r12 je 0xe06e leaq (%r14,%r12,2), %rax pushq $0x6 popq %rcx movzwq (%r14), %rsi shrxq %rcx, %rsi, %rdi movq (%r15,%rdi,8), %rdx btsq %rsi, %rdx movq %rdx, (%r15,%rdi,8) sbbq $-0x1, %rbx addq $0x2, %r14 cmpq %r14, %rax jne 0xe04c movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_set_list
/*
 * Set every 16-bit position in `list` within the bitset `words`.
 * Dispatches to the AVX2 kernel when available, else the scalar one.
 */
void bitset_set_list(uint64_t *words, const uint16_t *list, uint64_t length) {
    if ((croaring_hardware_support() & ROARING_SUPPORTS_AVX2) == 0) {
        _scalar_bitset_set_list(words, list, length);
        return;
    }
    _asm_bitset_set_list(words, list, length);
}
pushq %r15 pushq %r14 pushq %rbx movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 callq 0xbc68 testb $0x1, %al jne 0xe0bf addq %rbx, %rbx xorl %eax, %eax pushq $0x1 popq %rdx cmpq %rax, %rbx je 0xe159 movzwl (%r14,%rax), %ecx movl %ecx, %esi shrl $0x6, %esi movq %rdx, %rdi shlq %cl, %rdi orq %rdi, (%r15,%rsi,8) addq $0x2, %rax jmp 0xe09c leaq (%r14,%rbx,2), %rcx leaq 0x6(%r14), %rdx xorl %eax, %eax pushq $0x6 popq %rsi leaq (%rdx,%rax), %rdi cmpq %rcx, %rdi jae 0xe132 movzwl -0x6(%rdx,%rax), %edi shrxq %rsi, %rdi, %r9 movq (%r15,%r9,8), %r8 btsq %rdi, %r8 movq %r8, (%r15,%r9,8) movzwl -0x4(%rdx,%rax), %edi shrxq %rsi, %rdi, %r9 movq (%r15,%r9,8), %r8 btsq %rdi, %r8 movq %r8, (%r15,%r9,8) movzwl -0x2(%rdx,%rax), %edi shrxq %rsi, %rdi, %r9 movq (%r15,%r9,8), %r8 btsq %rdi, %r8 movq %r8, (%r15,%r9,8) movzwl (%rdx,%rax), %edi shrxq %rsi, %rdi, %r9 movq (%r15,%r9,8), %r8 btsq %rdi, %r8 movq %r8, (%r15,%r9,8) addq $0x8, %rax jmp 0xe0cc addq %rbx, %rbx pushq $0x6 popq %rcx cmpq %rax, %rbx je 0xe159 movzwl (%r14,%rax), %edx shrxq %rcx, %rdx, %rdi movq (%r15,%rdi,8), %rsi btsq %rdx, %rsi movq %rsi, (%r15,%rdi,8) addq $0x2, %rax jmp 0xe138 popq %rbx popq %r14 popq %r15 retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
bitset_flip_list
/*
 * Toggle (XOR) each 16-bit position listed in `list` inside the bitset
 * `words`. Each position selects word `pos >> 6` and bit `pos & 63`.
 * Flipping the same position twice restores the original bitset.
 */
void bitset_flip_list(uint64_t *words, const uint16_t *list, uint64_t length) {
    for (uint64_t i = 0; i < length; i++) {
        const uint64_t pos = list[i];
        words[pos >> 6] ^= UINT64_C(1) << (pos & 63);
    }
}
addq %rdx, %rdx xorl %eax, %eax pushq $0x1 popq %r8 cmpq %rax, %rdx je 0xe1cb movzwl (%rsi,%rax), %ecx movl %ecx, %r9d shrl $0x6, %r9d movq %r8, %r10 shlq %cl, %r10 xorq %r10, (%rdi,%r9,8) addq $0x2, %rax jmp 0xe1ab retq
/RoaringBitmap[P]CRoaring/src/bitset_util.c
container_free
/*
 * Release a container of the given typecode, dispatching to the matching
 * type-specific destructor. An unknown typecode is a programming error.
 */
void container_free(container_t *c, uint8_t type) {
    if (type == BITSET_CONTAINER_TYPE) {
        bitset_container_free(CAST_bitset(c));
    } else if (type == ARRAY_CONTAINER_TYPE) {
        array_container_free(CAST_array(c));
    } else if (type == RUN_CONTAINER_TYPE) {
        run_container_free(CAST_run(c));
    } else if (type == SHARED_CONTAINER_TYPE) {
        shared_container_free(CAST_shared(c));
    } else {
        assert(false);
        roaring_unreachable;
    }
}
decl %esi leaq 0x5feb(%rip), %rax # 0x141c0 movslq (%rax,%rsi,4), %rcx addq %rax, %rcx jmpq *%rcx jmp 0x361b jmp 0xa96c jmp 0xe1f2 jmp 0x2ca6
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_printf_as_uint32_array
/*
 * If `*type` says the candidate is a shared (COW) wrapper, return the
 * wrapped container and rewrite `*type` with the inner typecode; otherwise
 * return the candidate unchanged. Shared containers never nest.
 */
static inline const container_t *container_unwrap_shared(
    const container_t *candidate_shared_container, uint8_t *type) {
    if (*type != SHARED_CONTAINER_TYPE) {
        return candidate_shared_container;
    }
    *type = const_CAST_shared(candidate_shared_container)->typecode;
    assert(*type != SHARED_CONTAINER_TYPE);  // no nested sharing
    return const_CAST_shared(candidate_shared_container)->container;
}
cmpl $0x4, %esi jne 0xe24a movb 0x8(%rdi), %sil movq (%rdi), %rdi cmpb $0x3, %sil je 0xe264 movzbl %sil, %eax movl %edx, %esi cmpl $0x2, %eax jne 0x9437 jmp 0x31fd movl %edx, %esi jmp 0xb50e
/RoaringBitmap[P]CRoaring/include/roaring/containers/containers.h
get_copy_of_container
/*
 * Obtain a copy of `c`. Without copy-on-write, this is a deep clone of the
 * (unwrapped) container. With copy-on-write, an already-shared container
 * just gains a reference; otherwise `c` is wrapped in a fresh shared
 * container and `*typecode` becomes SHARED_CONTAINER_TYPE.
 * Returns NULL if allocation fails.
 */
container_t *get_copy_of_container(container_t *c, uint8_t *typecode,
                                   bool copy_on_write) {
    if (!copy_on_write) {
        // no COW: hand back a deep clone of the unwrapped container
        const container_t *actual_container =
            container_unwrap_shared(c, typecode);
        assert(*typecode != SHARED_CONTAINER_TYPE);
        return container_clone(actual_container, *typecode);
    }
    if (*typecode == SHARED_CONTAINER_TYPE) {
        // already shared: just bump the refcount
        shared_container_t *shared_container = CAST_shared(c);
        croaring_refcount_inc(&shared_container->counter);
        return shared_container;
    }
    assert(*typecode != SHARED_CONTAINER_TYPE);
    shared_container_t *shared_container =
        (shared_container_t *)roaring_malloc(sizeof(shared_container_t));
    if (shared_container == NULL) {
        return NULL;
    }
    shared_container->container = c;
    shared_container->typecode = *typecode;
    // At this point, we are creating a new shared container, so there are
    // no other references; a plain (non-atomic) store of 2 is safe as long
    // as it happens before the return statement.
    shared_container->counter = 2;
    *typecode = SHARED_CONTAINER_TYPE;
    return shared_container;
}
pushq %r14 pushq %rbx pushq %rax movq %rsi, %r14 movq %rdi, %rbx movb (%rsi), %al testl %edx, %edx je 0xe306 cmpb $0x4, %al jne 0xe325 lock incl 0xc(%rbx) jmp 0xe34c cmpb $0x4, %al jne 0xe313 movb 0x8(%rbx), %al movb %al, (%r14) movq (%rbx), %rbx movzbl %al, %esi movq %rbx, %rdi addq $0x8, %rsp popq %rbx popq %r14 jmp 0xe357 pushq $0x10 popq %rdi callq 0xbc19 testq %rax, %rax je 0xe34a movq %rbx, (%rax) movb (%r14), %cl movb %cl, 0x8(%rax) pushq $0x2 popq %rcx xchgl %ecx, 0xc(%rax) movb $0x4, (%r14) movq %rax, %rbx jmp 0xe34c xorl %ebx, %ebx movq %rbx, %rax addq $0x8, %rsp popq %rbx popq %r14 retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_clone
container_t *container_clone(const container_t *c, uint8_t typecode) { // We do not want to allow cloning of shared containers. // c = container_unwrap_shared(c, &typecode); switch (typecode) { case BITSET_CONTAINER_TYPE: return bitset_container_clone(const_CAST_bitset(c)); case ARRAY_CONTAINER_TYPE: return array_container_clone(const_CAST_array(c)); case RUN_CONTAINER_TYPE: return run_container_clone(const_CAST_run(c)); case SHARED_CONTAINER_TYPE: // Shared containers are not cloneable. Are you mixing COW and // non-COW bitmaps? return NULL; default: assert(false); roaring_unreachable; return NULL; } }
decl %esi leaq 0x5e70(%rip), %rax # 0x141d0 movslq (%rax,%rsi,4), %rcx addq %rax, %rcx jmpq *%rcx jmp 0x3637 jmp 0xa7de xorl %eax, %eax retq jmp 0x2a85
/RoaringBitmap[P]CRoaring/src/containers/containers.c
shared_container_extract_copy
/*
 * Extract an owned copy of the container wrapped by `sc`, writing the inner
 * typecode to `*typecode`. Dropping the last reference steals the payload
 * and frees the wrapper; otherwise the caller receives a clone.
 */
container_t *shared_container_extract_copy(shared_container_t *sc,
                                           uint8_t *typecode) {
    assert(sc->typecode != SHARED_CONTAINER_TYPE);
    *typecode = sc->typecode;
    if (!croaring_refcount_dec(&sc->counter)) {
        // other owners remain: give the caller its own clone
        container_t *answer = container_clone(sc->container, *typecode);
        assert(*typecode != SHARED_CONTAINER_TYPE);
        return answer;
    }
    // last reference: take ownership of the payload, release the wrapper
    container_t *answer = sc->container;
    sc->container = NULL;  // paranoid
    roaring_free(sc);
    assert(*typecode != SHARED_CONTAINER_TYPE);
    return answer;
}
movb 0x8(%rdi), %al movb %al, (%rsi) lock decl 0xc(%rdi) jne 0xe398 pushq %rbx movq (%rdi), %rbx andq $0x0, (%rdi) callq 0xbc31 movq %rbx, %rax popq %rbx retq movq (%rdi), %rdi movzbl (%rsi), %esi jmp 0xe357
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_init_iterator
roaring_container_iterator_t container_init_iterator(const container_t *c, uint8_t typecode, uint16_t *value) { switch (typecode) { case BITSET_CONTAINER_TYPE: { const bitset_container_t *bc = const_CAST_bitset(c); uint32_t wordindex = 0; uint64_t word; while ((word = bc->words[wordindex]) == 0) { wordindex++; } // word is non-zero int32_t index = wordindex * 64 + roaring_trailing_zeroes(word); *value = index; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = index, }; } case ARRAY_CONTAINER_TYPE: { const array_container_t *ac = const_CAST_array(c); *value = ac->array[0]; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = 0, }; } case RUN_CONTAINER_TYPE: { const run_container_t *rc = const_CAST_run(c); *value = rc->runs[0].value; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = 0, }; } default: assert(false); roaring_unreachable; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{0}; } }
movq 0x8(%rdi), %rax cmpl $0x3, %esi je 0xe3b1 cmpl $0x2, %esi jne 0xe3b8 movzwl (%rax), %esi xorl %eax, %eax jmp 0xe3d5 xorl %esi, %esi pushq $-0x40 popq %rcx movl %esi, %edi movq (%rax,%rdi,8), %rdi incl %esi addl $0x40, %ecx testq %rdi, %rdi je 0xe3bd bsfq %rdi, %rsi addl %ecx, %esi movl %esi, %eax movw %si, (%rdx) retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_init_iterator_last
roaring_container_iterator_t container_init_iterator_last(const container_t *c, uint8_t typecode, uint16_t *value) { switch (typecode) { case BITSET_CONTAINER_TYPE: { const bitset_container_t *bc = const_CAST_bitset(c); uint32_t wordindex = BITSET_CONTAINER_SIZE_IN_WORDS - 1; uint64_t word; while ((word = bc->words[wordindex]) == 0) { wordindex--; } // word is non-zero int32_t index = wordindex * 64 + (63 - roaring_leading_zeroes(word)); *value = index; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = index, }; } case ARRAY_CONTAINER_TYPE: { const array_container_t *ac = const_CAST_array(c); int32_t index = ac->cardinality - 1; *value = ac->array[index]; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = index, }; } case RUN_CONTAINER_TYPE: { const run_container_t *rc = const_CAST_run(c); int32_t run_index = rc->n_runs - 1; const rle16_t *last_run = &rc->runs[run_index]; *value = last_run->value + last_run->length; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{ .index = run_index, }; } default: assert(false); roaring_unreachable; return ROARING_INIT_ROARING_CONTAINER_ITERATOR_T{0}; } }
cmpl $0x3, %esi je 0xe3f4 cmpl $0x2, %esi jne 0xe40b movslq (%rdi), %rax movq 0x8(%rdi), %rcx movzwl -0x2(%rcx,%rax,2), %ecx decq %rax jmp 0xe434 movslq (%rdi), %rsi leaq -0x1(%rsi), %rax movq 0x8(%rdi), %rdi movzwl -0x2(%rdi,%rsi,4), %ecx addw -0x4(%rdi,%rsi,4), %cx jmp 0xe434 movq 0x8(%rdi), %rax movl $0x3ff, %esi # imm = 0x3FF movl $0x1003f, %ecx # imm = 0x1003F movl %esi, %edi movq (%rax,%rdi,8), %rdi decl %esi addl $-0x40, %ecx testq %rdi, %rdi je 0xe419 bsrq %rdi, %rax xorl $0x3f, %eax subl %eax, %ecx movl %ecx, %eax movw %cx, (%rdx) retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_iterator_next
/*
 * Advance the iterator to the next element of the container. On success,
 * writes the element to `*value` and returns true; returns false when the
 * container is exhausted. For run containers, `*value` is also iterator
 * state (the current element), not just an output.
 */
bool container_iterator_next(const container_t *c, uint8_t typecode,
                             roaring_container_iterator_t *it,
                             uint16_t *value) {
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            const bitset_container_t *bc = const_CAST_bitset(c);
            it->index++;
            uint32_t wordindex = it->index / 64;
            if (wordindex >= BITSET_CONTAINER_SIZE_IN_WORDS) {
                return false;
            }
            // Mask off bits below the new index within the current word.
            uint64_t word = bc->words[wordindex] &
                            (UINT64_MAX << (it->index % 64));
            // next part could be optimized/simplified
            // Skip over empty words until a set bit or the end is found.
            while (word == 0 &&
                   (wordindex + 1 < BITSET_CONTAINER_SIZE_IN_WORDS)) {
                wordindex++;
                word = bc->words[wordindex];
            }
            if (word != 0) {
                it->index = wordindex * 64 + roaring_trailing_zeroes(word);
                *value = it->index;
                return true;
            }
            return false;
        }
        case ARRAY_CONTAINER_TYPE: {
            const array_container_t *ac = const_CAST_array(c);
            it->index++;
            if (it->index < ac->cardinality) {
                *value = ac->array[it->index];
                return true;
            }
            return false;
        }
        case RUN_CONTAINER_TYPE: {
            // Avoid overflow to zero: UINT16_MAX is the largest possible
            // element, so there is nothing after it.
            if (*value == UINT16_MAX) {
                return false;
            }
            const run_container_t *rc = const_CAST_run(c);
            // Last value of the current run (value + length, inclusive).
            uint32_t limit =
                rc->runs[it->index].value + rc->runs[it->index].length;
            if (*value < limit) {
                // Still inside the current run: just step the value.
                (*value)++;
                return true;
            }
            // Current run exhausted: move to the start of the next run.
            it->index++;
            if (it->index < rc->n_runs) {
                *value = rc->runs[it->index].value;
                return true;
            }
            return false;
        }
        default:
            assert(false);
            roaring_unreachable;
            return false;
    }
}
movq %rcx, %r8 movq %rdx, %r9 cmpl $0x3, %esi je 0xe467 cmpl $0x2, %esi jne 0xe493 movslq (%r9), %rax leal 0x1(%rax), %ecx movl %ecx, (%r9) cmpl (%rdi), %ecx jge 0xe50d movq 0x8(%rdi), %rcx movzwl 0x2(%rcx,%rax,2), %eax jmp 0xe506 movzwl (%r8), %eax cmpl $0xffff, %eax # imm = 0xFFFF je 0xe50d movq 0x8(%rdi), %rcx movslq (%r9), %rdx movzwl (%rcx,%rdx,4), %esi movzwl 0x2(%rcx,%rdx,4), %r10d addl %esi, %r10d cmpl %eax, %r10d jbe 0xe4f7 incl %eax jmp 0xe506 movl (%r9), %eax incl %eax movl %eax, (%r9) pushq $0x40 popq %rcx cltd idivl %ecx cmpl $0x3ff, %eax # imm = 0x3FF ja 0xe50d movl %edx, %ecx movq 0x8(%rdi), %rdx movl %eax, %edi movq (%rdx,%rdi,8), %rsi shrq %cl, %rsi shlq %cl, %rsi movl %eax, %ecx shll $0x6, %ecx andl $0x3ff, %eax # imm = 0x3FF shll $0x6, %eax testq %rsi, %rsi jne 0xe4e5 cmpq $0x3fe, %rdi # imm = 0x3FE ja 0xe4e5 movq 0x8(%rdx,%rdi,8), %rsi incq %rdi addl $0x40, %ecx addl $0x40, %eax jmp 0xe4c7 testq %rsi, %rsi je 0xe50d bsfq %rsi, %rdx addl %edx, %ecx movl %ecx, (%r9) addl %edx, %eax jmp 0xe506 leal 0x1(%rdx), %eax movl %eax, (%r9) cmpl (%rdi), %eax jge 0xe50d movzwl 0x4(%rcx,%rdx,4), %eax movw %ax, (%r8) movb $0x1, %al retq xorl %eax, %eax retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_iterator_prev
/*
 * Step the iterator back to the previous element of the container. On
 * success, writes the element to `*value` and returns true; returns false
 * when there is no smaller element. For run containers, `*value` is also
 * iterator state (the current element), not just an output.
 */
bool container_iterator_prev(const container_t *c, uint8_t typecode,
                             roaring_container_iterator_t *it,
                             uint16_t *value) {
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            // Pre-decrement; a negative index means we walked off the front.
            if (--it->index < 0) {
                return false;
            }
            const bitset_container_t *bc = const_CAST_bitset(c);
            int32_t wordindex = it->index / 64;
            // Mask off bits above the new index within the current word.
            uint64_t word = bc->words[wordindex] &
                            (UINT64_MAX >> (63 - (it->index % 64)));
            // Skip backward over empty words.
            while (word == 0 && --wordindex >= 0) {
                word = bc->words[wordindex];
            }
            if (word == 0) {
                return false;
            }
            // Highest set bit of `word` is the previous element.
            it->index = (wordindex * 64) + (63 - roaring_leading_zeroes(word));
            *value = it->index;
            return true;
        }
        case ARRAY_CONTAINER_TYPE: {
            if (--it->index < 0) {
                return false;
            }
            const array_container_t *ac = const_CAST_array(c);
            *value = ac->array[it->index];
            return true;
        }
        case RUN_CONTAINER_TYPE: {
            // 0 is the smallest possible element; nothing precedes it.
            if (*value == 0) {
                return false;
            }
            const run_container_t *rc = const_CAST_run(c);
            (*value)--;
            // Still inside the current run?
            if (*value >= rc->runs[it->index].value) {
                return true;
            }
            // Fell off the front of the current run: move to the previous
            // run and position at its last value (value + length).
            if (--it->index < 0) {
                return false;
            }
            *value = rc->runs[it->index].value + rc->runs[it->index].length;
            return true;
        }
        default:
            assert(false);
            roaring_unreachable;
            return false;
    }
}
movq %rcx, %r8 cmpl $0x3, %esi je 0xe53b cmpl $0x2, %esi jne 0xe56f movl (%rdx), %ecx leal -0x1(%rcx), %eax movl %eax, (%rdx) testl %ecx, %ecx jle 0xe5b0 movq 0x8(%rdi), %rcx movl %eax, %eax movzwl (%rcx,%rax,2), %eax jmp 0xe5c0 movzwl (%r8), %esi testw %si, %si je 0xe5b0 decl %esi movw %si, (%r8) movq 0x8(%rdi), %rcx movslq (%rdx), %rdi movb $0x1, %al cmpw (%rcx,%rdi,4), %si jae 0xe5b2 leal -0x1(%rdi), %eax movl %eax, (%rdx) testl %edi, %edi jle 0xe5b0 movl %eax, %edx movzwl 0x2(%rcx,%rdx,4), %eax addw (%rcx,%rdx,4), %ax jmp 0xe5c0 movl (%rdx), %ecx leal -0x1(%rcx), %eax movl %eax, (%rdx) testl %ecx, %ecx jle 0xe5b0 movl %eax, %esi shrl $0x6, %esi movq 0x8(%rdi), %rdi movq (%rdi,%rsi,8), %r9 movl %eax, %ecx notb %cl shlq %cl, %r9 shrq %cl, %r9 orl $0x3f, %eax movl %eax, %ecx testq %r9, %r9 jne 0xe5b3 testq %rsi, %rsi jle 0xe5b0 movq -0x8(%rdi,%rsi,8), %r9 decq %rsi addl $-0x40, %ecx addl $-0x40, %eax jmp 0xe596 xorl %eax, %eax retq bsrq %r9, %rsi xorl $0x3f, %esi subl %esi, %ecx movl %ecx, (%rdx) subl %esi, %eax movw %ax, (%r8) movb $0x1, %al retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_iterator_lower_bound
/*
 * Position the iterator at the smallest element >= `val`. Returns false if
 * `val` exceeds the container's maximum; otherwise writes the found element
 * to `*value_out` and returns true.
 */
bool container_iterator_lower_bound(const container_t *c, uint8_t typecode,
                                    roaring_container_iterator_t *it,
                                    uint16_t *value_out, uint16_t val) {
    if (val > container_maximum(c, typecode)) {
        return false;
    }
    if (typecode == BITSET_CONTAINER_TYPE) {
        const bitset_container_t *bc = const_CAST_bitset(c);
        it->index = bitset_container_index_equalorlarger(bc, val);
        // For bitsets the index is the element itself.
        *value_out = it->index;
        return true;
    }
    if (typecode == ARRAY_CONTAINER_TYPE) {
        const array_container_t *ac = const_CAST_array(c);
        it->index = array_container_index_equalorlarger(ac, val);
        *value_out = ac->array[it->index];
        return true;
    }
    if (typecode == RUN_CONTAINER_TYPE) {
        const run_container_t *rc = const_CAST_run(c);
        it->index = run_container_index_equalorlarger(rc, val);
        // If `val` falls inside the run, the lower bound is `val` itself;
        // otherwise it is the start of the next run.
        *value_out = (rc->runs[it->index].value <= val)
                         ? val
                         : rc->runs[it->index].value;
        return true;
    }
    assert(false);
    roaring_unreachable;
    return false;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movl %r8d, %ebx movq %rcx, %r14 movq %rdx, %r15 movl %esi, %r13d movq %rdi, %r12 movl %r13d, %eax cmpl $0x4, %esi jne 0xe5f2 movb 0x8(%r12), %al movq (%r12), %rdi cmpb $0x3, %al je 0xe611 movzbl %al, %eax cmpl $0x2, %eax jne 0xe629 movslq (%rdi), %rax testq %rax, %rax je 0xe632 movq 0x8(%rdi), %rcx movzwl -0x2(%rcx,%rax,2), %ebp jmp 0xe634 movslq (%rdi), %rax testq %rax, %rax je 0xe632 movq 0x8(%rdi), %rcx movzwl -0x2(%rcx,%rax,4), %ebp addw -0x4(%rcx,%rax,4), %bp jmp 0xe634 callq 0x993c movl %eax, %ebp jmp 0xe634 xorl %ebp, %ebp cmpw %bx, %bp jb 0xe693 cmpb $0x3, %r13b je 0xe663 movzbl %r13b, %eax movzwl %bx, %esi cmpl $0x2, %eax jne 0xe684 movq %r12, %rdi callq 0x2899 movl %eax, (%r15) movq 0x8(%r12), %rcx cltq movzwl (%rcx,%rax,2), %eax jmp 0xe68f movzwl %bx, %esi movq %r12, %rdi callq 0xa592 movl %eax, (%r15) movq 0x8(%r12), %rcx cltq movzwl (%rcx,%rax,4), %eax cmpw %bx, %ax cmovbel %ebx, %eax jmp 0xe68f movq %r12, %rdi callq 0x9c65 movl %eax, (%r15) movw %ax, (%r14) cmpw %bx, %bp setae %al addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_iterator_read_into_uint32
/*
 * Bulk-read up to `count` elements from the iterator into `buf`, OR-ing the
 * `high16` prefix onto each 16-bit element to form a 32-bit value. Writes
 * the number of elements emitted to `*consumed`. Returns true if elements
 * remain afterwards (with `*value_out` / `it` positioned on the next one),
 * false when the container is exhausted. For run containers, `*value_out`
 * is also iterator state (the current element), not just an output.
 */
bool container_iterator_read_into_uint32(const container_t *c,
                                         uint8_t typecode,
                                         roaring_container_iterator_t *it,
                                         uint32_t high16, uint32_t *buf,
                                         uint32_t count, uint32_t *consumed,
                                         uint16_t *value_out) {
    *consumed = 0;
    if (count == 0) {
        return false;
    }
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            const bitset_container_t *bc = const_CAST_bitset(c);
            uint32_t wordindex = it->index / 64;
            // Mask off bits below the current index within its word.
            uint64_t word =
                bc->words[wordindex] & (UINT64_MAX << (it->index % 64));
            do {
                // Read set bits.
                while (word != 0 && *consumed < count) {
                    *buf = high16 |
                           (wordindex * 64 + roaring_trailing_zeroes(word));
                    word = word & (word - 1);  // clear lowest set bit
                    buf++;
                    (*consumed)++;
                }
                // Skip unset bits.
                while (word == 0 &&
                       wordindex + 1 < BITSET_CONTAINER_SIZE_IN_WORDS) {
                    wordindex++;
                    word = bc->words[wordindex];
                }
            } while (word != 0 && *consumed < count);
            if (word != 0) {
                // More elements remain: park the iterator on the next one.
                it->index = wordindex * 64 + roaring_trailing_zeroes(word);
                *value_out = it->index;
                return true;
            }
            return false;
        }
        case ARRAY_CONTAINER_TYPE: {
            const array_container_t *ac = const_CAST_array(c);
            uint32_t num_values =
                minimum_uint32(ac->cardinality - it->index, count);
            for (uint32_t i = 0; i < num_values; i++) {
                buf[i] = high16 | ac->array[it->index + i];
            }
            *consumed += num_values;
            it->index += num_values;
            if (it->index < ac->cardinality) {
                *value_out = ac->array[it->index];
                return true;
            }
            return false;
        }
        case RUN_CONTAINER_TYPE: {
            const run_container_t *rc = const_CAST_run(c);
            do {
                // Largest value of the current run (start + length).
                uint32_t largest_run_value = rc->runs[it->index].value +
                                             rc->runs[it->index].length;
                // How many values to copy from this run this pass.
                uint32_t num_values = minimum_uint32(
                    largest_run_value - *value_out + 1, count - *consumed);
                for (uint32_t i = 0; i < num_values; i++) {
                    buf[i] = high16 | (*value_out + i);
                }
                *value_out += num_values;
                buf += num_values;
                *consumed += num_values;
                // We check for `*value_out == 0` because
                // `*value_out += num_values` can overflow when
                // `*value_out == UINT16_MAX`, and `count > length`.
                // In this case `*value_out` will overflow to 0.
                if (*value_out > largest_run_value || *value_out == 0) {
                    // Current run exhausted: advance to the next run.
                    it->index++;
                    if (it->index < rc->n_runs) {
                        *value_out = rc->runs[it->index].value;
                    } else {
                        return false;
                    }
                }
            } while (*consumed < count);
            return true;
        }
        default:
            assert(false);
            roaring_unreachable;
            return 0;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx movq 0x28(%rsp), %r14 andl $0x0, (%r14) testl %r9d, %r9d je 0xe848 movl %ecx, %r10d movq %rdx, %r11 movq 0x30(%rsp), %rbx cmpb $0x1, %sil je 0xe72a movzbl %sil, %eax cmpl $0x2, %eax jne 0xe7c5 movl (%rdi), %eax subl (%r11), %eax cmpl %r9d, %eax cmovael %r9d, %eax xorl %ecx, %ecx cmpq %rcx, %rax je 0xe70a movq 0x8(%rdi), %rdx movl (%r11), %esi addl %ecx, %esi movzwl (%rdx,%rsi,2), %edx orl %r10d, %edx movl %edx, (%r8,%rcx,4) incq %rcx jmp 0xe6ec addl %eax, (%r14) addl (%r11), %eax movl %eax, (%r11) cmpl (%rdi), %eax jge 0xe848 movq 0x8(%rdi), %rcx cltq movzwl (%rcx,%rax,2), %ebp jmp 0xe7c0 movl (%r11), %eax movl $0x40, %ecx cltd idivl %ecx movl %edx, %ecx movq 0x8(%rdi), %rdx movq (%rdx,%rax,8), %rsi shrq %cl, %rsi shlq %cl, %rsi xorl %edi, %edi movl %eax, %ecx shll $0x6, %ecx movl %ecx, %ebp orl %r10d, %ebp testq %rsi, %rsi je 0xe779 cmpl %r9d, %edi jae 0xe779 bsfq %rsi, %rdi orl %ebp, %edi movl %edi, (%r8) leaq -0x1(%rsi), %rdi andq %rdi, %rsi addq $0x4, %r8 movl (%r14), %edi incl %edi movl %edi, (%r14) jmp 0xe751 movl %eax, %ebp andl $0x3ff, %ebp # imm = 0x3FF shll $0x6, %ebp leal 0x1(%rax), %r15d testq %rsi, %rsi jne 0xe7a7 cmpl $0x3ff, %r15d # imm = 0x3FF ja 0xe7a7 movq (%rdx,%r15,8), %rsi addl $0x40, %ecx addl $0x40, %ebp incq %r15 incl %eax jmp 0xe788 testq %rsi, %rsi je 0xe848 cmpl %r9d, %edi jb 0xe747 bsfq %rsi, %rax addl %eax, %ecx movl %ecx, (%r11) addl %eax, %ebp movw %bp, (%rbx) jmp 0xe844 movq 0x8(%rdi), %rax movzwl (%rbx), %ecx xorl %r15d, %r15d movslq (%r11), %rdx movzwl (%rax,%rdx,4), %ebp movzwl 0x2(%rax,%rdx,4), %esi addl %ebp, %esi movzwl %cx, %ecx movl %esi, %ebp subl %ecx, %ebp incl %ebp movl %r9d, %edx subl %r15d, %edx cmpl %edx, %ebp cmovbl %ebp, %edx xorl %r15d, %r15d cmpq %r15, %rdx je 0xe809 leal (%rcx,%r15), %ebp orl %r10d, %ebp movl %ebp, (%r8,%r15,4) incq %r15 jmp 0xe7f4 addl %edx, %ecx movw %cx, (%rbx) movl (%r14), %r15d addl %edx, %r15d movl %r15d, (%r14) testw %cx, %cx je 0xe823 movzwl %cx, 
%ebp cmpl %ebp, %esi jae 0xe83b movslq (%r11), %rcx leal 0x1(%rcx), %esi movl %esi, (%r11) cmpl (%rdi), %esi jge 0xe848 movzwl 0x4(%rax,%rcx,4), %ecx movw %cx, (%rbx) movl (%r14), %r15d leaq (%r8,%rdx,4), %r8 cmpl %r9d, %r15d jb 0xe7cf movb $0x1, %al jmp 0xe84a xorl %eax, %eax popq %rbx popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
container_iterator_read_into_uint64
/*
 * Bulk-read up to `count` elements from the iterator into `buf`, OR-ing the
 * `high48` prefix onto each 16-bit element to form a 64-bit value. Writes
 * the number of elements emitted to `*consumed`. Returns true if elements
 * remain afterwards (with `*value_out` / `it` positioned on the next one),
 * false when the container is exhausted. For run containers, `*value_out`
 * is also iterator state (the current element), not just an output.
 */
bool container_iterator_read_into_uint64(const container_t *c,
                                         uint8_t typecode,
                                         roaring_container_iterator_t *it,
                                         uint64_t high48, uint64_t *buf,
                                         uint32_t count, uint32_t *consumed,
                                         uint16_t *value_out) {
    *consumed = 0;
    if (count == 0) {
        return false;
    }
    switch (typecode) {
        case BITSET_CONTAINER_TYPE: {
            const bitset_container_t *bc = const_CAST_bitset(c);
            uint32_t wordindex = it->index / 64;
            // Mask off bits below the current index within its word.
            uint64_t word =
                bc->words[wordindex] & (UINT64_MAX << (it->index % 64));
            do {
                // Read set bits.
                while (word != 0 && *consumed < count) {
                    *buf = high48 |
                           (wordindex * 64 + roaring_trailing_zeroes(word));
                    word = word & (word - 1);  // clear lowest set bit
                    buf++;
                    (*consumed)++;
                }
                // Skip unset bits.
                while (word == 0 &&
                       wordindex + 1 < BITSET_CONTAINER_SIZE_IN_WORDS) {
                    wordindex++;
                    word = bc->words[wordindex];
                }
            } while (word != 0 && *consumed < count);
            if (word != 0) {
                // More elements remain: park the iterator on the next one.
                it->index = wordindex * 64 + roaring_trailing_zeroes(word);
                *value_out = it->index;
                return true;
            }
            return false;
        }
        case ARRAY_CONTAINER_TYPE: {
            const array_container_t *ac = const_CAST_array(c);
            uint32_t num_values =
                minimum_uint32(ac->cardinality - it->index, count);
            for (uint32_t i = 0; i < num_values; i++) {
                buf[i] = high48 | ac->array[it->index + i];
            }
            *consumed += num_values;
            it->index += num_values;
            if (it->index < ac->cardinality) {
                *value_out = ac->array[it->index];
                return true;
            }
            return false;
        }
        case RUN_CONTAINER_TYPE: {
            const run_container_t *rc = const_CAST_run(c);
            do {
                // Largest value of the current run (start + length).
                uint32_t largest_run_value = rc->runs[it->index].value +
                                             rc->runs[it->index].length;
                // How many values to copy from this run this pass.
                uint32_t num_values = minimum_uint32(
                    largest_run_value - *value_out + 1, count - *consumed);
                for (uint32_t i = 0; i < num_values; i++) {
                    buf[i] = high48 | (*value_out + i);
                }
                *value_out += num_values;
                buf += num_values;
                *consumed += num_values;
                // We check for `*value_out == 0` because
                // `*value_out += num_values` can overflow when
                // `*value_out == UINT16_MAX`, and `count > length`.
                // In this case `*value_out` will overflow to 0.
                if (*value_out > largest_run_value || *value_out == 0) {
                    // Current run exhausted: advance to the next run.
                    it->index++;
                    if (it->index < rc->n_runs) {
                        *value_out = rc->runs[it->index].value;
                    } else {
                        return false;
                    }
                }
            } while (*consumed < count);
            return true;
        }
        default:
            assert(false);
            roaring_unreachable;
            return 0;
    }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx movq 0x30(%rsp), %r14 andl $0x0, (%r14) testl %r9d, %r9d je 0xe9f7 movq %rcx, %r10 movq %rdx, %r11 movq 0x38(%rsp), %rbx cmpb $0x1, %sil je 0xe8d7 movzbl %sil, %eax cmpl $0x2, %eax jne 0xe971 movl (%rdi), %eax movl (%r11), %ecx subl %ecx, %eax cmpl %r9d, %eax cmovael %r9d, %eax xorl %edx, %edx cmpq %rdx, %rax je 0xe8b7 movq 0x8(%rdi), %rsi leal (%rcx,%rdx), %r9d movzwl (%rsi,%r9,2), %esi orq %r10, %rsi movq %rsi, (%r8,%rdx,8) incq %rdx jmp 0xe899 movl %eax, (%r14) addl (%r11), %eax movl %eax, (%r11) cmpl (%rdi), %eax jge 0xe9f7 movq 0x8(%rdi), %rcx cltq movzwl (%rcx,%rax,2), %ebp jmp 0xe969 movl (%r11), %eax movl $0x40, %ecx cltd idivl %ecx movl %edx, %ecx movq 0x8(%rdi), %rdx movq (%rdx,%rax,8), %rsi shrq %cl, %rsi shlq %cl, %rsi xorl %ecx, %ecx movl %eax, %edi shll $0x6, %edi testq %rsi, %rsi je 0xe922 cmpl %r9d, %ecx jae 0xe922 bsfq %rsi, %r15 orl %edi, %r15d orq %r10, %r15 movq %r15, (%r8) leaq -0x1(%rsi), %r15 andq %r15, %rsi addq $0x8, %r8 incl %ecx movl %ecx, (%r14) jmp 0xe8f9 movl %eax, %ebp andl $0x3ff, %ebp # imm = 0x3FF shll $0x6, %ebp leal 0x1(%rax), %r15d testq %rsi, %rsi jne 0xe950 cmpl $0x3ff, %r15d # imm = 0x3FF ja 0xe950 movq (%rdx,%r15,8), %rsi addl $0x40, %edi addl $0x40, %ebp incq %r15 incl %eax jmp 0xe931 testq %rsi, %rsi je 0xe9f7 cmpl %r9d, %ecx jb 0xe8f4 bsfq %rsi, %rax addl %eax, %edi movl %edi, (%r11) addl %eax, %ebp movw %bp, (%rbx) jmp 0xe9f3 movq 0x8(%rdi), %rax movzwl (%rbx), %ecx xorl %edx, %edx movslq (%r11), %rsi movzwl (%rax,%rsi,4), %r15d movzwl 0x2(%rax,%rsi,4), %ebp addl %r15d, %ebp movzwl %cx, %ecx movl %ebp, %r15d subl %ecx, %r15d incl %r15d movl %r9d, %esi subl %edx, %esi cmpl %esi, %r15d cmovbl %r15d, %esi xorl %r15d, %r15d cmpq %r15, %rsi je 0xe9ba leal (%rcx,%r15), %r12d orq %r10, %r12 movq %r12, (%r8,%r15,8) incq %r15 jmp 0xe9a5 addl %esi, %ecx movw %cx, (%rbx) addl %esi, %edx movl %edx, (%r14) testw %cx, %cx je 0xe9d2 movzwl %cx, %r15d cmpl %r15d, %ebp jae 0xe9ea 
movslq (%r11), %rcx leal 0x1(%rcx), %edx movl %edx, (%r11) cmpl (%rdi), %edx jge 0xe9f7 movzwl 0x4(%rax,%rcx,4), %ecx movw %cx, (%rbx) movl (%r14), %edx leaq (%r8,%rsi,8), %r8 cmpl %r9d, %edx jb 0xe97a movb $0x1, %al jmp 0xe9f9 xorl %eax, %eax popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq
/RoaringBitmap[P]CRoaring/src/containers/containers.c
is_complete(char const*, char const*, unsigned long, int*)
/*
 * Check whether the buffer already holds a complete HTTP message head,
 * i.e. ends with an empty line (two consecutive line breaks, CRLF or bare
 * LF). Returns a pointer just past the terminating empty line on success,
 * or NULL with `*ret` set to -2 (incomplete) / -1 (malformed CRLF).
 * `last_len` is how much of the buffer was already scanned on a previous
 * call; scanning resumes 3 bytes before that point so a break spanning the
 * old/new boundary is still seen.
 *
 * NOTE(review): CHECK_EOF() and EXPECT_CHAR() are macros defined elsewhere
 * in picohttpparser.c; they return NULL from this function (setting `*ret`)
 * on end-of-input or a mismatched byte — that is why the trailing
 * `*ret = -2; return NULL;` after the infinite loop is never reached.
 */
static const char *is_complete(const char *buf, const char *buf_end,
                               size_t last_len, int *ret) {
    int ret_cnt = 0;  // consecutive line breaks seen so far
    buf = last_len < 3 ? buf : buf + last_len - 3;

    while (1) {
        CHECK_EOF();
        if (*buf == '\015') {  // CR: must be followed by LF
            ++buf;
            CHECK_EOF();
            EXPECT_CHAR('\012');
            ++ret_cnt;
        } else if (*buf == '\012') {  // bare LF also counts as a break
            ++buf;
            ++ret_cnt;
        } else {
            ++buf;
            ret_cnt = 0;  // any other byte resets the streak
        }
        if (ret_cnt == 2) {
            return buf;  // empty line found: head is complete
        }
    }

    *ret = -2;
    return NULL;
}
cmpq $0x3, %rdx leaq -0x3(%rdi,%rdx), %r9 cmovbq %rdi, %r9 xorl %r8d, %r8d pushq $-0x2 popq %rdx pushq $-0x1 popq %rdi cmpq %rsi, %r9 je 0x7808 movzbl (%r9), %r11d leaq 0x1(%r9), %rax cmpl $0xa, %r11d je 0x77f3 xorl %r10d, %r10d cmpl $0xd, %r11d jne 0x77f9 cmpq %rsi, %rax je 0x7808 cmpb $0xa, (%rax) jne 0x7806 addq $0x2, %r9 incl %r8d movq %r9, %rax jmp 0x77f6 incl %r8d movl %r8d, %r10d movq %rax, %r9 movl %r10d, %r8d cmpl $0x2, %r10d jne 0x77c1 retq movl %edi, %edx movl %edx, (%rcx) xorl %eax, %eax retq
/mattn[P]clask/clask/../clask/picohttpparser.c
phr_parse_response
/*
 * Parse an HTTP response head from `buf_start`/`len`. On entry,
 * `*num_headers` is the capacity of `headers`; on success it holds the
 * number of headers parsed. Returns the number of bytes consumed, -2 when
 * the response is incomplete, or -1 on a parse error. `last_len` is the
 * length already examined by a previous partial call (0 for the first).
 */
int phr_parse_response(const char *buf_start, size_t len, int *minor_version,
                       int *status, const char **msg, size_t *msg_len,
                       struct phr_header *headers, size_t *num_headers,
                       size_t last_len) {
    const char *buf = buf_start;
    const char *buf_end = buf_start + len;
    size_t max_headers = *num_headers;
    int r;

    /* reset all outputs up front */
    *minor_version = -1;
    *status = 0;
    *msg = NULL;
    *msg_len = 0;
    *num_headers = 0;

    /* if last_len != 0, check if the response is complete (a fast
     * countermeasure against slowloris) */
    if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
        return r;
    }

    buf = parse_response(buf, buf_end, minor_version, status, msg, msg_len,
                         headers, num_headers, max_headers, &r);
    if (buf == NULL) {
        return r;
    }
    return (int)(buf - buf_start);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %r9, %r12 movq %r8, %r13 movq %rcx, %rbp movq %rdx, %r15 movq %rsi, %r14 movq 0x58(%rsp), %rax movq (%rax), %rcx movq %rcx, 0x10(%rsp) orl $-0x1, (%rdx) movq %rdi, %rbx andl $0x0, (%rbp) movq 0x60(%rsp), %rdx andq $0x0, (%r8) addq %rdi, %r14 andq $0x0, (%r9) andq $0x0, (%rax) testq %rdx, %rdx je 0x7873 leaq 0xc(%rsp), %rcx movq %rbx, %rdi movq %r14, %rsi callq 0x77ab testq %rax, %rax je 0x7901 leaq 0xc(%rsp), %rcx movq %rbx, %rdi movq %r14, %rsi movq %r15, %rdx callq 0x8174 testq %rax, %rax je 0x7901 cmpb $0x20, (%rax) jne 0x78fc movq 0x50(%rsp), %r15 movq %r14, %rcx subq %rax, %rcx incq %rax cmpq %r14, %rax je 0x78b6 movb (%rax), %dl incq %rax decq %rcx cmpb $0x20, %dl je 0x789e cmpq $0x3, %rcx jg 0x78c0 movl $0xfffffffe, 0xc(%rsp) # imm = 0xFFFFFFFE jmp 0x7901 leal -0x3a(%rdx), %ecx cmpb $-0xb, %cl jbe 0x78fc addb $-0x30, %dl movzbl %dl, %ecx imull $0x64, %ecx, %ecx movl %ecx, (%rbp) movb (%rax), %dl leal -0x3a(%rdx), %esi cmpb $-0xb, %sil jbe 0x78fc movzbl %dl, %edx imull $0xa, %edx, %edx addb $0x20, %dl movzbl %dl, %edx addl %edx, %ecx movl %ecx, (%rbp) movb 0x1(%rax), %dl leal -0x3a(%rdx), %esi cmpb $-0xb, %sil ja 0x7914 orl $-0x1, 0xc(%rsp) movl 0xc(%rsp), %eax addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq addq $0x2, %rax addb $-0x30, %dl movzbl %dl, %edx addl %edx, %ecx movl %ecx, (%rbp) leaq 0xc(%rsp), %r8 movq %rax, %rdi movq %r14, %rsi movq %r13, %rdx movq %r12, %rcx callq 0x81d3 testq %rax, %rax je 0x7901 movq (%r12), %rcx testq %rcx, %rcx je 0x796a movq (%r13), %rdx cmpb $0x20, (%rdx) jne 0x78fc decq %rcx incq %rdx movq %rdx, (%r13) movq %rcx, (%r12) decq %rcx cmpb $0x20, (%rdx) leaq 0x1(%rdx), %rdx je 0x7956 leaq 0xc(%rsp), %r9 movq %rax, %rdi movq %r14, %rsi movq %r15, %rdx movq 0x58(%rsp), %rcx movq 0x10(%rsp), %r8 callq 0x7a0b testq %rax, %rax je 0x7901 subl %ebx, %eax jmp 0x7905
/mattn[P]clask/clask/../clask/picohttpparser.c
parse_headers(char const*, char const*, phr_header*, unsigned long*, unsigned long, int*)
static const char *parse_headers(const char *buf, const char *buf_end, struct phr_header *headers, size_t *num_headers, size_t max_headers, int *ret) { for (;; ++*num_headers) { CHECK_EOF(); if (*buf == '\015') { ++buf; EXPECT_CHAR('\012'); break; } else if (*buf == '\012') { ++buf; break; } if (*num_headers == max_headers) { *ret = -1; return NULL; } if (!(*num_headers != 0 && (*buf == ' ' || *buf == '\t'))) { /* parsing name, but do not discard SP before colon, see * http://www.mozilla.org/security/announce/2006/mfsa2006-33.html */ headers[*num_headers].name = buf; static const char ALIGNED(16) ranges1[] = "\x00 " /* control chars and up to SP */ "\"\"" /* 0x22 */ "()" /* 0x28,0x29 */ ",," /* 0x2c */ "//" /* 0x2f */ ":@" /* 0x3a-0x40 */ "[]" /* 0x5b-0x5d */ "{\377"; /* 0x7b-0xff */ int found; buf = findchar_fast(buf, buf_end, ranges1, sizeof(ranges1) - 1, &found); if (!found) { CHECK_EOF(); } while (1) { if (*buf == ':') { break; } else if (!token_char_map[(unsigned char)*buf]) { *ret = -1; return NULL; } ++buf; CHECK_EOF(); } if ((headers[*num_headers].name_len = buf - headers[*num_headers].name) == 0) { *ret = -1; return NULL; } ++buf; for (;; ++buf) { CHECK_EOF(); if (!(*buf == ' ' || *buf == '\t')) { break; } } } else { headers[*num_headers].name = NULL; headers[*num_headers].name_len = 0; } const char *value; size_t value_len; if ((buf = get_token_to_eol(buf, buf_end, &value, &value_len, ret)) == NULL) { return NULL; } /* remove trailing SPs and HTABs */ const char *value_end = value + value_len; for (; value_end != value; --value_end) { const char c = *(value_end - 1); if (!(c == ' ' || c == '\t')) { break; } } headers[*num_headers].value = value; headers[*num_headers].value_len = value_end - value; } return buf; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rdx, %rbx movq %rsi, %r15 movq %rdi, %rax leaq 0x105f3(%rip), %rbp # 0x1801c leaq 0x20(%rsp), %rdx leaq 0x18(%rsp), %r14 xorps %xmm0, %xmm0 cmpq %r15, %rax je 0x7b92 movzbl (%rax), %esi cmpl $0xa, %esi je 0x7b78 cmpl $0xd, %esi je 0x7b7d movq (%rcx), %rdi cmpq %r8, %rdi je 0x7b9b testq %rdi, %rdi je 0x7ae4 cmpl $0x9, %esi je 0x7a6f cmpl $0x20, %esi jne 0x7ae4 movq %rcx, 0x10(%rsp) movq %r8, 0x8(%rsp) shlq $0x5, %rdi movq %rbx, %r13 movups %xmm0, (%rbx,%rdi) movq %rax, %rdi movq %r15, %rsi movq %rdx, %rbx movq %r14, %r12 movq %r14, %rcx movq %r9, %r14 movq %r9, %r8 callq 0x81d3 testq %rax, %rax je 0x7b9f movq 0x20(%rsp), %r10 movq 0x18(%rsp), %rdi movq %rbx, %rdx movq %r14, %r9 movq 0x8(%rsp), %r8 movq 0x10(%rsp), %rcx movq %r13, %rbx movq %r12, %r14 xorps %xmm0, %xmm0 testq %rdi, %rdi je 0x7b1e movzbl -0x1(%r10,%rdi), %esi cmpl $0x20, %esi je 0x7adf cmpl $0x9, %esi jne 0x7b44 decq %rdi jmp 0x7aca shlq $0x5, %rdi leaq (%rbx,%rdi), %r10 movq %rax, (%rbx,%rdi) leaq 0x1(%rax), %r11 xorl %esi, %esi movzbl (%rax,%rsi), %edi cmpq $0x3a, %rdi je 0x7b23 cmpb $0x0, (%rdi,%rbp) je 0x7b9b leaq (%rax,%rsi), %rdi incq %rdi incq %rsi incq %r11 cmpq %r15, %rdi jne 0x7af6 jmp 0x7b92 movq %r10, %rdi jmp 0x7b47 movq %rsi, 0x8(%r10) testq %rsi, %rsi je 0x7b9b cmpq %r15, %r11 je 0x7b92 movzbl (%r11), %eax cmpl $0x20, %eax je 0x7b3f cmpl $0x9, %eax jne 0x7b63 incq %r11 jmp 0x7b2c addq %r10, %rdi movq (%rcx), %rsi shlq $0x5, %rsi movq %r10, 0x10(%rbx,%rsi) subq %r10, %rdi movq %rdi, 0x18(%rbx,%rsi) incq (%rcx) jmp 0x7a36 movq %rbx, %r13 movq %rcx, 0x10(%rsp) movq %r8, 0x8(%rsp) movq %r11, %rax jmp 0x7a84 incq %rax jmp 0x7ba1 leaq 0x1(%rax), %rcx cmpq %r15, %rcx je 0x7b92 cmpb $0xa, 0x1(%rax) jne 0x7b9b addq $0x2, %rax jmp 0x7ba1 movl $0xfffffffe, (%r9) # imm = 0xFFFFFFFE jmp 0x7b9f orl $-0x1, (%r9) xorl %eax, %eax addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/mattn[P]clask/clask/../clask/picohttpparser.c
main
int main(int argc, char* argv[]) { argparse::ArgumentParser program("file", CLASK_VERSION); program.add_argument("-dir") .default_value("./public") .help("public directory") .metavar("PUBLIC_DIR") .nargs(1); program.add_argument("-addr") .default_value("0.0.0.0:8080") .help("server address") .metavar("ADDR") .nargs(1); program.parse_args(argc, argv); auto s = clask::server(); s.log.default_level = clask::log_level::INFO; s.static_dir("/", program.get<std::string>("-dir")); auto addr = program.get<std::string>("-addr"); std::cerr << "started " << addr << std::endl; s.run(addr); }
pushq %rbp pushq %r14 pushq %rbx subq $0x5e0, %rsp # imm = 0x5E0 movq %rsi, %rbx movl %edi, %ebp leaq 0x1088b(%rip), %rsi # 0x18689 leaq 0xf0(%rsp), %rdi leaq 0x2f8(%rsp), %rdx callq 0x88b6 leaq 0x10874(%rip), %rsi # 0x1868e leaq 0xd0(%rsp), %rdi leaq 0x10(%rsp), %rdx callq 0x88b6 movq 0x1d165(%rip), %r9 # 0x24f98 leaq 0x110(%rsp), %rdi leaq 0xf0(%rsp), %rsi leaq 0xd0(%rsp), %rdx pushq $0x3 popq %rcx pushq $0x1 popq %r8 callq 0x88f2 leaq 0xd0(%rsp), %rdi callq 0x6958 leaq 0xf0(%rsp), %rdi callq 0x6958 leaq 0x1081c(%rip), %rsi # 0x18694 leaq 0x110(%rsp), %rdi callq 0x8d06 leaq 0x1080d(%rip), %rsi # 0x18699 movq %rax, %rdi callq 0x8d8a movq %rax, %r14 leaq 0x10804(%rip), %rsi # 0x186a2 leaq 0xb0(%rsp), %rdi leaq 0x2f8(%rsp), %rdx callq 0x88b6 leaq 0x28(%r14), %rdi leaq 0xb0(%rsp), %rsi callq 0x6640 leaq 0x107e8(%rip), %rsi # 0x186b3 leaq 0x90(%rsp), %rdi leaq 0x10(%rsp), %rdx callq 0x88b6 movq %r14, %rdi addq $0x48, %rdi leaq 0x90(%rsp), %rsi callq 0x6640 pushq $0x1 popq %rsi movq %r14, %rdi callq 0x8dde leaq 0x90(%rsp), %rdi callq 0x6958 leaq 0xb0(%rsp), %rdi callq 0x6958 leaq 0x107a1(%rip), %rsi # 0x186be leaq 0x110(%rsp), %rdi callq 0x8d06 leaq 0x10793(%rip), %rsi # 0x186c4 movq %rax, %rdi callq 0x8d8a movq %rax, %r14 leaq 0x1078e(%rip), %rsi # 0x186d1 leaq 0x70(%rsp), %rdi leaq 0x2f8(%rsp), %rdx callq 0x88b6 leaq 0x28(%r14), %rdi leaq 0x70(%rsp), %rsi callq 0x6640 leaq 0x10776(%rip), %rsi # 0x186e0 leaq 0x50(%rsp), %rdi leaq 0x10(%rsp), %rdx callq 0x88b6 movq %r14, %rdi addq $0x48, %rdi leaq 0x50(%rsp), %rsi callq 0x6640 pushq $0x1 popq %rsi movq %r14, %rdi callq 0x8dde leaq 0x50(%rsp), %rdi callq 0x6958 leaq 0x70(%rsp), %rdi callq 0x6958 leaq 0x110(%rsp), %rdi movl %ebp, %esi movq %rbx, %rdx callq 0x8e0e leaq 0x2f8(%rsp), %rdi callq 0xfc26 movl $0x2, 0x1d4e6(%rip) # 0x254b8 leaq 0x1070c(%rip), %rsi # 0x186e5 leaq 0x10(%rsp), %rdi leaq 0xf(%rsp), %rdx callq 0x88b6 leaq 0x106a5(%rip), %rcx # 0x18694 leaq 0x30(%rsp), %rdi leaq 0x110(%rsp), %rsi pushq $0x4 popq %rdx 
callq 0x8f1e leaq 0x2f8(%rsp), %rdi leaq 0x10(%rsp), %rsi leaq 0x30(%rsp), %rdx xorl %ecx, %ecx callq 0x8e6a leaq 0x30(%rsp), %rdi callq 0x6958 leaq 0x10(%rsp), %rdi callq 0x6958 leaq 0x10686(%rip), %rcx # 0x186be leaq 0x10(%rsp), %rdi leaq 0x110(%rsp), %rsi pushq $0x5 popq %rdx callq 0x8f1e movq 0x1cf94(%rip), %rdi # 0x24fe8 leaq 0x1068c(%rip), %rsi # 0x186e7 callq 0x6550 leaq 0x10(%rsp), %rsi movq %rax, %rdi callq 0x6510 movq %rax, %rdi callq 0x6290 leaq 0x2f8(%rsp), %rdi leaq 0x10(%rsp), %rsi callq 0x8f96 leaq 0x10(%rsp), %rdi callq 0x6958 leaq 0x2f8(%rsp), %rdi callq 0x9088 leaq 0x110(%rsp), %rdi callq 0x90b6 xorl %eax, %eax addq $0x5e0, %rsp # imm = 0x5E0 popq %rbx popq %r14 popq %rbp retq jmp 0x80cc movq %rax, %rbx leaq 0x30(%rsp), %rdi callq 0x6958 jmp 0x8143 jmp 0x8140 movq %rax, %rbx jmp 0x814d jmp 0x815c movq %rax, %rbx leaq 0x50(%rsp), %rdi callq 0x6958 jmp 0x80e8 movq %rax, %rbx leaq 0x70(%rsp), %rdi jmp 0x810e jmp 0x815c movq %rax, %rbx leaq 0x90(%rsp), %rdi callq 0x6958 jmp 0x8106 movq %rax, %rbx leaq 0xb0(%rsp), %rdi callq 0x6958 jmp 0x815f jmp 0x815c movq %rax, %rbx leaq 0xd0(%rsp), %rdi callq 0x6958 jmp 0x812c movq %rax, %rbx leaq 0xf0(%rsp), %rdi callq 0x6958 jmp 0x816c movq %rax, %rbx jmp 0x816c movq %rax, %rbx leaq 0x10(%rsp), %rdi callq 0x6958 leaq 0x2f8(%rsp), %rdi callq 0x9088 jmp 0x815f movq %rax, %rbx leaq 0x110(%rsp), %rdi callq 0x90b6 movq %rbx, %rdi callq 0x6840
/mattn[P]clask/example/file/main.cxx
clask::server_t::run(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
inline void server_t::run(const std::string& addr) { auto pos = addr.find_last_of(':'); if (pos == std::string::npos) { throw std::runtime_error("invalid host:port"); } auto host = addr.substr(0, pos); auto port = std::stoi(addr.substr(pos + 1)); _run(host, port); }
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x48, %rsp movq %rsi, %r14 movq %rdi, %rbx pushq $0x3a popq %rsi pushq $-0x1 popq %rdx movq %r14, %rdi callq 0x6930 cmpq $-0x1, %rax je 0x9024 movq %rax, %r15 leaq 0x28(%rsp), %rdi movq %r14, %rsi xorl %edx, %edx movq %rax, %rcx callq 0x6590 incq %r15 leaq 0x8(%rsp), %rdi pushq $-0x1 popq %rcx movq %r14, %rsi movq %r15, %rdx callq 0x6590 leaq 0x8(%rsp), %rdi pushq $0xa popq %rdx xorl %esi, %esi callq 0x1300c movl %eax, %ebp leaq 0x8(%rsp), %rdi callq 0x6958 leaq 0x28(%rsp), %rsi movq %rbx, %rdi movl %ebp, %edx callq 0x13028 leaq 0x28(%rsp), %rdi callq 0x6958 addq $0x48, %rsp popq %rbx popq %r14 popq %r15 popq %rbp retq pushq $0x10 popq %rdi callq 0x62a0 movq %rax, %r14 leaq 0xfc0a(%rip), %rsi # 0x18c40 movq %rax, %rdi callq 0x61f0 movq 0x1bfab(%rip), %rsi # 0x24ff0 movq 0x1bf1c(%rip), %rdx # 0x24f68 movq %r14, %rdi callq 0x6810 movq %rax, %rbx movq %r14, %rdi callq 0x6420 jmp 0x907f jmp 0x9072 movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x6958 jmp 0x9075 movq %rax, %rbx leaq 0x28(%rsp), %rdi callq 0x6958 movq %rbx, %rdi callq 0x6840 nop
/mattn[P]clask/clask/../clask/core.hpp
argparse::Argument::is_positional(std::basic_string_view<char, std::char_traits<char>>, std::basic_string_view<char, std::char_traits<char>>)
static bool is_positional(std::string_view name, std::string_view prefix_chars) { auto first = lookahead(name); if (first == eof) { return true; } else if (prefix_chars.find(static_cast<char>(first)) != std::string_view::npos) { name.remove_prefix(1); if (name.empty()) { return true; } return is_decimal_literal(name); } return true; }
pushq %r15 pushq %r14 pushq %rbx subq $0x10, %rsp movq %rdx, (%rsp) movq %rcx, 0x8(%rsp) movb $0x1, %bl testq %rdi, %rdi je 0x9cc9 movq %rsi, %r14 movq %rdi, %r15 movsbl (%rsi), %esi movq %rsp, %rdi xorl %edx, %edx callq 0x9cd6 cmpq $-0x1, %rax je 0x9cc9 decq %r15 je 0x9cc9 incq %r14 movq %r15, %rdi movq %r14, %rsi callq 0x9d2c movl %eax, %ebx movl %ebx, %eax addq $0x10, %rsp popq %rbx popq %r14 popq %r15 retq nop
/mattn[P]clask/example/file/./argparse/argparse.hpp
argparse::Argument::throw_required_arg_no_value_provided_error() const
void throw_required_arg_no_value_provided_error() const { std::stringstream stream; stream << m_used_name << ": no value provided."; throw std::runtime_error(stream.str()); }
pushq %rbp pushq %r14 pushq %rbx subq $0x1b0, %rsp # imm = 0x1B0 movq %rdi, %rbx leaq 0x28(%rsp), %rdi callq 0x63e0 leaq 0x38(%rsp), %rdi movq 0x18(%rbx), %rdx movq 0x20(%rbx), %rsi callq 0x65f0 leaq 0x9662(%rip), %rsi # 0x18aa7 movq %rax, %rdi callq 0x6550 pushq $0x10 popq %rdi callq 0x62a0 movq %rax, %rbx leaq 0x40(%rsp), %rsi leaq 0x8(%rsp), %rdi callq 0x6790 movb $0x1, %bpl leaq 0x8(%rsp), %rsi movq %rbx, %rdi callq 0x67b0 xorl %ebp, %ebp movq 0x15b70(%rip), %rsi # 0x24ff0 movq 0x15ae1(%rip), %rdx # 0x24f68 movq %rbx, %rdi callq 0x6810 movq %rax, %r14 leaq 0x8(%rsp), %rdi callq 0x6958 testb %bpl, %bpl jne 0xf4a6 jmp 0xf4b3 movq %rax, %r14 movq %rbx, %rdi callq 0x6420 jmp 0xf4b3 movq %rax, %r14 leaq 0x28(%rsp), %rdi callq 0x6440 movq %r14, %rdi callq 0x6840 nop
/mattn[P]clask/example/file/./argparse/argparse.hpp
clask::url_encode(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, bool)
inline std::string url_encode(const std::string &value, bool escape_slash = true) { std::ostringstream os; os.fill('0'); os << std::hex; for (char c : value) { if (isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') { os << c; continue; } if (!escape_slash && c == '/') { os << c; continue; } os << std::uppercase; os << '%' << std::setw(2) << int((unsigned char) c); os << std::nouppercase; } return os.str(); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x188, %rsp # imm = 0x188 movl %edx, 0x4(%rsp) movq %rsi, %r15 movq %rdi, %rbx leaq 0x10(%rsp), %r14 movq %r14, %rdi callq 0x66a0 movq (%r14), %rax addq -0x18(%rax), %r14 movq %r14, %rdi callq 0x62e0 movq %rbx, 0x8(%rsp) movb $0x30, 0xe0(%r14) leaq 0x10(%rsp), %r14 movq (%r14), %rax movq -0x18(%rax), %rax movl 0x28(%rsp,%rax), %ecx andl $-0x4b, %ecx orl $0x8, %ecx movl %ecx, 0x28(%rsp,%rax) movq (%r15), %r13 movq 0x8(%r15), %r15 xorl %ebx, %ebx cmpq %rbx, %r15 je 0x1220e movb (%r13,%rbx), %bpl movsbl %bpl, %edi callq 0x6580 testl %eax, %eax je 0x1219a movsbl %bpl, %esi movq %r14, %rdi callq 0x6620 incq %rbx jmp 0x1216e movzbl %bpl, %r12d leal -0x2d(%r12), %eax cmpl $0x2, %eax jb 0x12189 cmpl $0x5f, %r12d je 0x12189 cmpl $0x7e, %r12d je 0x12189 cmpb $0x2f, %bpl setne %al orb 0x4(%rsp), %al movb $0x2f, %bpl je 0x12189 movq 0x10(%rsp), %rax movq -0x18(%rax), %rax orl $0x4000, 0x28(%rsp,%rax) # imm = 0x4000 movq %r14, %rdi pushq $0x25 popq %rsi callq 0x6620 movq (%rax), %rcx movq -0x18(%rcx), %rcx movq $0x2, 0x10(%rax,%rcx) movq %rax, %rdi movl %r12d, %esi callq 0x6820 movq 0x10(%rsp), %rax movq -0x18(%rax), %rax andl $0xffffbfff, 0x28(%rsp,%rax) # imm = 0xFFFFBFFF jmp 0x12195 leaq 0x18(%rsp), %rsi movq 0x8(%rsp), %rbx movq %rbx, %rdi callq 0x6790 leaq 0x10(%rsp), %rdi callq 0x6100 movq %rbx, %rax addq $0x188, %rsp # imm = 0x188 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq jmp 0x12241 movq %rax, %rbx leaq 0x10(%rsp), %rdi callq 0x6100 movq %rbx, %rdi callq 0x6840
/mattn[P]clask/clask/../clask/core.hpp
argparse::Argument::Argument<1ul, 0ul>(std::basic_string_view<char, std::char_traits<char>>, std::array<std::basic_string_view<char, std::char_traits<char>>, 1ul>&&, std::integer_sequence<unsigned long, 0ul>)
explicit Argument(std::string_view prefix_chars, std::array<std::string_view, N> &&a, std::index_sequence<I...> /*unused*/) : m_accepts_optional_like_value(false), m_is_optional((is_optional(a[I], prefix_chars) || ...)), m_is_required(false), m_is_repeatable(false), m_is_used(false), m_prefix_chars(prefix_chars) { ((void)m_names.emplace_back(a[I]), ...); std::sort( m_names.begin(), m_names.end(), [](const auto &lhs, const auto &rhs) { return lhs.size() == rhs.size() ? lhs < rhs : lhs.size() < rhs.size(); }); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbp movq %rdx, %r14 movq %rdi, %rbx leaq 0x38(%rdi), %rax xorps %xmm0, %xmm0 movups %xmm0, 0x10(%rdi) movups %xmm0, (%rdi) andq $0x0, 0x20(%rdi) movq %rax, 0x28(%rdi) andq $0x0, 0x30(%rdi) movq %rsi, %r12 xorl %eax, %eax movb %al, 0x38(%rdi) leaq 0x58(%rdi), %rcx movq %rcx, 0x48(%rdi) andq $0x0, 0x50(%rdi) leaq 0x28(%rdi), %rcx movq %rcx, 0x20(%rsp) leaq 0x48(%rdi), %rcx movq %rcx, 0x18(%rsp) movb %al, 0x58(%rdi) leaq 0x68(%rdi), %rcx movq %rcx, 0x10(%rsp) movups %xmm0, 0x68(%rdi) leaq 0x78(%rdi), %rcx movq %rcx, 0x8(%rsp) leaq 0x88(%rdi), %rcx movq %rcx, 0x78(%rdi) andq $0x0, 0x80(%rdi) movb %al, 0x88(%rdi) movb %al, 0xb8(%rdi) movups %xmm0, 0xc0(%rdi) movb %al, 0xe8(%rdi) movups %xmm0, 0xf0(%rdi) leaq -0xcd2d(%rip), %rcx # 0x9ad2 movq %rcx, 0x108(%rdi) leaq -0xcd2d(%rip), %rcx # 0x9ae0 movq %rcx, 0x100(%rdi) movb %al, 0x110(%rdi) andq $0x0, 0x128(%rdi) leaq 0xc0(%rdi), %rax movq %rax, (%rsp) movups %xmm0, 0x118(%rdi) pushq $0x1 popq %rax movq %rax, 0x130(%rdi) movq %rax, 0x138(%rdi) andb $-0x2, 0x140(%rdi) leaq 0xf0(%rdi), %r13 leaq 0x118(%rdi), %r15 movq (%rbp), %rdi movq 0x8(%rbp), %rsi movq %r12, %rdx movq %r14, %rcx callq 0x9a3a movb 0x140(%rbx), %cl addb %al, %al andb $-0x1f, %cl orb %al, %cl movb %cl, 0x140(%rbx) movq %r12, 0x148(%rbx) movq %r14, 0x150(%rbx) movq %rbx, %rdi movq %rbp, %rsi callq 0x9a44 movq (%rbx), %rdi movq 0x8(%rbx), %rsi callq 0x16926 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %r14 leaq 0xd0(%rbx), %r12 leaq 0x98(%rbx), %rbp movq %r15, %rdi callq 0x9a7e movq %r13, %rdi callq 0x9aa0 movq %r12, %rdi callq 0xaae6 movq (%rsp), %rdi callq 0xc032 movq %rbp, %rdi callq 0xaafc movq 0x8(%rsp), %rdi callq 0x6958 movq 0x10(%rsp), %rdi callq 0xc032 movq 0x18(%rsp), %rdi callq 0x6958 movq 0x20(%rsp), %rdi callq 0x6958 movq %rbx, %rdi callq 0x9ab0 movq %r14, %rdi callq 0x6840
/mattn[P]clask/example/file/./argparse/argparse.hpp
argparse::ArgumentParser::operator[](std::basic_string_view<char, std::char_traits<char>>) const
Argument &operator[](std::string_view arg_name) const { auto it = m_argument_map.find(arg_name); if (it != m_argument_map.end()) { return *(it->second); } if (!is_valid_prefix_char(arg_name.front())) { std::string name(arg_name); const auto legal_prefix_char = get_any_valid_prefix_char(); const auto prefix = std::string(1, legal_prefix_char); // "-" + arg_name name = prefix + name; it = m_argument_map.find(name); if (it != m_argument_map.end()) { return *(it->second); } // "--" + arg_name name = prefix + name; it = m_argument_map.find(name); if (it != m_argument_map.end()) { return *(it->second); } } throw std::logic_error("No such argument: " + std::string(arg_name)); }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x70, %rsp movq %rdi, %r15 leaq 0x40(%rsp), %rax movq %rsi, (%rax) movq %rdx, 0x8(%rax) leaq 0x100(%rdi), %r14 movq %r14, %rdi movq %rax, %rsi callq 0xd8b6 movq %rax, %rbx leaq 0x108(%r15), %rax cmpq %rax, %rbx je 0x17119 movq 0x30(%rbx), %rbx addq $0x10, %rbx jmp 0x17224 movq 0x48(%rsp), %rax movsbl (%rax), %esi movq %r15, %rdi callq 0xd7f8 testb %al, %al jne 0x17239 leaq 0x20(%rsp), %rdi leaq 0x40(%rsp), %rsi movq %rsp, %rdx callq 0xa0c0 movq 0x88(%r15), %rax movsbl (%rax), %edx leaq 0x10(%rsp), %rax movq %rax, -0x10(%rax) movq %rsp, %rdi pushq $0x1 popq %rsi callq 0x6610 leaq 0x50(%rsp), %rdi movq %rsp, %rsi leaq 0x20(%rsp), %rdx callq 0x173c5 leaq 0x20(%rsp), %r15 leaq 0x50(%rsp), %r12 movq %r15, %rdi movq %r12, %rsi callq 0x6640 movq %r12, %rdi callq 0x6958 movq (%r15), %rax movq 0x8(%r15), %rcx movq %rcx, (%r12) movq %rax, 0x8(%r12) leaq 0x50(%rsp), %rsi movq %r14, %rdi callq 0xd8b6 cmpq %rbx, %rax jne 0x17203 leaq 0x50(%rsp), %rdi movq %rsp, %rsi leaq 0x20(%rsp), %rdx callq 0x173c5 leaq 0x20(%rsp), %r15 leaq 0x50(%rsp), %r12 movq %r15, %rdi movq %r12, %rsi callq 0x6640 movq %r12, %rdi callq 0x6958 movq (%r15), %rax movq 0x8(%r15), %rcx movq %rcx, (%r12) movq %rax, 0x8(%r12) leaq 0x50(%rsp), %rsi movq %r14, %rdi callq 0xd8b6 cmpq %rbx, %rax je 0x17234 movq 0x30(%rax), %rbx addq $0x10, %rbx xorl %ebp, %ebp movq %rsp, %rdi callq 0x6958 leaq 0x20(%rsp), %rdi callq 0x6958 testb %bpl, %bpl jne 0x17239 movq %rbx, %rax addq $0x70, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq movb $0x1, %bpl jmp 0x1720d pushq $0x10 popq %rdi callq 0x62a0 movq %rax, %rbx movq %rsp, %rdi leaq 0x40(%rsp), %rsi leaq 0x50(%rsp), %rdx callq 0xa0c0 leaq 0x1bf4(%rip), %rsi # 0x18e51 leaq 0x20(%rsp), %rdi movq %rsp, %rdx callq 0xd12c movb $0x1, %bpl leaq 0x20(%rsp), %rsi movq %rbx, %rdi callq 0x6740 xorl %ebp, %ebp movq 0xdd45(%rip), %rsi # 0x24fc8 movq 0xdcf6(%rip), %rdx # 0x24f80 movq %rbx, %rdi callq 0x6810 jmp 
0x172bc jmp 0x172bc movq %rax, %r14 leaq 0x20(%rsp), %rdi callq 0x6958 jmp 0x172ab movq %rax, %r14 movb $0x1, %bpl movq %rsp, %rdi callq 0x6958 testb %bpl, %bpl jne 0x172cc jmp 0x172e8 jmp 0x172bc movq %rax, %r14 movq %rsp, %rdi callq 0x6958 jmp 0x172d9 movq %rax, %r14 movq %rbx, %rdi callq 0x6420 jmp 0x172e8 movq %rax, %r14 leaq 0x20(%rsp), %rdi callq 0x6958 jmp 0x172e8 movq %rax, %r14 movq %r14, %rdi callq 0x6840
/mattn[P]clask/example/file/./argparse/argparse.hpp
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> argparse::Argument::get<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>>() const
T get() const { if (!m_values.empty()) { if constexpr (details::IsContainer<T>) { return any_cast_container<T>(m_values); } else { return std::any_cast<T>(m_values.front()); } } if (m_default_value.has_value()) { return std::any_cast<T>(m_default_value); } if constexpr (details::IsContainer<T>) { if (!m_accepts_optional_like_value) { return any_cast_container<T>(m_values); } } throw std::logic_error("No value provided for '" + m_names.back() + "'."); }
pushq %rbp pushq %r14 pushq %rbx subq $0x40, %rsp movq %rsi, %r14 movq %rdi, %rbx movq 0x118(%rsi), %rsi cmpq 0x120(%r14), %rsi jne 0x1731c cmpq $0x0, 0x68(%r14) je 0x17330 addq $0x68, %r14 movq %r14, %rsi movq %rbx, %rdi callq 0x173fd movq %rbx, %rax addq $0x40, %rsp popq %rbx popq %r14 popq %rbp retq pushq $0x10 popq %rdi callq 0x62a0 movq %rax, %rbx movq 0x8(%r14), %rdx addq $-0x20, %rdx leaq 0x1b1a(%rip), %rsi # 0x18e64 movq %rsp, %rdi callq 0xd25f leaq 0x16e9(%rip), %rdx # 0x18a42 leaq 0x20(%rsp), %rdi movq %rsp, %rsi callq 0xd0a6 movb $0x1, %bpl leaq 0x20(%rsp), %rsi movq %rbx, %rdi callq 0x6740 xorl %ebp, %ebp movq 0xdc49(%rip), %rsi # 0x24fc8 movq 0xdbfa(%rip), %rdx # 0x24f80 movq %rbx, %rdi callq 0x6810 movq %rax, %r14 leaq 0x20(%rsp), %rdi callq 0x6958 jmp 0x173a3 movq %rax, %r14 movb $0x1, %bpl movq %rsp, %rdi callq 0x6958 testb %bpl, %bpl jne 0x173b5 jmp 0x173bd movq %rax, %r14 movq %rbx, %rdi callq 0x6420 movq %r14, %rdi callq 0x6840
/mattn[P]clask/example/file/./argparse/argparse.hpp
Shared_Multi_Field_constructors::test_method()
BOOST_AUTO_TEST_CASE(Shared_Multi_Field_constructors) { #ifdef PM_GMP_FOUND Shared_multi_field_element::initialize(5, 13); test_multi_field_constructors<Shared_multi_field_element>(); #endif Shared_multi_field_element_with_small_characteristics<>::initialize(5, 13); test_multi_field_constructors<Shared_multi_field_element_with_small_characteristics<> >(); }
pushq %rax movl $0x5, %edi movl $0xd, %esi callq 0x231ea popq %rax jmp 0x2341c
/GUDHI[P]gudhi-devel/src/Persistence_matrix/test/test_persistence_matrix_field.cpp
cmyk_ycck_convert
METHODDEF(void) cmyk_ycck_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows) { my_cconvert_ptr cconvert = (my_cconvert_ptr)cinfo->cconvert; register int r, g, b; register JLONG *ctab = cconvert->rgb_ycc_tab; register JSAMPROW inptr; register JSAMPROW outptr0, outptr1, outptr2, outptr3; register JDIMENSION col; JDIMENSION num_cols = cinfo->image_width; while (--num_rows >= 0) { inptr = *input_buf++; outptr0 = output_buf[0][output_row]; outptr1 = output_buf[1][output_row]; outptr2 = output_buf[2][output_row]; outptr3 = output_buf[3][output_row]; output_row++; for (col = 0; col < num_cols; col++) { r = MAXJSAMPLE - inptr[0]; g = MAXJSAMPLE - inptr[1]; b = MAXJSAMPLE - inptr[2]; /* K passes through as-is */ outptr3[col] = inptr[3]; inptr += 4; /* If the inputs are 0..MAXJSAMPLE, the outputs of these equations * must be too; we do not need an explicit range-limiting operation. * Hence the value being shifted is never negative, and we don't * need the general RIGHT_SHIFT macro. */ /* Y */ outptr0[col] = (JSAMPLE)((ctab[r + R_Y_OFF] + ctab[g + G_Y_OFF] + ctab[b + B_Y_OFF]) >> SCALEBITS); /* Cb */ outptr1[col] = (JSAMPLE)((ctab[r + R_CB_OFF] + ctab[g + G_CB_OFF] + ctab[b + B_CB_OFF]) >> SCALEBITS); /* Cr */ outptr2[col] = (JSAMPLE)((ctab[r + R_CR_OFF] + ctab[g + G_CR_OFF] + ctab[b + B_CR_OFF]) >> SCALEBITS); } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movl %ecx, -0xc(%rsp) movq %rdx, -0x8(%rsp) testl %r8d, %r8d jle 0x7b1d movq 0x1d8(%rdi), %rax movq 0x10(%rax), %rax movl 0x30(%rdi), %ecx movl $0xff, %r9d testq %rcx, %rcx je 0x7b04 movl -0xc(%rsp), %r14d movq -0x8(%rsp), %rdx movq (%rdx), %r10 movq 0x8(%rdx), %r11 movq (%r10,%r14,8), %r10 movq (%r11,%r14,8), %r11 movq 0x10(%rdx), %rbx movq (%rbx,%r14,8), %rbx movq 0x18(%rdx), %r15 movq (%r15,%r14,8), %r14 movq (%rsi), %r15 xorl %r12d, %r12d movzbl (%r15,%r12,4), %r13d xorl %r9d, %r13d movzbl 0x1(%r15,%r12,4), %ebp xorl %r9d, %ebp movzbl 0x2(%r15,%r12,4), %edi xorl %r9d, %edi movb 0x3(%r15,%r12,4), %dl movb %dl, (%r14,%r12) movl 0x800(%rax,%rbp,8), %edx addl (%rax,%r13,8), %edx addl 0x1000(%rax,%rdi,8), %edx shrl $0x10, %edx movb %dl, (%r10,%r12) movl 0x2000(%rax,%rbp,8), %edx addl 0x1800(%rax,%r13,8), %edx addl 0x2800(%rax,%rdi,8), %edx shrl $0x10, %edx movb %dl, (%r11,%r12) movl 0x3000(%rax,%rbp,8), %edx addl 0x2800(%rax,%r13,8), %edx addl 0x3800(%rax,%rdi,8), %edx shrl $0x10, %edx movb %dl, (%rbx,%r12) incq %r12 cmpq %r12, %rcx jne 0x7a86 leal -0x1(%r8), %r10d addq $0x8, %rsi incl -0xc(%rsp) cmpl $0x2, %r8d movl %r10d, %r8d jge 0x7a4e popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jccolor.c
jinit_forward_dct
GLOBAL(void) jinit_forward_dct(j_compress_ptr cinfo) { my_fdct_ptr fdct; int i; fdct = (my_fdct_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(my_fdct_controller)); cinfo->fdct = (struct jpeg_forward_dct *)fdct; fdct->pub.start_pass = start_pass_fdctmgr; /* First determine the DCT... */ switch (cinfo->dct_method) { #ifdef DCT_ISLOW_SUPPORTED case JDCT_ISLOW: fdct->pub.forward_DCT = forward_DCT; if (jsimd_can_fdct_islow()) fdct->dct = jsimd_fdct_islow; else fdct->dct = jpeg_fdct_islow; break; #endif #ifdef DCT_IFAST_SUPPORTED case JDCT_IFAST: fdct->pub.forward_DCT = forward_DCT; if (jsimd_can_fdct_ifast()) fdct->dct = jsimd_fdct_ifast; else fdct->dct = jpeg_fdct_ifast; break; #endif #ifdef DCT_FLOAT_SUPPORTED case JDCT_FLOAT: fdct->pub.forward_DCT = forward_DCT_float; if (jsimd_can_fdct_float()) fdct->float_dct = jsimd_fdct_float; else fdct->float_dct = jpeg_fdct_float; break; #endif default: ERREXIT(cinfo, JERR_NOT_COMPILED); break; } /* ...then the supporting stages. 
*/ switch (cinfo->dct_method) { #ifdef DCT_ISLOW_SUPPORTED case JDCT_ISLOW: #endif #ifdef DCT_IFAST_SUPPORTED case JDCT_IFAST: #endif #if defined(DCT_ISLOW_SUPPORTED) || defined(DCT_IFAST_SUPPORTED) if (jsimd_can_convsamp()) fdct->convsamp = jsimd_convsamp; else fdct->convsamp = convsamp; if (jsimd_can_quantize()) fdct->quantize = jsimd_quantize; else fdct->quantize = quantize; break; #endif #ifdef DCT_FLOAT_SUPPORTED case JDCT_FLOAT: if (jsimd_can_convsamp_float()) fdct->float_convsamp = jsimd_convsamp_float; else fdct->float_convsamp = convsamp_float; if (jsimd_can_quantize_float()) fdct->float_quantize = jsimd_quantize_float; else fdct->float_quantize = quantize_float; break; #endif default: ERREXIT(cinfo, JERR_NOT_COMPILED); break; } /* Allocate workspace memory */ #ifdef DCT_FLOAT_SUPPORTED if (cinfo->dct_method == JDCT_FLOAT) fdct->float_workspace = (FAST_FLOAT *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(FAST_FLOAT) * DCTSIZE2); else #endif fdct->workspace = (DCTELEM *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(DCTELEM) * DCTSIZE2); /* Mark divisor tables unallocated */ for (i = 0; i < NUM_QUANT_TBLS; i++) { fdct->divisors[i] = NULL; #ifdef DCT_FLOAT_SUPPORTED fdct->float_divisors[i] = NULL; #endif } }
pushq %r15 pushq %r14 pushq %rbx movq %rdi, %r14 movq 0x8(%rdi), %rax movl $0x90, %edx movl $0x1, %esi callq *(%rax) movq %rax, %rbx movq %rax, 0x1e8(%r14) leaq 0x17d(%rip), %rax # 0x7cce movq %rax, (%rbx) movl 0x114(%r14), %eax cmpl $0x2, %eax je 0x7ba3 cmpl $0x1, %eax je 0x7b86 testl %eax, %eax jne 0x7bc0 leaq 0x419(%rip), %rax # 0x7f89 movq %rax, 0x8(%rbx) callq 0x4a555 testl %eax, %eax je 0x7be7 leaq 0x42a52(%rip), %rax # 0x4a5d6 jmp 0x7bee leaq 0x3fc(%rip), %rax # 0x7f89 movq %rax, 0x8(%rbx) callq 0x4a592 testl %eax, %eax je 0x7bd1 leaq 0x42a47(%rip), %rax # 0x4a5e8 jmp 0x7bee leaq 0x46d(%rip), %rax # 0x8017 movq %rax, 0x8(%rbx) callq 0x4a5b4 testl %eax, %eax je 0x7bda leaq 0x42a2f(%rip), %rax # 0x4a5ed jmp 0x7be1 movq (%r14), %rax movl $0x30, 0x28(%rax) movq %r14, %rdi callq *(%rax) jmp 0x7bf2 movq 0x86388(%rip), %rax # 0x8df60 jmp 0x7bee movq 0x863bf(%rip), %rax # 0x8dfa0 movq %rax, 0x50(%rbx) jmp 0x7bf2 movq 0x86342(%rip), %rax # 0x8df30 movq %rax, 0x10(%rbx) cmpl $0x2, 0x114(%r14) jae 0x7c0e callq 0x4a516 testl %eax, %eax je 0x7c22 leaq 0x42932(%rip), %rax # 0x4a53e jmp 0x7c29 jne 0x7c3f callq 0x4a52a testl %eax, %eax je 0x7c5d leaq 0x42930(%rip), %rax # 0x4a550 jmp 0x7c64 leaq 0x47f(%rip), %rax # 0x80a8 movq %rax, 0x18(%rbx) callq 0x4a5f2 testl %eax, %eax je 0x7c50 leaq 0x429dd(%rip), %rax # 0x4a61a jmp 0x7c57 movq (%r14), %rax movl $0x30, 0x28(%rax) movq %r14, %rdi callq *(%rax) jmp 0x7c85 leaq 0x4e3(%rip), %rax # 0x813a movq %rax, 0x20(%rbx) jmp 0x7c85 leaq 0x52d(%rip), %rax # 0x8191 movq %rax, 0x58(%rbx) callq 0x4a606 testl %eax, %eax je 0x7c7a leaq 0x429b4(%rip), %rax # 0x4a62c jmp 0x7c81 leaq 0x5e2(%rip), %rax # 0x8263 movq %rax, 0x60(%rbx) xorl %r15d, %r15d cmpl $0x2, 0x114(%r14) sete %r15b movq 0x8(%r14), %rax movl %r15d, %edx shll $0x7, %edx subq $-0x80, %rdx shll $0x6, %r15d movq %r14, %rdi movl $0x1, %esi callq *(%rax) movq %rax, 0x48(%r15,%rbx) xorps %xmm0, %xmm0 movups %xmm0, 0x28(%rbx) movups %xmm0, 0x38(%rbx) movups %xmm0, 0x78(%rbx) movups 
%xmm0, 0x68(%rbx) popq %rbx popq %r14 popq %r15 retq
/rui314[P]libjpeg-turbo/jcdctmgr.c
jinit_inverse_dct
GLOBAL(void) jinit_inverse_dct(j_decompress_ptr cinfo) { my_idct_ptr idct; int ci; jpeg_component_info *compptr; idct = (my_idct_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(my_idct_controller)); cinfo->idct = (struct jpeg_inverse_dct *)idct; idct->pub.start_pass = start_pass; for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { /* Allocate and pre-zero a multiplier table for each component */ compptr->dct_table = (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(multiplier_table)); MEMZERO(compptr->dct_table, sizeof(multiplier_table)); /* Mark multiplier table not yet set up for any method */ idct->cur_method[ci] = -1; } }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rdi, %rbx movq 0x8(%rdi), %rax movl $0x80, %edx movl $0x1, %esi callq *(%rax) movq %rax, %r14 movq %rax, 0x258(%rbx) leaq 0x61(%rip), %rax # 0x21ec1 movq %rax, (%r14) cmpl $0x0, 0x38(%rbx) jle 0x21eb5 movq 0x130(%rbx), %r15 addq $0x58, %r15 xorl %r12d, %r12d movq 0x8(%rbx), %rax movl $0x100, %edx # imm = 0x100 movq %rbx, %rdi movl $0x1, %esi callq *(%rax) movq %rax, (%r15) movl $0x100, %edx # imm = 0x100 movq %rax, %rdi xorl %esi, %esi callq 0x51f0 movl $0xffffffff, 0x58(%r14,%r12,4) # imm = 0xFFFFFFFF incq %r12 movslq 0x38(%rbx), %rax addq $0x60, %r15 cmpq %rax, %r12 jl 0x21e77 addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq
/rui314[P]libjpeg-turbo/jddctmgr.c
start_pass_huff_decoder
/*
 * Initialize for a Huffman-coded scan (baseline/sequential decoder).
 * Builds the derived Huffman tables for every component in the scan,
 * precomputes per-block table pointers for one MCU, and resets the
 * bit-reading and restart state.
 */
METHODDEF(void)
start_pass_huff_decoder(j_decompress_ptr cinfo)
{
  huff_entropy_ptr entropy = (huff_entropy_ptr)cinfo->entropy;
  int ci, blkn, dctbl, actbl;
  d_derived_tbl **pdtbl;
  jpeg_component_info *compptr;

  /* Check that the scan parameters Ss, Se, Ah/Al are OK for sequential JPEG.
   * This ought to be an error condition, but we make it a warning because
   * there are some baseline files out there with all zeroes in these bytes.
   */
  if (cinfo->Ss != 0 || cinfo->Se != DCTSIZE2 - 1 ||
      cinfo->Ah != 0 || cinfo->Al != 0)
    WARNMS(cinfo, JWRN_NOT_SEQUENTIAL);

  for (ci = 0; ci < cinfo->comps_in_scan; ci++) {
    compptr = cinfo->cur_comp_info[ci];
    dctbl = compptr->dc_tbl_no;
    actbl = compptr->ac_tbl_no;
    /* Compute derived values for Huffman tables */
    /* We may do this more than once for a table, but it's not expensive */
    pdtbl = (d_derived_tbl **)(entropy->dc_derived_tbls) + dctbl;
    jpeg_make_d_derived_tbl(cinfo, TRUE, dctbl, pdtbl);
    pdtbl = (d_derived_tbl **)(entropy->ac_derived_tbls) + actbl;
    jpeg_make_d_derived_tbl(cinfo, FALSE, actbl, pdtbl);
    /* Initialize DC predictions to 0 */
    entropy->saved.last_dc_val[ci] = 0;
  }

  /* Precalculate decoding info for each block in an MCU of this scan */
  for (blkn = 0; blkn < cinfo->blocks_in_MCU; blkn++) {
    ci = cinfo->MCU_membership[blkn];
    compptr = cinfo->cur_comp_info[ci];
    /* Precalculate which table to use for each block */
    entropy->dc_cur_tbls[blkn] = entropy->dc_derived_tbls[compptr->dc_tbl_no];
    entropy->ac_cur_tbls[blkn] = entropy->ac_derived_tbls[compptr->ac_tbl_no];
    /* Decide whether we really care about the coefficient values */
    if (compptr->component_needed) {
      entropy->dc_needed[blkn] = TRUE;
      /* we don't need the ACs if producing a 1/8th-size image */
      entropy->ac_needed[blkn] = (compptr->_DCT_scaled_size > 1);
    } else {
      entropy->dc_needed[blkn] = entropy->ac_needed[blkn] = FALSE;
    }
  }

  /* Initialize bitread state variables */
  entropy->bitstate.bits_left = 0;
  entropy->bitstate.get_buffer = 0; /* unnecessary, but keeps Purify quiet */
  entropy->pub.insufficient_data = FALSE;

  /* Initialize restart counter */
  entropy->restarts_to_go = cinfo->restart_interval;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdi, %rbx movq 0x250(%rdi), %r15 cmpl $0x0, 0x20c(%rdi) jne 0x227c3 cmpl $0x3f, 0x210(%rbx) jne 0x227c3 cmpl $0x0, 0x214(%rbx) jne 0x227c3 cmpl $0x0, 0x218(%rbx) je 0x227d8 movq (%rbx), %rax movl $0x7a, 0x28(%rax) movq %rbx, %rdi movl $0xffffffff, %esi # imm = 0xFFFFFFFF callq *0x8(%rax) cmpl $0x0, 0x1b0(%rbx) jle 0x2283c leaq 0x40(%r15), %r12 leaq 0x60(%r15), %r13 xorl %ebp, %ebp movq 0x1b8(%rbx,%rbp,8), %rax movslq 0x14(%rax), %rdx movslq 0x18(%rax), %r14 leaq (%r12,%rdx,8), %rcx movq %rbx, %rdi movl $0x1, %esi callq 0x54b0 leaq (,%r14,8), %rcx addq %r13, %rcx movq %rbx, %rdi xorl %esi, %esi movl %r14d, %edx callq 0x54b0 movl $0x0, 0x28(%r15,%rbp,4) incq %rbp movslq 0x1b0(%rbx), %rax cmpq %rax, %rbp jl 0x227eb cmpl $0x0, 0x1e0(%rbx) jle 0x228c3 xorl %eax, %eax movslq 0x1e4(%rbx,%rax,4), %rcx movq 0x1b8(%rbx,%rcx,8), %rdx movslq 0x14(%rdx), %rcx movq 0x40(%r15,%rcx,8), %rcx movq %rcx, 0x80(%r15,%rax,8) movslq 0x18(%rdx), %rcx movq 0x60(%r15,%rcx,8), %rcx movq %rcx, 0xd0(%r15,%rax,8) cmpl $0x0, 0x30(%rdx) je 0x2289b movl $0x1, 0x120(%r15,%rax,4) xorl %ecx, %ecx cmpl $0x2, 0x24(%rdx) setge %cl movl $0x148, %edx # imm = 0x148 jmp 0x228ae movl $0x0, 0x148(%r15,%rax,4) xorl %ecx, %ecx movl $0x120, %edx # imm = 0x120 addq %r15, %rdx movl %ecx, (%rdx,%rax,4) incq %rax movslq 0x1e0(%rbx), %rcx cmpq %rcx, %rax jl 0x22847 xorl %eax, %eax movl %eax, 0x20(%r15) movq $0x0, 0x18(%r15) movl %eax, 0x10(%r15) movl 0x170(%rbx), %eax movl %eax, 0x38(%r15) addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdhuff.c
consume_markers
METHODDEF(int) consume_markers(j_decompress_ptr cinfo) { my_inputctl_ptr inputctl = (my_inputctl_ptr)cinfo->inputctl; int val; if (inputctl->pub.eoi_reached) /* After hitting EOI, read no further */ return JPEG_REACHED_EOI; val = (*cinfo->marker->read_markers) (cinfo); switch (val) { case JPEG_REACHED_SOS: /* Found SOS */ if (inputctl->inheaders) { /* 1st SOS */ initial_setup(cinfo); inputctl->inheaders = FALSE; /* Note: start_input_pass must be called by jdmaster.c * before any more input can be consumed. jdapimin.c is * responsible for enforcing this sequencing. */ } else { /* 2nd or later SOS marker */ if (!inputctl->pub.has_multiple_scans) ERREXIT(cinfo, JERR_EOI_EXPECTED); /* Oops, I wasn't expecting this! */ start_input_pass(cinfo); } break; case JPEG_REACHED_EOI: /* Found EOI */ inputctl->pub.eoi_reached = TRUE; if (inputctl->inheaders) { /* Tables-only datastream, apparently */ if (cinfo->marker->saw_SOF) ERREXIT(cinfo, JERR_SOF_NO_SOS); } else { /* Prevent infinite loop in coef ctlr's decompress_data routine * if user set output_scan_number larger than number of scans. */ if (cinfo->output_scan_number > cinfo->input_scan_number) cinfo->output_scan_number = cinfo->input_scan_number; } break; case JPEG_SUSPENDED: break; } return val; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq 0x240(%rdi), %r15 movl $0x2, %ebx cmpl $0x0, 0x24(%r15) je 0x23d5b movl %ebx, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rdi, %r14 movq 0x248(%rdi), %rax callq *0x8(%rax) movl %eax, %ebx cmpl $0x2, %eax je 0x23fbc cmpl $0x1, %ebx jne 0x23d4a cmpl $0x0, 0x28(%r15) je 0x23ff0 cmpl $0xffdc, 0x34(%r14) # imm = 0xFFDC ja 0x23d97 cmpl $0xffdd, 0x30(%r14) # imm = 0xFFDD jb 0x23db0 movq (%r14), %rax movabsq $0xffdc00000029, %rcx # imm = 0xFFDC00000029 movq %rcx, 0x28(%rax) movq (%r14), %rax movq %r14, %rdi callq *(%rax) movl 0x128(%r14), %eax cmpl $0x8, %eax je 0x23dd1 movq (%r14), %rcx movl $0xf, 0x28(%rcx) movl %eax, 0x2c(%rcx) movq (%r14), %rax movq %r14, %rdi callq *(%rax) movl 0x38(%r14), %eax cmpl $0xb, %eax jl 0x23df9 movq (%r14), %rcx movl $0x1a, 0x28(%rcx) movl %eax, 0x2c(%rcx) movq (%r14), %rax movl $0xa, 0x30(%rax) movq (%r14), %rax movq %r14, %rdi callq *(%rax) movabsq $0x100000001, %rax # imm = 0x100000001 movq %rax, 0x198(%r14) cmpl $0x0, 0x38(%r14) jle 0x23e7e movq 0x130(%r14), %r12 addq $0x8, %r12 xorl %ebp, %ebp movl (%r12), %eax addl $-0x5, %eax cmpl $-0x4, %eax jb 0x23e37 movl 0x4(%r12), %eax addl $-0x5, %eax cmpl $-0x5, %eax ja 0x23e46 movq (%r14), %rax movl $0x12, 0x28(%rax) movq %r14, %rdi callq *(%rax) movq 0x198(%r14), %xmm0 movq (%r12), %xmm1 movdqa %xmm0, %xmm2 pcmpgtd %xmm1, %xmm2 pand %xmm2, %xmm0 pandn %xmm1, %xmm2 por %xmm0, %xmm2 movq %xmm2, 0x198(%r14) incl %ebp addq $0x60, %r12 cmpl 0x38(%r14), %ebp jl 0x23e1e movl $0x8, 0x1a0(%r14) cmpl $0x0, 0x38(%r14) jle 0x23f6b movq 0x130(%r14), %r12 addq $0x8, %r12 xorl %r13d, %r13d movl $0x8, 0x1c(%r12) movl 0x30(%r14), %eax movslq (%r12), %rdi imulq %rax, %rdi movslq 0x198(%r14), %rsi shlq $0x3, %rsi callq 0x5150 movl %eax, 0x14(%r12) movl 0x34(%r14), %eax movslq 0x4(%r12), %rdi imulq %rax, %rdi movslq 0x19c(%r14), %rsi shlq $0x3, %rsi callq 0x5150 movl %eax, 0x18(%r12) 
movq 0x220(%r14), %rax movl $0x0, 0x1c(%rax,%r13,4) movl 0x14(%r12), %ecx decl %ecx movl %ecx, 0x44(%rax,%r13,4) movl 0x30(%r14), %eax movslq (%r12), %rdi imulq %rax, %rdi movslq 0x198(%r14), %rsi callq 0x5150 movl %eax, 0x20(%r12) movl 0x34(%r14), %eax movslq 0x4(%r12), %rdi imulq %rax, %rdi movslq 0x19c(%r14), %rsi callq 0x5150 movl %eax, 0x24(%r12) movl $0x1, 0x28(%r12) movq $0x0, 0x48(%r12) incq %r13 movslq 0x38(%r14), %rax addq $0x60, %r12 cmpq %rax, %r13 jl 0x23ea2 movl 0x34(%r14), %edi movslq 0x19c(%r14), %rsi shlq $0x3, %rsi callq 0x5150 movl %eax, 0x1a4(%r14) movl 0x1b0(%r14), %ecx movl $0x1, %eax cmpl 0x38(%r14), %ecx jl 0x23fa5 xorl %eax, %eax cmpl $0x0, 0x138(%r14) setne %al movq 0x240(%r14), %rcx movl %eax, 0x20(%rcx) movl $0x0, 0x28(%r15) jmp 0x23d4a movl $0x1, 0x24(%r15) cmpl $0x0, 0x28(%r15) je 0x24013 movq 0x248(%r14), %rax cmpl $0x0, 0x1c(%rax) je 0x23d4a movq (%r14), %rax movl $0x3b, 0x28(%rax) movq %r14, %rdi callq *(%rax) jmp 0x23d4a cmpl $0x0, 0x20(%r15) jne 0x24006 movq (%r14), %rax movl $0x23, 0x28(%rax) movq %r14, %rdi callq *(%rax) movq %r14, %rdi callq 0x2407a jmp 0x23d4a movl 0xac(%r14), %eax cmpl %eax, 0xb4(%r14) jle 0x23d4a movl %eax, 0xb4(%r14) jmp 0x23d4a
/rui314[P]libjpeg-turbo/jdinput.c
start_input_pass
METHODDEF(void) start_input_pass(j_decompress_ptr cinfo) { per_scan_setup(cinfo); latch_quant_tables(cinfo); (*cinfo->entropy->start_pass) (cinfo); (*cinfo->coef->start_input_pass) (cinfo); cinfo->inputctl->consume_input = cinfo->coef->consume_data; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rdi, %rbx movl 0x1b0(%rdi), %eax cmpl $0x1, %eax jne 0x240ea movq 0x1b8(%rbx), %rcx movl 0x1c(%rcx), %eax movl %eax, 0x1d8(%rbx) movl 0x20(%rcx), %eax movl %eax, 0x1dc(%rbx) movabsq $0x100000001, %rdx # imm = 0x100000001 movq %rdx, 0x34(%rcx) movl $0x1, %edx movl %edx, 0x3c(%rcx) movl 0xc(%rcx), %esi movl 0x24(%rcx), %edi movl %edi, 0x40(%rcx) movl %edx, 0x44(%rcx) xorl %edx, %edx divl %esi testl %edx, %edx cmovel %esi, %edx movl %edx, 0x48(%rcx) movq $0x1, 0x1e0(%rbx) jmp 0x241f4 leal -0x5(%rax), %ecx cmpl $-0x5, %ecx ja 0x24111 movq (%rbx), %rcx movl $0x1a, 0x28(%rcx) movl %eax, 0x2c(%rcx) movq (%rbx), %rax movl $0x4, 0x30(%rax) movq (%rbx), %rax movq %rbx, %rdi callq *(%rax) movl 0x30(%rbx), %edi movslq 0x198(%rbx), %rsi shlq $0x3, %rsi callq 0x5150 movl %eax, 0x1d8(%rbx) movl 0x34(%rbx), %edi movslq 0x19c(%rbx), %rsi shlq $0x3, %rsi callq 0x5150 movl %eax, 0x1dc(%rbx) movl $0x0, 0x1e0(%rbx) cmpl $0x0, 0x1b0(%rbx) jle 0x241f4 xorl %r14d, %r14d movq 0x1b8(%rbx,%r14,8), %rcx movl 0x8(%rcx), %esi movl 0xc(%rcx), %edi movl %esi, 0x34(%rcx) movl %edi, 0x38(%rcx) movl %edi, %ebp imull %esi, %ebp movl %ebp, 0x3c(%rcx) movl 0x24(%rcx), %eax imull %esi, %eax movl %eax, 0x40(%rcx) movl 0x1c(%rcx), %eax xorl %edx, %edx divl %esi testl %edx, %edx cmovel %esi, %edx movl %edx, 0x44(%rcx) movl 0x20(%rcx), %eax xorl %edx, %edx divl %edi testl %edx, %edx cmovel %edi, %edx movl %edx, 0x48(%rcx) movl 0x1e0(%rbx), %eax addl %ebp, %eax cmpl $0xb, %eax jl 0x241bc movq (%rbx), %rax movl $0xd, 0x28(%rax) movq %rbx, %rdi callq *(%rax) testl %ebp, %ebp jle 0x241e1 incl %ebp movslq 0x1e0(%rbx), %rax leal 0x1(%rax), %ecx movl %ecx, 0x1e0(%rbx) movl %r14d, 0x1e4(%rbx,%rax,4) decl %ebp cmpl $0x1, %ebp jg 0x241c2 incq %r14 movslq 0x1b0(%rbx), %rax cmpq %rax, %r14 jl 0x2415d cmpl $0x0, 0x1b0(%rbx) jle 0x24282 xorl %r15d, %r15d movq 0x1b8(%rbx,%r15,8), %r12 cmpq $0x0, 0x50(%r12) jne 0x24273 movl 0x10(%r12), %r14d 
cmpq $0x3, %r14 ja 0x2422a cmpq $0x0, 0xc8(%rbx,%r14,8) jne 0x24240 movq (%rbx), %rax movl $0x34, 0x28(%rax) movl %r14d, 0x2c(%rax) movq (%rbx), %rax movq %rbx, %rdi callq *(%rax) movslq %r14d, %r13 movq 0x8(%rbx), %rax movl $0x84, %edx movq %rbx, %rdi movl $0x1, %esi callq *(%rax) movq %rax, %r14 movq 0xc8(%rbx,%r13,8), %rsi movl $0x84, %edx movq %rax, %rdi callq 0x52d0 movq %r14, 0x50(%r12) incq %r15 movslq 0x1b0(%rbx), %rax cmpq %rax, %r15 jl 0x24204 movq 0x250(%rbx), %rax movq %rbx, %rdi callq *(%rax) movq 0x230(%rbx), %rax movq %rbx, %rdi callq *(%rax) movq 0x230(%rbx), %rax movq 0x240(%rbx), %rcx movq 0x8(%rax), %rax movq %rax, (%rcx) addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdinput.c
jinit_d_main_controller
GLOBAL(void) jinit_d_main_controller(j_decompress_ptr cinfo, boolean need_full_buffer) { my_main_ptr main_ptr; int ci, rgroup, ngroups; jpeg_component_info *compptr; main_ptr = (my_main_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(my_main_controller)); cinfo->main = (struct jpeg_d_main_controller *)main_ptr; main_ptr->pub.start_pass = start_pass_main; if (need_full_buffer) /* shouldn't happen */ ERREXIT(cinfo, JERR_BAD_BUFFER_MODE); /* Allocate the workspace. * ngroups is the number of row groups we need. */ if (cinfo->upsample->need_context_rows) { if (cinfo->_min_DCT_scaled_size < 2) /* unsupported, see comments above */ ERREXIT(cinfo, JERR_NOTIMPL); alloc_funny_pointers(cinfo); /* Alloc space for xbuffer[] lists */ ngroups = cinfo->_min_DCT_scaled_size + 2; } else { ngroups = cinfo->_min_DCT_scaled_size; } for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { rgroup = (compptr->v_samp_factor * compptr->_DCT_scaled_size) / cinfo->_min_DCT_scaled_size; /* height of a row group of component */ main_ptr->buffer[ci] = (*cinfo->mem->alloc_sarray) ((j_common_ptr)cinfo, JPOOL_IMAGE, compptr->width_in_blocks * compptr->_DCT_scaled_size, (JDIMENSION)(rgroup * ngroups)); } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %esi, %ebp movq %rdi, %rbx movq 0x8(%rdi), %rax movl $0x88, %edx movl $0x1, %esi callq *(%rax) movq %rax, %r14 movq %rax, 0x228(%rbx) leaq 0x166(%rip), %rax # 0x2446a movq %rax, (%r14) testl %ebp, %ebp je 0x2431a movq (%rbx), %rax movl $0x4, 0x28(%rax) movq %rbx, %rdi callq *(%rax) movq 0x260(%rbx), %rax cmpl $0x0, 0x10(%rax) movl 0x1a0(%rbx), %ebp je 0x24405 movq %r14, 0x10(%rsp) cmpl $0x1, %ebp jg 0x2434a movq (%rbx), %rax movl $0x2f, 0x28(%rax) movq %rbx, %rdi callq *(%rax) movq 0x228(%rbx), %r15 movslq 0x1a0(%rbx), %rax movq %rax, 0x8(%rsp) movq 0x8(%rbx), %rax movslq 0x38(%rbx), %rdx shlq $0x4, %rdx movq %rbx, %rdi movl $0x1, %esi callq *(%rax) movq %rax, 0x68(%r15) movslq 0x38(%rbx), %rcx leaq (%rax,%rcx,8), %rax movq %rax, 0x70(%r15) testq %rcx, %rcx jle 0x243f7 movq 0x130(%rbx), %r13 addq $0x4, 0x8(%rsp) addq $0x24, %r13 xorl %ebp, %ebp movl (%r13), %eax imull -0x18(%r13), %eax cltd idivl 0x1a0(%rbx) movq 0x8(%rbx), %rcx movslq %eax, %r12 movq %r12, %r14 imulq 0x8(%rsp), %r14 movq %r14, %rdx shlq $0x4, %rdx movq %rbx, %rdi movl $0x1, %esi callq *(%rcx) leaq (%rax,%r12,8), %rax movq 0x68(%r15), %rcx movq %rax, (%rcx,%rbp,8) movslq %r14d, %rcx leaq (%rax,%rcx,8), %rax movq 0x70(%r15), %rcx movq %rax, (%rcx,%rbp,8) incq %rbp movslq 0x38(%rbx), %rax addq $0x60, %r13 cmpq %rax, %rbp jl 0x2439b movl 0x1a0(%rbx), %ebp addl $0x2, %ebp movq 0x10(%rsp), %r14 cmpl $0x0, 0x38(%rbx) jle 0x2445b movq 0x130(%rbx), %r15 addq $0x24, %r15 xorl %r12d, %r12d movl (%r15), %ecx movl -0x18(%r15), %eax imull %ecx, %eax cltd idivl 0x1a0(%rbx) movq 0x8(%rbx), %r8 imull -0x8(%r15), %ecx imull %ebp, %eax movq %rbx, %rdi movl $0x1, %esi movl %ecx, %edx movl %eax, %ecx callq *0x10(%r8) movq %rax, 0x10(%r14,%r12,8) incq %r12 movslq 0x38(%rbx), %rax addq $0x60, %r15 cmpq %rax, %r12 jl 0x24419 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdmainct.c
process_data_context_main
/*
 * Process some data in the context-rows case.
 * Fills the main buffer from the coefficient controller when empty, then
 * drives the postprocessor through a three-state machine (context_state);
 * each case falls through to the next on successful completion, and the
 * function returns early whenever the postprocessor must suspend.
 */
METHODDEF(void)
process_data_context_main(j_decompress_ptr cinfo, JSAMPARRAY output_buf,
                          JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
{
  my_main_ptr main_ptr = (my_main_ptr)cinfo->main;

  /* Read input data if we haven't filled the main buffer yet */
  if (!main_ptr->buffer_full) {
    if (!(*cinfo->coef->decompress_data) (cinfo,
                                          main_ptr->xbuffer[main_ptr->whichptr]))
      return;                   /* suspension forced, can do nothing more */
    main_ptr->buffer_full = TRUE; /* OK, we have an iMCU row to work with */
    main_ptr->iMCU_row_ctr++;   /* count rows received */
  }

  /* Postprocessor typically will not swallow all the input data it is handed
   * in one call (due to filling the output buffer first).  Must be prepared
   * to exit and restart.  This switch lets us keep track of how far we got.
   * Note that each case falls through to the next on successful completion.
   */
  switch (main_ptr->context_state) {
  case CTX_POSTPONED_ROW:
    /* Call postprocessor using previously set pointers for postponed row */
    (*cinfo->post->post_process_data) (cinfo,
                                       main_ptr->xbuffer[main_ptr->whichptr],
                                       &main_ptr->rowgroup_ctr,
                                       main_ptr->rowgroups_avail, output_buf,
                                       out_row_ctr, out_rows_avail);
    if (main_ptr->rowgroup_ctr < main_ptr->rowgroups_avail)
      return;                   /* Need to suspend */
    main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
    if (*out_row_ctr >= out_rows_avail)
      return;                   /* Postprocessor exactly filled output buf */
    FALLTHROUGH                 /*FALLTHROUGH*/
  case CTX_PREPARE_FOR_IMCU:
    /* Prepare to process first M-1 row groups of this iMCU row */
    main_ptr->rowgroup_ctr = 0;
    main_ptr->rowgroups_avail = (JDIMENSION)(cinfo->_min_DCT_scaled_size - 1);
    /* Check for bottom of image: if so, tweak pointers to "duplicate"
     * the last sample row, and adjust rowgroups_avail to ignore padding rows.
     */
    if (main_ptr->iMCU_row_ctr == cinfo->total_iMCU_rows)
      set_bottom_pointers(cinfo);
    main_ptr->context_state = CTX_PROCESS_IMCU;
    FALLTHROUGH                 /*FALLTHROUGH*/
  case CTX_PROCESS_IMCU:
    /* Call postprocessor using previously set pointers */
    (*cinfo->post->post_process_data) (cinfo,
                                       main_ptr->xbuffer[main_ptr->whichptr],
                                       &main_ptr->rowgroup_ctr,
                                       main_ptr->rowgroups_avail, output_buf,
                                       out_row_ctr, out_rows_avail);
    if (main_ptr->rowgroup_ctr < main_ptr->rowgroups_avail)
      return;                   /* Need to suspend */
    /* After the first iMCU, change wraparound pointers to normal state */
    if (main_ptr->iMCU_row_ctr == 1)
      set_wraparound_pointers(cinfo);
    /* Prepare to load new iMCU row using other xbuffer list */
    main_ptr->whichptr ^= 1;    /* 0=>1 or 1=>0 */
    main_ptr->buffer_full = FALSE;
    /* Still need to process last row group of this iMCU row, */
    /* which is saved at index M+1 of the other xbuffer */
    main_ptr->rowgroup_ctr = (JDIMENSION)(cinfo->_min_DCT_scaled_size + 1);
    main_ptr->rowgroups_avail = (JDIMENSION)(cinfo->_min_DCT_scaled_size + 2);
    main_ptr->context_state = CTX_POSTPONED_ROW;
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movl %ecx, %ebp movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx movq 0x228(%rdi), %r12 cmpl $0x0, 0x60(%r12) jne 0x246d0 movq 0x230(%rbx), %rax movslq 0x78(%r12), %rcx movq 0x68(%r12,%rcx,8), %rsi movq %rbx, %rdi callq *0x18(%rax) testl %eax, %eax je 0x24a31 movl $0x1, 0x60(%r12) incl 0x84(%r12) movl 0x7c(%r12), %eax testl %eax, %eax je 0x2473d cmpl $0x1, %eax je 0x248cd cmpl $0x2, %eax jne 0x24a31 movq 0x238(%rbx), %rax movslq 0x78(%r12), %rcx movq 0x68(%r12,%rcx,8), %rsi leaq 0x64(%r12), %rdx movl 0x80(%r12), %ecx movl %ebp, (%rsp) movq %rbx, %rdi movq %r15, %r8 movq %r14, %r9 callq *0x8(%rax) movl 0x64(%r12), %eax cmpl 0x80(%r12), %eax jb 0x24a31 movl $0x0, 0x7c(%r12) cmpl %ebp, (%r14) jae 0x24a31 movq %r15, 0x10(%rsp) movq %r14, 0x18(%rsp) movl $0x0, 0x64(%r12) movl 0x1a0(%rbx), %r8d leal -0x1(%r8), %eax movl %eax, 0x80(%r12) movl 0x84(%r12), %eax movq %rbx, 0x8(%rsp) cmpl 0x1a4(%rbx), %eax jne 0x248b5 movq 0x8(%rsp), %rax movslq 0x38(%rax), %r9 testq %r9, %r9 jle 0x248b5 movq 0x8(%rsp), %rax movq 0x130(%rax), %r10 movq 0x228(%rax), %r11 movslq 0x78(%r11), %r13 xorl %r15d, %r15d movdqa 0x26850(%rip), %xmm0 # 0x4b000 movdqa 0x26898(%rip), %xmm1 # 0x4b050 pcmpeqd %xmm2, %xmm2 movdqa 0x2684c(%rip), %xmm3 # 0x4b010 movl 0x24(%r10), %ecx movl 0x2c(%r10), %edi imull 0xc(%r10), %ecx movl %ecx, %eax cltd idivl %r8d movl %eax, %esi movl %edi, %eax xorl %edx, %edx divl %ecx testl %edx, %edx cmovnel %edx, %ecx testq %r15, %r15 jne 0x247f8 leal -0x1(%rcx), %eax cltd idivl %esi incl %eax movl %eax, 0x80(%r11) testl %esi, %esi jle 0x248a5 movq 0x68(%r11,%r13,8), %rax movq (%rax,%r15,8), %rax addl %esi, %esi cmpl $0x2, %esi movl $0x1, %edx cmovll %edx, %esi movslq %ecx, %rcx movl %esi, %edx incl %edx decq %rsi movq %rsi, %xmm4 pshufd $0x44, %xmm4, %xmm4 # xmm4 = xmm4[0,1,0,1] leaq (%rax,%rcx,8), %rsi addq $0x8, %rsi andl $-0x2, %edx shlq $0x3, %rdx pxor %xmm1, %xmm4 xorl %edi, %edi movdqa 
%xmm0, %xmm5 movdqa %xmm5, %xmm6 pxor %xmm1, %xmm6 movdqa %xmm6, %xmm7 pcmpgtd %xmm4, %xmm7 pcmpeqd %xmm4, %xmm6 pshufd $0xf5, %xmm6, %xmm8 # xmm8 = xmm6[1,1,3,3] pand %xmm7, %xmm8 pshufd $0xf5, %xmm7, %xmm6 # xmm6 = xmm7[1,1,3,3] por %xmm8, %xmm6 movq -0x8(%rax,%rcx,8), %rbx movd %xmm6, %r14d notl %r14d testb $0x1, %r14b je 0x24884 movq %rbx, -0x8(%rsi,%rdi) pxor %xmm2, %xmm6 pextrw $0x4, %xmm6, %r14d testb $0x1, %r14b je 0x24898 movq %rbx, (%rsi,%rdi) paddq %xmm3, %xmm5 addq $0x10, %rdi cmpq %rdi, %rdx jne 0x24843 incq %r15 addq $0x60, %r10 cmpq %r9, %r15 jne 0x247c4 movl $0x1, 0x7c(%r12) movq 0x8(%rsp), %rbx movq 0x18(%rsp), %r14 movq 0x10(%rsp), %r15 movq 0x238(%rbx), %rax movslq 0x78(%r12), %rcx movq 0x68(%r12,%rcx,8), %rsi leaq 0x64(%r12), %rdx movl 0x80(%r12), %ecx movl %ebp, (%rsp) movq %rbx, %rdi movq %r15, %r8 movq %r14, %r9 callq *0x8(%rax) movl 0x64(%r12), %eax cmpl 0x80(%r12), %eax jb 0x24a31 movq %rbx, 0x8(%rsp) cmpl $0x1, 0x84(%r12) jne 0x249fb movq 0x8(%rsp), %rax movslq 0x38(%rax), %rax movq %rax, 0x18(%rsp) testq %rax, %rax jle 0x249fb movq 0x8(%rsp), %rax movl 0x1a0(%rax), %ecx movq 0x130(%rax), %rdi movq 0x228(%rax), %r8 leal 0x1(%rcx), %eax movl %eax, 0x24(%rsp) movq %rcx, 0x10(%rsp) leal 0x2(%rcx), %eax movl %eax, 0x20(%rsp) xorl %r11d, %r11d movl 0x24(%rdi), %eax imull 0xc(%rdi), %eax cltd idivl 0x10(%rsp) testl %eax, %eax jle 0x249e9 movq 0x68(%r8), %rcx movq 0x70(%r8), %rdx movq (%rcx,%r11,8), %rbx movq (%rdx,%r11,8), %r14 movl %eax, %ecx imull 0x24(%rsp), %ecx movslq %ecx, %rcx movl %eax, %r15d imull 0x20(%rsp), %eax movslq %eax, %rdx shlq $0x3, %r15 leaq (%r14,%rdx,8), %rax leaq (%rbx,%rdx,8), %r13 movq %r14, %rbp subq %r15, %rbp movq %rbx, %rdx subq %r15, %rdx leaq (%r14,%rcx,8), %r9 leaq (%rbx,%rcx,8), %r10 xorl %ecx, %ecx movq (%r10,%rcx), %rsi movq %rsi, (%rdx,%rcx) movq (%r9,%rcx), %rsi movq %rsi, (%rbp,%rcx) movq (%rbx,%rcx), %rsi movq %rsi, (%r13,%rcx) movq (%r14,%rcx), %rsi movq %rsi, (%rax,%rcx) addq $0x8, %rcx cmpq %rcx, %r15 
jne 0x249be incq %r11 addq $0x60, %rdi cmpq 0x18(%rsp), %r11 jne 0x24967 xorb $0x1, 0x78(%r12) movl $0x0, 0x60(%r12) movq 0x8(%rsp), %rax movl 0x1a0(%rax), %eax leal 0x1(%rax), %ecx movl %ecx, 0x64(%r12) addl $0x2, %eax movl %eax, 0x80(%r12) movl $0x2, 0x7c(%r12) addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdmainct.c
process_restart
LOCAL(boolean) process_restart(j_decompress_ptr cinfo) { phuff_entropy_ptr entropy = (phuff_entropy_ptr)cinfo->entropy; int ci; /* Throw away any unused bits remaining in bit buffer; */ /* include any full bytes in next_marker's count of discarded bytes */ cinfo->marker->discarded_bytes += entropy->bitstate.bits_left / 8; entropy->bitstate.bits_left = 0; /* Advance past the RSTn marker */ if (!(*cinfo->marker->read_restart_marker) (cinfo)) return FALSE; /* Re-initialize DC predictions to 0 */ for (ci = 0; ci < cinfo->comps_in_scan; ci++) entropy->saved.last_dc_val[ci] = 0; /* Re-init EOB run count, too */ entropy->saved.EOBRUN = 0; /* Reset restart counter */ entropy->restarts_to_go = cinfo->restart_interval; /* Reset out-of-data flag, unless read_restart_marker left us smack up * against a marker. In that case we will end up treating the next data * segment as empty, and we can avoid producing bogus output pixels by * leaving the flag set. */ if (cinfo->unread_marker == 0) entropy->pub.insufficient_data = FALSE; return TRUE; }
pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx movq 0x248(%rdi), %rax movq 0x250(%rdi), %r14 movl 0x20(%r14), %ecx leal 0x7(%rcx), %edx testl %ecx, %ecx cmovnsl %ecx, %edx sarl $0x3, %edx addl %edx, 0x24(%rax) movl $0x0, 0x20(%r14) callq *0x10(%rax) testl %eax, %eax je 0x2aa48 cmpl $0x0, 0x1b0(%rbx) jle 0x2aa1e xorl %eax, %eax movl $0x0, 0x2c(%r14,%rax,4) incq %rax movslq 0x1b0(%rbx), %rcx cmpq %rcx, %rax jl 0x2aa06 movl $0x0, 0x28(%r14) movl 0x170(%rbx), %eax movl %eax, 0x3c(%r14) movl $0x1, %eax cmpl $0x0, 0x21c(%rbx) jne 0x2aa4a movl $0x0, 0x10(%r14) jmp 0x2aa4a xorl %eax, %eax addq $0x8, %rsp popq %rbx popq %r14 retq nop
/rui314[P]libjpeg-turbo/jdphuff.c
post_process_prepass
METHODDEF(void) post_process_prepass(j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION *in_row_group_ctr, JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf, JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail) { my_post_ptr post = (my_post_ptr)cinfo->post; JDIMENSION old_next_row, num_rows; /* Reposition virtual buffer if at start of strip. */ if (post->next_row == 0) { post->buffer = (*cinfo->mem->access_virt_sarray) ((j_common_ptr)cinfo, post->whole_image, post->starting_row, post->strip_height, TRUE); } /* Upsample some data (up to a strip height's worth). */ old_next_row = post->next_row; (*cinfo->upsample->upsample) (cinfo, input_buf, in_row_group_ctr, in_row_groups_avail, post->buffer, &post->next_row, post->strip_height); /* Allow quantizer to scan new data. No data is emitted, */ /* but we advance out_row_ctr so outer loop can tell when we're done. */ if (post->next_row > old_next_row) { num_rows = post->next_row - old_next_row; (*cinfo->cquantize->color_quantize) (cinfo, post->buffer + old_next_row, (JSAMPARRAY)NULL, (int)num_rows); *out_row_ctr += num_rows; } /* Advance if we filled the strip. */ if (post->next_row >= post->strip_height) { post->starting_row += post->strip_height; post->next_row = 0; } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %r9, 0x10(%rsp) movl %ecx, %r9d movq %rdx, %r15 movq %rsi, %rbx movq %rdi, %r14 movq 0x238(%rdi), %rbp leaq 0x28(%rbp), %r12 cmpl $0x0, 0x28(%rbp) jne 0x2ac7d movq 0x8(%r14), %rax movq 0x10(%rbp), %rsi movl 0x24(%rbp), %edx movl 0x20(%rbp), %ecx movq %r14, %rdi movl $0x1, %r8d movl %r9d, %r13d callq *0x38(%rax) movl %r13d, %r9d movq %rax, 0x18(%rbp) movl 0x28(%rbp), %r13d movq 0x260(%r14), %rax movq 0x18(%rbp), %r8 movl 0x20(%rbp), %ecx movl %ecx, (%rsp) movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx movl %r9d, %ecx movq %r12, %r9 callq *0x8(%rax) movl 0x28(%rbp), %ebx cmpl %r13d, %ebx jbe 0x2acd2 subl %r13d, %ebx movq 0x270(%r14), %rax shlq $0x3, %r13 addq 0x18(%rbp), %r13 movq %r14, %rdi movq %r13, %rsi xorl %edx, %edx movl %ebx, %ecx callq *0x8(%rax) movq 0x10(%rsp), %rax addl %ebx, (%rax) movl 0x20(%rbp), %eax cmpl %eax, 0x28(%rbp) jb 0x2ace4 addl %eax, 0x24(%rbp) movl $0x0, 0x28(%rbp) addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdpostct.c
jinit_upsampler
GLOBAL(void) jinit_upsampler(j_decompress_ptr cinfo) { my_upsample_ptr upsample; int ci; jpeg_component_info *compptr; boolean need_buffer, do_fancy; int h_in_group, v_in_group, h_out_group, v_out_group; if (!cinfo->master->jinit_upsampler_no_alloc) { upsample = (my_upsample_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(my_upsampler)); cinfo->upsample = (struct jpeg_upsampler *)upsample; upsample->pub.start_pass = start_pass_upsample; upsample->pub.upsample = sep_upsample; upsample->pub.need_context_rows = FALSE; /* until we find out differently */ } else upsample = (my_upsample_ptr)cinfo->upsample; if (cinfo->CCIR601_sampling) /* this isn't supported */ ERREXIT(cinfo, JERR_CCIR601_NOTIMPL); /* jdmainct.c doesn't support context rows when min_DCT_scaled_size = 1, * so don't ask for it. */ do_fancy = cinfo->do_fancy_upsampling && cinfo->_min_DCT_scaled_size > 1; /* Verify we can handle the sampling factors, select per-component methods, * and create storage as needed. */ for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { /* Compute size of an "input group" after IDCT scaling. This many samples * are to be converted to max_h_samp_factor * max_v_samp_factor pixels. */ h_in_group = (compptr->h_samp_factor * compptr->_DCT_scaled_size) / cinfo->_min_DCT_scaled_size; v_in_group = (compptr->v_samp_factor * compptr->_DCT_scaled_size) / cinfo->_min_DCT_scaled_size; h_out_group = cinfo->max_h_samp_factor; v_out_group = cinfo->max_v_samp_factor; upsample->rowgroup_height[ci] = v_in_group; /* save for use later */ need_buffer = TRUE; if (!compptr->component_needed) { /* Don't bother to upsample an uninteresting component. */ upsample->methods[ci] = noop_upsample; need_buffer = FALSE; } else if (h_in_group == h_out_group && v_in_group == v_out_group) { /* Fullsize components can be processed without any work. 
*/ upsample->methods[ci] = fullsize_upsample; need_buffer = FALSE; } else if (h_in_group * 2 == h_out_group && v_in_group == v_out_group) { /* Special cases for 2h1v upsampling */ if (do_fancy && compptr->downsampled_width > 2) { if (jsimd_can_h2v1_fancy_upsample()) upsample->methods[ci] = jsimd_h2v1_fancy_upsample; else upsample->methods[ci] = h2v1_fancy_upsample; } else { if (jsimd_can_h2v1_upsample()) upsample->methods[ci] = jsimd_h2v1_upsample; else upsample->methods[ci] = h2v1_upsample; } } else if (h_in_group == h_out_group && v_in_group * 2 == v_out_group && do_fancy) { /* Non-fancy upsampling is handled by the generic method */ #if defined(__arm__) || defined(__aarch64__) || \ defined(_M_ARM) || defined(_M_ARM64) if (jsimd_can_h1v2_fancy_upsample()) upsample->methods[ci] = jsimd_h1v2_fancy_upsample; else #endif upsample->methods[ci] = h1v2_fancy_upsample; upsample->pub.need_context_rows = TRUE; } else if (h_in_group * 2 == h_out_group && v_in_group * 2 == v_out_group) { /* Special cases for 2h2v upsampling */ if (do_fancy && compptr->downsampled_width > 2) { if (jsimd_can_h2v2_fancy_upsample()) upsample->methods[ci] = jsimd_h2v2_fancy_upsample; else upsample->methods[ci] = h2v2_fancy_upsample; upsample->pub.need_context_rows = TRUE; } else { if (jsimd_can_h2v2_upsample()) upsample->methods[ci] = jsimd_h2v2_upsample; else upsample->methods[ci] = h2v2_upsample; } } else if ((h_out_group % h_in_group) == 0 && (v_out_group % v_in_group) == 0) { /* Generic integral-factors upsampling method */ #if defined(__mips__) if (jsimd_can_int_upsample()) upsample->methods[ci] = jsimd_int_upsample; else #endif upsample->methods[ci] = int_upsample; upsample->h_expand[ci] = (UINT8)(h_out_group / h_in_group); upsample->v_expand[ci] = (UINT8)(v_out_group / v_in_group); } else ERREXIT(cinfo, JERR_FRACT_SAMPLE_NOTIMPL); if (need_buffer && !cinfo->master->jinit_upsampler_no_alloc) { upsample->color_buf[ci] = (*cinfo->mem->alloc_sarray) ((j_common_ptr)cinfo, JPOOL_IMAGE, 
(JDIMENSION)jround_up((long)cinfo->output_width, (long)cinfo->max_h_samp_factor), (JDIMENSION)cinfo->max_v_samp_factor); } } }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movq %rdi, %rbx movq 0x220(%rdi), %rax cmpl $0x0, 0x6c(%rax) je 0x2addf movq 0x260(%rbx), %r14 jmp 0x2ae19 movq 0x8(%rbx), %rax movl $0x100, %edx # imm = 0x100 movq %rbx, %rdi movl $0x1, %esi callq *(%rax) movq %rax, %r14 movq %rax, 0x260(%rbx) leaq 0x289(%rip), %rax # 0x2b08c movq %rax, (%r14) leaq 0x29f(%rip), %rax # 0x2b0ac movq %rax, 0x8(%r14) movl $0x0, 0x10(%r14) cmpl $0x0, 0x188(%rbx) je 0x2ae31 movq (%rbx), %rax movl $0x19, 0x28(%rax) movq %rbx, %rdi callq *(%rax) cmpl $0x0, 0x64(%rbx) je 0x2ae44 cmpl $0x2, 0x1a0(%rbx) setge %bpl jmp 0x2ae46 xorl %ebp, %ebp cmpl $0x0, 0x38(%rbx) jle 0x2b07d movq 0x130(%rbx), %r15 leaq 0x18(%r14), %rax movq %rax, 0x10(%rsp) leaq 0x68(%r14), %r13 addq $0x30, %r15 xorl %r12d, %r12d leaq 0x368(%rip), %r9 # 0x2b1da movq %r13, 0x8(%rsp) movl -0xc(%r15), %ecx movl -0x28(%r15), %eax imull %ecx, %eax movl 0x1a0(%rbx), %edi cltd idivl %edi movl %eax, %esi imull -0x24(%r15), %ecx movl %ecx, %eax cltd idivl %edi movl %eax, %ecx movl 0x198(%rbx), %eax movl 0x19c(%rbx), %edi movl %ecx, 0xc0(%r14,%r12,4) cmpl $0x0, (%r15) movq %r13, %r8 movq %r9, %rdx je 0x2b042 cmpl %eax, %esi jne 0x2aed3 movq %r13, %r8 leaq 0x317(%rip), %rdx # 0x2b1e2 cmpl %edi, %ecx je 0x2b042 leal (%rsi,%rsi), %edx cmpl %eax, %edx jne 0x2af0b cmpl %edi, %ecx jne 0x2af0b testb %bpl, %bpl je 0x2af79 cmpl $0x3, -0x8(%r15) jb 0x2af79 callq 0x4a39b testl %eax, %eax je 0x2afd6 leaq 0x1f4ed(%rip), %rax # 0x4a3f3 jmp 0x2afdd cmpl %eax, %esi jne 0x2af3c leal (%rcx,%rcx), %r8d cmpl %edi, %r8d sete %r8b andb %bpl, %r8b cmpb $0x1, %r8b jne 0x2af3c leaq 0x3cc(%rip), %rax # 0x2b2f6 movq %rax, 0x68(%r14,%r12,8) movl $0x1, 0x10(%r14) jmp 0x2afe9 cmpl %eax, %edx jne 0x2af8b leal (%rcx,%rcx), %edx cmpl %edi, %edx jne 0x2af8b testb %bpl, %bpl je 0x2afb2 cmpl $0x3, -0x8(%r15) jb 0x2afb2 callq 0x4a35e leaq 0x43d(%rip), %rcx # 0x2b39c testl %eax, %eax je 0x2af6a leaq 0x1f46e(%rip), %rcx # 0x4a3d8 movq %rcx, 
0x68(%r14,%r12,8) movl $0x1, 0x10(%r14) jmp 0x2afe2 callq 0x4a30e testl %eax, %eax je 0x2afc4 leaq 0x1f3b7(%rip), %rax # 0x4a340 jmp 0x2afdd cltd idivl %esi testl %edx, %edx jne 0x2afa1 movl %eax, %esi movl %edi, %eax cltd idivl %ecx testl %edx, %edx je 0x2b05c movq (%rbx), %rax movl $0x26, 0x28(%rax) movq %rbx, %rdi callq *(%rax) jmp 0x2afe2 callq 0x4a2fa testl %eax, %eax je 0x2afcd leaq 0x1f360(%rip), %rax # 0x4a322 jmp 0x2afdd leaq 0x2d6(%rip), %rax # 0x2b2a1 jmp 0x2afdd leaq 0x506(%rip), %rax # 0x2b4da jmp 0x2afdd leaq 0x209(%rip), %rax # 0x2b1e6 movq %rax, 0x68(%r14,%r12,8) leaq 0x1f1(%rip), %r9 # 0x2b1da movq 0x220(%rbx), %rax cmpl $0x0, 0x6c(%rax) jne 0x2b046 movq 0x8(%rbx), %rax movq %r14, %r13 movl %ebp, %r14d movq 0x10(%rax), %rbp movl 0x88(%rbx), %edi movslq 0x198(%rbx), %rsi callq 0x52c0 movl 0x19c(%rbx), %ecx movq %rbx, %rdi movl $0x1, %esi movl %eax, %edx callq *%rbp leaq 0x1ab(%rip), %r9 # 0x2b1da movl %r14d, %ebp movq %r13, %r14 movq 0x8(%rsp), %r13 movq %rax, %rdx movq 0x10(%rsp), %r8 movq %rdx, (%r8,%r12,8) incq %r12 movslq 0x38(%rbx), %rax addq $0x60, %r15 cmpq %rax, %r12 jl 0x2ae77 jmp 0x2b07d leaq 0x50c(%rip), %rcx # 0x2b56f movq %rcx, 0x68(%r14,%r12,8) movb %sil, 0xe8(%r14,%r12) movb %al, 0xf2(%r14,%r12) jmp 0x2afe9 addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdsample.c
h2v1_fancy_upsample
METHODDEF(void) h2v1_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr, JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr) { JSAMPARRAY output_data = *output_data_ptr; register JSAMPROW inptr, outptr; register int invalue; register JDIMENSION colctr; int inrow; for (inrow = 0; inrow < cinfo->max_v_samp_factor; inrow++) { inptr = input_data[inrow]; outptr = output_data[inrow]; /* Special case for first column */ invalue = *inptr++; *outptr++ = (JSAMPLE)invalue; *outptr++ = (JSAMPLE)((invalue * 3 + inptr[0] + 2) >> 2); for (colctr = compptr->downsampled_width - 2; colctr > 0; colctr--) { /* General case: 3/4 * nearer pixel + 1/4 * further pixel */ invalue = (*inptr++) * 3; *outptr++ = (JSAMPLE)((invalue + inptr[-2] + 1) >> 2); *outptr++ = (JSAMPLE)((invalue + inptr[0] + 2) >> 2); } /* Special case for last column */ invalue = *inptr; *outptr++ = (JSAMPLE)((invalue * 3 + inptr[-1] + 1) >> 2); *outptr++ = (JSAMPLE)invalue; } }
cmpl $0x0, 0x19c(%rdi) jle 0x2b2a0 pushq %rbx movq (%rcx), %rax xorl %ecx, %ecx movq (%rdx,%rcx,8), %r8 movq (%rax,%rcx,8), %r10 leaq 0x1(%r8), %r9 movzbl (%r8), %r11d movb %r11b, (%r10) leal (%r11,%r11,2), %r11d movzbl 0x1(%r8), %r8d addl %r11d, %r8d addl $0x2, %r8d shrl $0x2, %r8d movb %r8b, 0x1(%r10) movl 0x28(%rsi), %r11d leaq 0x2(%r10), %r8 addl $-0x2, %r11d je 0x2b26e movzbl (%r9), %r10d leal (%r10,%r10,2), %r10d movzbl -0x1(%r9), %ebx addl %r10d, %ebx incl %ebx shrl $0x2, %ebx movb %bl, (%r8) movzbl 0x1(%r9), %ebx incq %r9 addl %ebx, %r10d addl $0x2, %r10d shrl $0x2, %r10d movb %r10b, 0x1(%r8) addq $0x2, %r8 decl %r11d jne 0x2b232 leaq -0x2(%r8), %r10 movzbl (%r9), %r11d leal (%r11,%r11,2), %ebx movzbl -0x1(%r9), %r9d addl %ebx, %r9d incl %r9d shrl $0x2, %r9d movb %r9b, (%r8) movb %r11b, 0x3(%r10) incq %rcx movslq 0x19c(%rdi), %r8 cmpq %r8, %rcx jl 0x2b1f9 popq %rbx retq
/rui314[P]libjpeg-turbo/jdsample.c
h1v2_fancy_upsample
METHODDEF(void)
h1v2_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                    JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  /* Fancy (triangle-filter) 1:2 vertical upsampling.  Each input row
   * produces two output rows; every output pixel blends the nearest input
   * row (weight 3) with the next-nearest row (weight 1).
   */
  JSAMPARRAY output_data = *output_data_ptr;
  JSAMPROW near_row, far_row, dst;
#if BITS_IN_JSAMPLE == 8
  int sum, bias;
#else
  JLONG sum, bias;
#endif
  JDIMENSION col;
  int inrow, outrow, half;

  for (inrow = 0, outrow = 0; outrow < cinfo->max_v_samp_factor; inrow++) {
    /* half == 0: upper output row, next-nearest is the row above (bias 1);
     * half == 1: lower output row, next-nearest is the row below (bias 2).
     * The differing biases make rounding statistically unbiased.
     */
    for (half = 0; half < 2; half++) {
      near_row = input_data[inrow];
      far_row = input_data[(half == 0) ? inrow - 1 : inrow + 1];
      bias = (half == 0) ? 1 : 2;
      dst = output_data[outrow++];
      for (col = 0; col < compptr->downsampled_width; col++) {
        sum = (*near_row++) * 3 + (*far_row++);
        *dst++ = (JSAMPLE)((sum + bias) >> 2);
      }
    }
  }
}
cmpl $0x0, 0x19c(%rdi) jle 0x2b39b pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq (%rcx), %rax xorl %ecx, %ecx xorl %r8d, %r8d leaq (%rdx,%rcx,8), %r9 movslq %r8d, %r8 xorl %r10d, %r10d cmpl $0x1, %r10d movw $0x2, %r11w sbbw $0x0, %r11w cmpl $0x0, 0x28(%rsi) je 0x2b376 xorl %r14d, %r14d testl %r10d, %r10d setne %r14b movq (%rax,%r8,8), %rbx shll $0x4, %r14d movq -0x8(%r14,%r9), %r14 movq (%r9), %r15 xorl %r12d, %r12d movzbl (%r15,%r12), %r13d leal (%r13,%r13,2), %ebp movzbl (%r14,%r12), %r13d addl %r11d, %r13d addl %ebp, %r13d shrl $0x2, %r13d movb %r13b, (%rbx,%r12) incq %r12 cmpl 0x28(%rsi), %r12d jb 0x2b350 incq %r8 leal 0x1(%r10), %r11d testl %r10d, %r10d movl %r11d, %r10d je 0x2b31f incq %rcx cmpl %r8d, 0x19c(%rdi) jg 0x2b315 popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jdsample.c
h2v2_upsample
METHODDEF(void)
h2v2_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
              JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
{
  /* Plain 2x2 pixel replication: duplicate each input pixel horizontally,
   * then copy the finished row to produce the second output row.
   */
  JSAMPARRAY output_data = *output_data_ptr;
  JSAMPROW src, dst, limit;
  JSAMPLE pixel;
  int inrow, outrow;

  for (inrow = 0, outrow = 0; outrow < cinfo->max_v_samp_factor;
       inrow++, outrow += 2) {
    src = input_data[inrow];
    dst = output_data[outrow];
    limit = dst + cinfo->output_width;
    while (dst < limit) {
      pixel = *src++;
      *dst++ = pixel;
      *dst++ = pixel;
    }
    /* Second output row is an exact copy of the one just written. */
    jcopy_sample_rows(output_data, outrow, output_data, outrow + 1, 1,
                      cinfo->output_width);
  }
}
cmpl $0x0, 0x19c(%rdi) jle 0x2b56e pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx movq %rdx, %rbx movq %rdi, %r14 movq (%rcx), %r15 xorl %r13d, %r13d xorl %r12d, %r12d movl 0x88(%r14), %eax testq %rax, %rax je 0x2b534 movq (%r15,%r12,8), %rcx addq %rcx, %rax movq (%rbx,%r13,8), %rdx xorl %esi, %esi movb (%rdx,%rsi), %dil leaq (%rcx,%rsi,2), %r8 addq $0x2, %r8 movb %dil, -0x2(%r8) movb %dil, -0x1(%r8) incq %rsi cmpq %rax, %r8 jb 0x2b518 movl %r12d, %ecx orl $0x1, %ecx movl 0x88(%r14), %r9d movq %r15, %rdi movl %r12d, %esi movq %r15, %rdx movl $0x1, %r8d callq 0x53f0 incq %r13 addq $0x2, %r12 cmpl %r12d, 0x19c(%r14) jg 0x2b4ff popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq
/rui314[P]libjpeg-turbo/jdsample.c
jpeg_read_coefficients
/*
 * Read the coefficient arrays from a JPEG file without decompressing the
 * pixel data.  Absorbs the entire input stream into the coefficient buffer
 * and returns a pointer to the virtual coefficient arrays, or NULL if input
 * was suspended.  Ownership of the arrays stays with the library; they are
 * valid until jpeg_finish_decompress/jpeg_destroy.
 */
GLOBAL(jvirt_barray_ptr *)
jpeg_read_coefficients(j_decompress_ptr cinfo)
{
  if (cinfo->global_state == DSTATE_READY) {
    /* First call: initialize active modules */
    transdecode_master_selection(cinfo);
    cinfo->global_state = DSTATE_RDCOEFS;
  }
  if (cinfo->global_state == DSTATE_RDCOEFS) {
    /* Absorb whole file into the coef buffer */
    for (;;) {
      int retcode;
      /* Call progress monitor hook if present */
      if (cinfo->progress != NULL)
        (*cinfo->progress->progress_monitor) ((j_common_ptr)cinfo);
      /* Absorb some more input */
      retcode = (*cinfo->inputctl->consume_input) (cinfo);
      if (retcode == JPEG_SUSPENDED)
        /* Source ran dry; caller must refill and re-invoke. */
        return NULL;
      if (retcode == JPEG_REACHED_EOI)
        break;
      /* Advance progress counter if appropriate */
      if (cinfo->progress != NULL &&
          (retcode == JPEG_ROW_COMPLETED || retcode == JPEG_REACHED_SOS)) {
        if (++cinfo->progress->pass_counter >= cinfo->progress->pass_limit) {
          /* startup underestimated number of scans; ratchet up one scan */
          cinfo->progress->pass_limit += (long)cinfo->total_iMCU_rows;
        }
      }
    }
    /* Set state so that jpeg_finish_decompress does the right thing */
    cinfo->global_state = DSTATE_STOPPING;
  }
  /* At this point we should be in state DSTATE_STOPPING if being used
   * standalone, or in state DSTATE_BUFIMAGE if being invoked to get access
   * to the coefficients during a full buffered-image-mode decompression.
   */
  if ((cinfo->global_state == DSTATE_STOPPING ||
       cinfo->global_state == DSTATE_BUFIMAGE) && cinfo->buffered_image) {
    return cinfo->coef->coef_arrays;
  }
  /* Oops, improper usage */
  ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
  return NULL;                  /* keep compiler happy */
}
pushq %rbx movq %rdi, %rbx cmpl $0xca, 0x24(%rdi) jne 0x2b744 movl $0x1, 0x58(%rbx) cmpl $0x0, 0x13c(%rbx) je 0x2b6a3 movq %rbx, %rdi callq 0x5090 jmp 0x2b6be cmpl $0x0, 0x138(%rbx) je 0x2b6b6 movq %rbx, %rdi callq 0x51e0 jmp 0x2b6be movq %rbx, %rdi callq 0x50b0 movq %rbx, %rdi movl $0x1, %esi callq 0x5050 movq 0x8(%rbx), %rax movq %rbx, %rdi callq *0x30(%rax) movq 0x240(%rbx), %rax movq %rbx, %rdi callq *0x10(%rax) movq 0x10(%rbx), %rax testq %rax, %rax je 0x2b73d cmpl $0x0, 0x138(%rbx) je 0x2b6ff movl 0x38(%rbx), %ecx leal (%rcx,%rcx,2), %ecx addl $0x2, %ecx jmp 0x2b716 movq 0x240(%rbx), %rcx cmpl $0x0, 0x20(%rcx) je 0x2b711 movl 0x38(%rbx), %ecx jmp 0x2b716 movl $0x1, %ecx movq $0x0, 0x8(%rax) movl 0x1a4(%rbx), %edx movslq %ecx, %rcx imulq %rdx, %rcx movq %rcx, 0x10(%rax) movabsq $0x100000000, %rcx # imm = 0x100000000 movq %rcx, 0x18(%rax) movl $0xd1, 0x24(%rbx) cmpl $0xd1, 0x24(%rbx) jne 0x2b7ca movq 0x10(%rbx), %rax testq %rax, %rax je 0x2b75b movq %rbx, %rdi callq *(%rax) movq 0x240(%rbx), %rax movq %rbx, %rdi callq *(%rax) testl %eax, %eax je 0x2b777 cmpl $0x2, %eax jne 0x2b77e movl $0x2, %eax jmp 0x2b7ba movl $0x1, %eax jmp 0x2b7ba movq 0x10(%rbx), %rcx testq %rcx, %rcx sete %dl andl $-0x3, %eax cmpl $0x1, %eax setne %sil xorl %eax, %eax orb %dl, %sil jne 0x2b7ba movq 0x8(%rcx), %rsi movq 0x10(%rcx), %rdx incq %rsi movq %rsi, 0x8(%rcx) cmpq %rdx, %rsi jl 0x2b7ba movl 0x1a4(%rbx), %esi addq %rsi, %rdx movq %rdx, 0x10(%rcx) testl %eax, %eax je 0x2b74d cmpl $0x1, %eax je 0x2b803 movl $0xd2, 0x24(%rbx) movl 0x24(%rbx), %eax cmpl $0xd2, %eax je 0x2b7db cmpl $0xcf, %eax jne 0x2b7ee cmpl $0x0, 0x58(%rbx) je 0x2b7ee movq 0x230(%rbx), %rax movq 0x20(%rax), %rax jmp 0x2b805 movq (%rbx), %rcx movl $0x14, 0x28(%rcx) movl %eax, 0x2c(%rcx) movq (%rbx), %rax movq %rbx, %rdi callq *(%rax) xorl %eax, %eax popq %rbx retq nop
/rui314[P]libjpeg-turbo/jdtrans.c
jpeg_fdct_float
/*
 * Perform the forward DCT on one 8x8 block of samples, floating-point
 * version.  Two identical passes of the 1-D transform: first over rows,
 * then over columns, operating in place on `data`.  Results are scaled
 * (not a unitary DCT); the companion quantization step folds the scale
 * factors into the quantization table — see the constants below.
 */
GLOBAL(void)
jpeg_fdct_float(FAST_FLOAT *data)
{
  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  FAST_FLOAT tmp10, tmp11, tmp12, tmp13;
  FAST_FLOAT z1, z2, z3, z4, z5, z11, z13;
  FAST_FLOAT *dataptr;
  int ctr;

  /* Pass 1: process rows. */

  dataptr = data;
  for (ctr = DCTSIZE - 1; ctr >= 0; ctr--) {
    /* Butterfly stage: sums and differences of mirrored sample pairs. */
    tmp0 = dataptr[0] + dataptr[7];
    tmp7 = dataptr[0] - dataptr[7];
    tmp1 = dataptr[1] + dataptr[6];
    tmp6 = dataptr[1] - dataptr[6];
    tmp2 = dataptr[2] + dataptr[5];
    tmp5 = dataptr[2] - dataptr[5];
    tmp3 = dataptr[3] + dataptr[4];
    tmp4 = dataptr[3] - dataptr[4];

    /* Even part */

    tmp10 = tmp0 + tmp3;        /* phase 2 */
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    dataptr[0] = tmp10 + tmp11; /* phase 3 */
    dataptr[4] = tmp10 - tmp11;

    z1 = (tmp12 + tmp13) * ((FAST_FLOAT)0.707106781); /* c4 */
    dataptr[2] = tmp13 + z1;    /* phase 5 */
    dataptr[6] = tmp13 - z1;

    /* Odd part */

    tmp10 = tmp4 + tmp5;        /* phase 2 */
    tmp11 = tmp5 + tmp6;
    tmp12 = tmp6 + tmp7;

    /* The rotator is modified from fig 4-8 to avoid extra negations. */
    z5 = (tmp10 - tmp12) * ((FAST_FLOAT)0.382683433); /* c6 */
    z2 = ((FAST_FLOAT)0.541196100) * tmp10 + z5; /* c2-c6 */
    z4 = ((FAST_FLOAT)1.306562965) * tmp12 + z5; /* c2+c6 */
    z3 = tmp11 * ((FAST_FLOAT)0.707106781); /* c4 */

    z11 = tmp7 + z3;            /* phase 5 */
    z13 = tmp7 - z3;

    dataptr[5] = z13 + z2;      /* phase 6 */
    dataptr[3] = z13 - z2;
    dataptr[1] = z11 + z4;
    dataptr[7] = z11 - z4;

    dataptr += DCTSIZE;         /* advance pointer to next row */
  }

  /* Pass 2: process columns.  Same flowgraph as pass 1, but striding by
   * DCTSIZE so each iteration transforms one column of the block.
   */

  dataptr = data;
  for (ctr = DCTSIZE - 1; ctr >= 0; ctr--) {
    tmp0 = dataptr[DCTSIZE * 0] + dataptr[DCTSIZE * 7];
    tmp7 = dataptr[DCTSIZE * 0] - dataptr[DCTSIZE * 7];
    tmp1 = dataptr[DCTSIZE * 1] + dataptr[DCTSIZE * 6];
    tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
    tmp2 = dataptr[DCTSIZE * 2] + dataptr[DCTSIZE * 5];
    tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];
    tmp3 = dataptr[DCTSIZE * 3] + dataptr[DCTSIZE * 4];
    tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4];

    /* Even part */

    tmp10 = tmp0 + tmp3;        /* phase 2 */
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    dataptr[DCTSIZE * 4] = tmp10 - tmp11;

    z1 = (tmp12 + tmp13) * ((FAST_FLOAT)0.707106781); /* c4 */
    dataptr[DCTSIZE * 2] = tmp13 + z1; /* phase 5 */
    dataptr[DCTSIZE * 6] = tmp13 - z1;

    /* Odd part */

    tmp10 = tmp4 + tmp5;        /* phase 2 */
    tmp11 = tmp5 + tmp6;
    tmp12 = tmp6 + tmp7;

    /* The rotator is modified from fig 4-8 to avoid extra negations. */
    z5 = (tmp10 - tmp12) * ((FAST_FLOAT)0.382683433); /* c6 */
    z2 = ((FAST_FLOAT)0.541196100) * tmp10 + z5; /* c2-c6 */
    z4 = ((FAST_FLOAT)1.306562965) * tmp12 + z5; /* c2+c6 */
    z3 = tmp11 * ((FAST_FLOAT)0.707106781); /* c4 */

    z11 = tmp7 + z3;            /* phase 5 */
    z13 = tmp7 - z3;

    dataptr[DCTSIZE * 5] = z13 + z2; /* phase 6 */
    dataptr[DCTSIZE * 3] = z13 - z2;
    dataptr[DCTSIZE * 1] = z11 + z4;
    dataptr[DCTSIZE * 7] = z11 - z4;

    dataptr++;                  /* advance pointer to next column */
  }
}
xorl %eax, %eax movaps 0x4170b(%rip), %xmm0 # 0x6d0c0 movups 0x50(%rdi,%rax), %xmm6 movups 0x70(%rdi,%rax), %xmm10 movups (%rdi,%rax), %xmm12 movups 0x10(%rdi,%rax), %xmm11 movups 0x20(%rdi,%rax), %xmm5 movups 0x30(%rdi,%rax), %xmm9 movups 0x40(%rdi,%rax), %xmm13 movups 0x60(%rdi,%rax), %xmm14 movaps %xmm14, %xmm7 movlhps %xmm13, %xmm7 # xmm7 = xmm7[0],xmm13[0] movaps %xmm12, %xmm4 unpcklps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] shufps $0x24, %xmm7, %xmm4 # xmm4 = xmm4[0,1],xmm7[2,0] movaps %xmm13, %xmm8 unpcklps %xmm14, %xmm8 # xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1] movaps %xmm5, %xmm7 shufps $0x11, %xmm12, %xmm7 # xmm7 = xmm7[1,0],xmm12[1,0] shufps $0xe2, %xmm8, %xmm7 # xmm7 = xmm7[2,0],xmm8[2,3] movaps %xmm14, %xmm15 unpckhpd %xmm13, %xmm15 # xmm15 = xmm15[1],xmm13[1] movaps %xmm12, %xmm8 unpckhps %xmm5, %xmm8 # xmm8 = xmm8[2],xmm5[2],xmm8[3],xmm5[3] shufps $0x24, %xmm15, %xmm8 # xmm8 = xmm8[0,1],xmm15[2,0] unpckhps %xmm14, %xmm13 # xmm13 = xmm13[2],xmm14[2],xmm13[3],xmm14[3] shufps $0x33, %xmm12, %xmm5 # xmm5 = xmm5[3,0],xmm12[3,0] shufps $0xe2, %xmm13, %xmm5 # xmm5 = xmm5[2,0],xmm13[2,3] movaps %xmm10, %xmm13 movlhps %xmm6, %xmm13 # xmm13 = xmm13[0],xmm6[0] movaps %xmm11, %xmm12 unpcklps %xmm9, %xmm12 # xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1] shufps $0x24, %xmm13, %xmm12 # xmm12 = xmm12[0,1],xmm13[2,0] movaps %xmm6, %xmm13 unpcklps %xmm10, %xmm13 # xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1] movaps %xmm9, %xmm14 shufps $0x11, %xmm11, %xmm14 # xmm14 = xmm14[1,0],xmm11[1,0] shufps $0xe2, %xmm13, %xmm14 # xmm14 = xmm14[2,0],xmm13[2,3] movaps %xmm10, %xmm13 unpckhpd %xmm6, %xmm13 # xmm13 = xmm13[1],xmm6[1] movaps %xmm11, %xmm15 unpckhps %xmm9, %xmm15 # xmm15 = xmm15[2],xmm9[2],xmm15[3],xmm9[3] shufps $0x24, %xmm13, %xmm15 # xmm15 = xmm15[0,1],xmm13[2,0] unpckhps %xmm10, %xmm6 # xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3] shufps $0x33, %xmm11, %xmm9 # xmm9 = xmm9[3,0],xmm11[3,0] shufps $0xe2, %xmm6, %xmm9 # xmm9 = xmm9[2,0],xmm6[2,3] movaps %xmm4, 
%xmm6 addps %xmm9, %xmm6 subps %xmm9, %xmm4 movaps %xmm7, %xmm13 addps %xmm15, %xmm13 subps %xmm15, %xmm7 movaps %xmm8, %xmm10 addps %xmm14, %xmm10 subps %xmm14, %xmm8 movaps %xmm5, %xmm11 addps %xmm12, %xmm11 subps %xmm12, %xmm5 movaps %xmm6, %xmm2 addps %xmm11, %xmm2 subps %xmm11, %xmm6 movaps %xmm13, %xmm11 addps %xmm10, %xmm11 subps %xmm10, %xmm13 movaps %xmm11, %xmm3 addps %xmm2, %xmm3 subps %xmm11, %xmm2 addps %xmm6, %xmm13 mulps %xmm0, %xmm13 movaps %xmm6, %xmm11 addps %xmm13, %xmm11 subps %xmm13, %xmm6 addps %xmm8, %xmm5 addps %xmm7, %xmm8 addps %xmm4, %xmm7 movaps %xmm5, %xmm12 subps %xmm7, %xmm12 mulps 0x415d3(%rip), %xmm12 # 0x6d0d0 mulps 0x415dc(%rip), %xmm5 # 0x6d0e0 addps %xmm12, %xmm5 mulps 0x415e1(%rip), %xmm7 # 0x6d0f0 addps %xmm12, %xmm7 mulps %xmm0, %xmm8 movaps %xmm4, %xmm13 addps %xmm8, %xmm13 subps %xmm8, %xmm4 movaps %xmm4, %xmm8 addps %xmm5, %xmm8 subps %xmm5, %xmm4 movaps %xmm13, %xmm12 addps %xmm7, %xmm12 subps %xmm7, %xmm13 movaps %xmm13, %xmm7 movlhps %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0] movaps %xmm2, %xmm5 unpcklps %xmm8, %xmm5 # xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1] shufps $0x24, %xmm7, %xmm5 # xmm5 = xmm5[0,1],xmm7[2,0] movaps %xmm4, %xmm14 movlhps %xmm11, %xmm14 # xmm14 = xmm14[0],xmm11[0] movaps %xmm3, %xmm7 unpcklps %xmm12, %xmm7 # xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1] shufps $0x24, %xmm14, %xmm7 # xmm7 = xmm7[0,1],xmm14[2,0] movaps %xmm6, %xmm15 unpcklps %xmm13, %xmm15 # xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1] movaps %xmm8, %xmm14 shufps $0x11, %xmm2, %xmm14 # xmm14 = xmm14[1,0],xmm2[1,0] shufps $0xe2, %xmm15, %xmm14 # xmm14 = xmm14[2,0],xmm15[2,3] movaps %xmm11, %xmm15 unpcklps %xmm4, %xmm15 # xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1] movaps %xmm12, %xmm1 shufps $0x11, %xmm3, %xmm1 # xmm1 = xmm1[1,0],xmm3[1,0] shufps $0xe2, %xmm15, %xmm1 # xmm1 = xmm1[2,0],xmm15[2,3] movaps %xmm13, %xmm15 unpckhpd %xmm6, %xmm15 # xmm15 = xmm15[1],xmm6[1] movaps %xmm2, %xmm9 unpckhps %xmm8, %xmm9 # xmm9 = xmm9[2],xmm8[2],xmm9[3],xmm8[3] 
shufps $0x24, %xmm15, %xmm9 # xmm9 = xmm9[0,1],xmm15[2,0] movaps %xmm4, %xmm15 unpckhpd %xmm11, %xmm15 # xmm15 = xmm15[1],xmm11[1] movaps %xmm3, %xmm10 unpckhps %xmm12, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3] shufps $0x24, %xmm15, %xmm10 # xmm10 = xmm10[0,1],xmm15[2,0] unpckhps %xmm13, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3] shufps $0x33, %xmm2, %xmm8 # xmm8 = xmm8[3,0],xmm2[3,0] shufps $0xe2, %xmm6, %xmm8 # xmm8 = xmm8[2,0],xmm6[2,3] unpckhps %xmm4, %xmm11 # xmm11 = xmm11[2],xmm4[2],xmm11[3],xmm4[3] shufps $0x33, %xmm3, %xmm12 # xmm12 = xmm12[3,0],xmm3[3,0] shufps $0xe2, %xmm11, %xmm12 # xmm12 = xmm12[2,0],xmm11[2,3] movups %xmm12, 0x60(%rdi,%rax) movups %xmm8, 0x70(%rdi,%rax) movups %xmm10, 0x40(%rdi,%rax) movups %xmm9, 0x50(%rdi,%rax) movups %xmm1, 0x20(%rdi,%rax) movups %xmm14, 0x30(%rdi,%rax) movups %xmm7, (%rdi,%rax) movups %xmm5, 0x10(%rdi,%rax) subq $-0x80, %rax cmpq $0x100, %rax # imm = 0x100 jne 0x2b9b5 movq $-0x8, %rax movaps 0x414b2(%rip), %xmm10 # 0x6d0d0 movaps 0x414ba(%rip), %xmm11 # 0x6d0e0 movaps 0x414c2(%rip), %xmm12 # 0x6d0f0 movups 0x100(%rdi,%rax,4), %xmm1 movups 0x20(%rdi,%rax,4), %xmm5 movups 0x40(%rdi,%rax,4), %xmm4 movups 0x60(%rdi,%rax,4), %xmm7 movups 0x80(%rdi,%rax,4), %xmm6 movaps %xmm5, %xmm8 addps %xmm1, %xmm8 subps %xmm1, %xmm5 movups 0xe0(%rdi,%rax,4), %xmm1 movaps %xmm4, %xmm9 addps %xmm1, %xmm9 subps %xmm1, %xmm4 movups 0xc0(%rdi,%rax,4), %xmm1 movaps %xmm7, %xmm2 addps %xmm1, %xmm2 subps %xmm1, %xmm7 movups 0xa0(%rdi,%rax,4), %xmm1 movaps %xmm6, %xmm3 addps %xmm1, %xmm3 subps %xmm1, %xmm6 movaps %xmm8, %xmm1 addps %xmm3, %xmm1 subps %xmm3, %xmm8 movaps %xmm9, %xmm3 addps %xmm2, %xmm3 subps %xmm2, %xmm9 movaps %xmm3, %xmm2 addps %xmm1, %xmm2 movups %xmm2, 0x20(%rdi,%rax,4) subps %xmm3, %xmm1 movups %xmm1, 0xa0(%rdi,%rax,4) addps %xmm8, %xmm9 mulps %xmm0, %xmm9 movaps %xmm8, %xmm1 addps %xmm9, %xmm1 movups %xmm1, 0x60(%rdi,%rax,4) subps %xmm9, %xmm8 movups %xmm8, 0xe0(%rdi,%rax,4) addps %xmm7, %xmm6 addps 
%xmm4, %xmm7 addps %xmm5, %xmm4 movaps %xmm6, %xmm1 subps %xmm4, %xmm1 mulps %xmm10, %xmm1 mulps %xmm11, %xmm6 addps %xmm1, %xmm6 mulps %xmm12, %xmm4 addps %xmm1, %xmm4 mulps %xmm0, %xmm7 movaps %xmm5, %xmm1 addps %xmm7, %xmm1 subps %xmm7, %xmm5 movaps %xmm5, %xmm2 addps %xmm6, %xmm2 movups %xmm2, 0xc0(%rdi,%rax,4) subps %xmm6, %xmm5 movups %xmm5, 0x80(%rdi,%rax,4) movaps %xmm1, %xmm2 addps %xmm4, %xmm2 movups %xmm2, 0x40(%rdi,%rax,4) subps %xmm4, %xmm1 movups %xmm1, 0x100(%rdi,%rax,4) addq $0x4, %rax jne 0x2bc2e retq nop
/rui314[P]libjpeg-turbo/jfdctflt.c
jpeg_fdct_islow
/*
 * Perform the forward DCT on one 8x8 block of samples, accurate-integer
 * version (Loeffler, Ligtenberg & Moshovitz flowgraph per the figure
 * references below).  Two 1-D passes operate in place on `data`: rows
 * first, then columns.  All arithmetic is scaled fixed point (the FIX_*
 * constants and DESCALE rounding); keeping the exact statement order
 * matters for intermediate-value range, so do not reorder.
 */
GLOBAL(void)
jpeg_fdct_islow(DCTELEM *data)
{
  JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  JLONG tmp10, tmp11, tmp12, tmp13;
  JLONG z1, z2, z3, z4, z5;
  DCTELEM *dataptr;
  int ctr;
  SHIFT_TEMPS

  /* Pass 1: process rows. */
  /* Note results are scaled up by sqrt(8) compared to a true DCT; */
  /* furthermore, we scale the results by 2**PASS1_BITS. */

  dataptr = data;
  for (ctr = DCTSIZE - 1; ctr >= 0; ctr--) {
    /* Butterfly stage: sums and differences of mirrored sample pairs. */
    tmp0 = dataptr[0] + dataptr[7];
    tmp7 = dataptr[0] - dataptr[7];
    tmp1 = dataptr[1] + dataptr[6];
    tmp6 = dataptr[1] - dataptr[6];
    tmp2 = dataptr[2] + dataptr[5];
    tmp5 = dataptr[2] - dataptr[5];
    tmp3 = dataptr[3] + dataptr[4];
    tmp4 = dataptr[3] - dataptr[4];

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
     */

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS);
    dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
    dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                                  CONST_BITS - PASS1_BITS);
    dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, -FIX_1_847759065),
                                  CONST_BITS - PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * cK represents cos(K*pi/16).
     * i0..i3 in the paper are tmp4..tmp7 here.
     */

    z1 = tmp4 + tmp7;
    z2 = tmp5 + tmp6;
    z3 = tmp4 + tmp6;
    z4 = tmp5 + tmp7;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, -FIX_0_899976223); /* sqrt(2) * ( c7-c3) */
    z2 = MULTIPLY(z2, -FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, -FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, -FIX_0_390180644); /* sqrt(2) * ( c5-c3) */

    z3 += z5;
    z4 += z5;

    dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS - PASS1_BITS);
    dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS - PASS1_BITS);
    dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS - PASS1_BITS);
    dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS - PASS1_BITS);

    dataptr += DCTSIZE;         /* advance pointer to next row */
  }

  /* Pass 2: process columns.
   * We remove the PASS1_BITS scaling, but leave the results scaled up
   * by an overall factor of 8.
   */

  dataptr = data;
  for (ctr = DCTSIZE - 1; ctr >= 0; ctr--) {
    tmp0 = dataptr[DCTSIZE * 0] + dataptr[DCTSIZE * 7];
    tmp7 = dataptr[DCTSIZE * 0] - dataptr[DCTSIZE * 7];
    tmp1 = dataptr[DCTSIZE * 1] + dataptr[DCTSIZE * 6];
    tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
    tmp2 = dataptr[DCTSIZE * 2] + dataptr[DCTSIZE * 5];
    tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];
    tmp3 = dataptr[DCTSIZE * 3] + dataptr[DCTSIZE * 4];
    tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4];

    /* Even part per LL&M figure 1 --- note that published figure is faulty;
     * rotator "sqrt(2)*c1" should be "sqrt(2)*c6".
     */

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    dataptr[DCTSIZE * 0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS);
    dataptr[DCTSIZE * 4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS);

    z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
    dataptr[DCTSIZE * 2] =
      (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                       CONST_BITS + PASS1_BITS);
    dataptr[DCTSIZE * 6] =
      (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, -FIX_1_847759065),
                       CONST_BITS + PASS1_BITS);

    /* Odd part per figure 8 --- note paper omits factor of sqrt(2).
     * cK represents cos(K*pi/16).
     * i0..i3 in the paper are tmp4..tmp7 here.
     */

    z1 = tmp4 + tmp7;
    z2 = tmp5 + tmp6;
    z3 = tmp4 + tmp6;
    z4 = tmp5 + tmp7;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, -FIX_0_899976223); /* sqrt(2) * ( c7-c3) */
    z2 = MULTIPLY(z2, -FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, -FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, -FIX_0_390180644); /* sqrt(2) * ( c5-c3) */

    z3 += z5;
    z4 += z5;

    dataptr[DCTSIZE * 7] = (DCTELEM)DESCALE(tmp4 + z1 + z3,
                                            CONST_BITS + PASS1_BITS);
    dataptr[DCTSIZE * 5] = (DCTELEM)DESCALE(tmp5 + z2 + z4,
                                            CONST_BITS + PASS1_BITS);
    dataptr[DCTSIZE * 3] = (DCTELEM)DESCALE(tmp6 + z2 + z3,
                                            CONST_BITS + PASS1_BITS);
    dataptr[DCTSIZE * 1] = (DCTELEM)DESCALE(tmp7 + z1 + z4,
                                            CONST_BITS + PASS1_BITS);

    dataptr++;                  /* advance pointer to next column */
  }
}
pushq %rbp pushq %r15 pushq %r14 pushq %rbx subq $0x1a8, %rsp # imm = 0x1A8 xorl %eax, %eax movswl (%rdi,%rax), %ecx movswl 0xe(%rdi,%rax), %edx leal (%rdx,%rcx), %r10d subl %edx, %ecx movswl 0x2(%rdi,%rax), %edx movswl 0xc(%rdi,%rax), %esi leal (%rsi,%rdx), %r9d subl %esi, %edx movswl 0x4(%rdi,%rax), %esi movswl 0xa(%rdi,%rax), %r8d leal (%r8,%rsi), %r11d subl %r8d, %esi movswl 0x6(%rdi,%rax), %r8d movswl 0x8(%rdi,%rax), %ebx leal (%rbx,%r8), %r14d subl %ebx, %r8d leal (%r14,%r10), %ebx subl %r14d, %r10d leal (%r11,%r9), %r14d subl %r11d, %r9d leal (%rbx,%r14), %r11d shll $0x2, %r11d movw %r11w, (%rdi,%rax) subl %r14d, %ebx shll $0x2, %ebx movw %bx, 0x8(%rdi,%rax) leal (%r10,%r9), %r11d imull $0x1151, %r11d, %r11d # imm = 0x1151 imull $0x187e, %r10d, %r10d # imm = 0x187E addl %r11d, %r10d addl $0x400, %r10d # imm = 0x400 shrl $0xb, %r10d movw %r10w, 0x4(%rdi,%rax) imull $0x7ffc4df, %r9d, %r9d # imm = 0x7FFC4DF addl %r11d, %r9d addl $0x400, %r9d # imm = 0x400 shrl $0xb, %r9d movw %r9w, 0xc(%rdi,%rax) leal (%r8,%rcx), %r9d leal (%rsi,%rdx), %r10d leal (%r8,%rdx), %r11d leal (%rsi,%rcx), %ebx leal (%r11,%rbx), %ebp imull $0x25a1, %ebp, %ebp # imm = 0x25A1 imull $0x98e, %r8d, %r8d # imm = 0x98E imull $0x41b3, %esi, %r14d # imm = 0x41B3 imull $0x6254, %edx, %r15d # imm = 0x6254 imull $0x300b, %ecx, %ecx # imm = 0x300B imull $0xffffe333, %r9d, %edx # imm = 0xFFFFE333 imull $0xffffadfd, %r10d, %r9d # imm = 0xFFFFADFD imull $0xffffc13b, %r11d, %r10d # imm = 0xFFFFC13B imull $0xfffff384, %ebx, %esi # imm = 0xFFFFF384 addl %ebp, %r10d addl %ebp, %esi addl %edx, %r8d addl $0x400, %r8d # imm = 0x400 addl %r10d, %r8d shrl $0xb, %r8d movw %r8w, 0xe(%rdi,%rax) leal (%r9,%r14), %r8d addl $0x400, %r8d # imm = 0x400 addl %esi, %r8d shrl $0xb, %r8d movw %r8w, 0xa(%rdi,%rax) leal (%r9,%r15), %r8d addl $0x400, %r8d # imm = 0x400 addl %r10d, %r8d shrl $0xb, %r8d movw %r8w, 0x6(%rdi,%rax) addl %edx, %ecx addl $0x400, %ecx # imm = 0x400 addl %esi, %ecx shrl $0xb, %ecx movw %cx, 
0x2(%rdi,%rax) addq $0x10, %rax cmpl $0x80, %eax jne 0x2c5ab movdqu (%rdi), %xmm1 movdqu 0x10(%rdi), %xmm0 movdqu 0x20(%rdi), %xmm5 movdqu 0x30(%rdi), %xmm6 punpcklwd %xmm1, %xmm12 # xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3] psrad $0x10, %xmm12 punpckhwd %xmm1, %xmm9 # xmm9 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7] psrad $0x10, %xmm9 movdqu 0x70(%rdi), %xmm1 punpcklwd %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] psrad $0x10, %xmm4 punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7] psrad $0x10, %xmm1 movdqa %xmm1, %xmm2 paddd %xmm9, %xmm2 movdqa %xmm4, %xmm3 paddd %xmm12, %xmm3 psubd %xmm1, %xmm9 psubd %xmm4, %xmm12 punpcklwd %xmm0, %xmm15 # xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3] psrad $0x10, %xmm15 punpckhwd %xmm0, %xmm14 # xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7] psrad $0x10, %xmm14 movdqu 0x60(%rdi), %xmm0 punpcklwd %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] psrad $0x10, %xmm4 punpckhwd %xmm0, %xmm0 # xmm0 = xmm0[4,4,5,5,6,6,7,7] psrad $0x10, %xmm0 movdqa %xmm0, %xmm1 paddd %xmm14, %xmm1 movdqa %xmm1, -0x80(%rsp) movdqa %xmm4, %xmm8 paddd %xmm15, %xmm8 psubd %xmm0, %xmm14 psubd %xmm4, %xmm15 punpcklwd %xmm5, %xmm0 # xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3] psrad $0x10, %xmm0 movdqa %xmm0, %xmm1 punpckhwd %xmm5, %xmm4 # xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] psrad $0x10, %xmm4 movdqu 0x50(%rdi), %xmm0 punpcklwd %xmm0, %xmm5 # xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3] psrad $0x10, %xmm5 punpckhwd %xmm0, %xmm0 # xmm0 = xmm0[4,4,5,5,6,6,7,7] psrad $0x10, %xmm0 movdqa %xmm0, %xmm7 paddd %xmm4, %xmm7 movdqa %xmm7, -0x30(%rsp) movdqa %xmm5, %xmm7 paddd %xmm1, %xmm7 movdqa %xmm7, -0x50(%rsp) psubd %xmm0, %xmm4 psubd %xmm5, %xmm1 movdqa %xmm1, (%rsp) punpckhwd %xmm6, 
%xmm1 # xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7] psrad $0x10, %xmm1 movdqu 0x40(%rdi), %xmm0 punpckhwd %xmm0, %xmm5 # xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] psrad $0x10, %xmm5 movdqa %xmm5, %xmm11 paddd %xmm1, %xmm11 psubd %xmm5, %xmm1 punpcklwd %xmm6, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3] psrad $0x10, %xmm5 punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3] psrad $0x10, %xmm0 movdqa %xmm0, %xmm13 paddd %xmm5, %xmm13 psubd %xmm0, %xmm5 pxor %xmm10, %xmm10 movdqa %xmm3, %xmm7 punpckhdq %xmm10, %xmm7 # xmm7 = xmm7[2],xmm10[2],xmm7[3],xmm10[3] punpckldq %xmm10, %xmm3 # xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1] movdqa %xmm2, %xmm6 punpckhdq %xmm10, %xmm6 # xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3] punpckldq %xmm10, %xmm2 # xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1] movdqa %xmm12, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, 0xf0(%rsp) punpckldq %xmm10, %xmm12 # xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1] movdqa %xmm12, 0x30(%rsp) movdqa %xmm9, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, 0x60(%rsp) punpckldq %xmm10, %xmm9 # xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1] movdqa %xmm9, 0x50(%rsp) movdqa %xmm8, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, -0x60(%rsp) punpckldq %xmm10, %xmm8 # xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1] movdqa -0x80(%rsp), %xmm0 movdqa %xmm0, %xmm9 punpckhdq %xmm10, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3] movdqa %xmm9, -0x70(%rsp) punpckldq %xmm10, %xmm0 # xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1] movdqa %xmm0, -0x80(%rsp) movdqa %xmm15, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, 0xe0(%rsp) punpckldq %xmm10, %xmm15 # xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1] movdqa %xmm15, 0x10(%rsp) movdqa %xmm14, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] 
movdqa %xmm0, 0x20(%rsp) punpckldq %xmm10, %xmm14 # xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1] movdqa %xmm14, 0x40(%rsp) movdqa -0x50(%rsp), %xmm9 movdqa %xmm9, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, -0x10(%rsp) punpckldq %xmm10, %xmm9 # xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1] movdqa %xmm9, -0x50(%rsp) movdqa -0x30(%rsp), %xmm14 movdqa %xmm14, %xmm9 punpckhdq %xmm10, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3] punpckldq %xmm10, %xmm14 # xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1] movdqa (%rsp), %xmm0 movdqa %xmm0, %xmm12 punpckhdq %xmm10, %xmm12 # xmm12 = xmm12[2],xmm10[2],xmm12[3],xmm10[3] movdqa %xmm12, 0xd0(%rsp) punpckldq %xmm10, %xmm0 # xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1] movdqa %xmm0, (%rsp) movdqa %xmm4, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] movdqa %xmm0, 0xa0(%rsp) punpckldq %xmm10, %xmm4 # xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1] movdqa %xmm4, -0x30(%rsp) movdqa %xmm13, %xmm0 punpckhdq %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] punpckldq %xmm10, %xmm13 # xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1] movdqa %xmm11, %xmm15 punpckhdq %xmm10, %xmm15 # xmm15 = xmm15[2],xmm10[2],xmm15[3],xmm10[3] punpckldq %xmm10, %xmm11 # xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1] movdqa %xmm5, %xmm4 punpckhdq %xmm10, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] movdqa %xmm4, 0xc0(%rsp) punpckldq %xmm10, %xmm5 # xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1] movdqa %xmm5, 0xb0(%rsp) movdqa %xmm1, %xmm4 punpckhdq %xmm10, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] movdqa %xmm4, 0x90(%rsp) punpckldq %xmm10, %xmm1 # xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] movdqa %xmm1, 0x80(%rsp) movdqa %xmm11, %xmm10 paddq %xmm2, %xmm10 psubq %xmm11, %xmm2 movdqa %xmm15, %xmm12 paddq %xmm6, %xmm12 psubq %xmm15, %xmm6 movdqa %xmm13, %xmm11 paddq %xmm3, %xmm11 psubq %xmm13, %xmm3 movdqa %xmm0, %xmm13 paddq %xmm7, %xmm13 psubq %xmm0, %xmm7 movdqa %xmm14, %xmm1 movdqa -0x80(%rsp), %xmm0 
paddq %xmm0, %xmm1 movdqa %xmm1, -0x20(%rsp) psubq %xmm14, %xmm0 movdqa %xmm0, -0x80(%rsp) movdqa %xmm9, %xmm1 movdqa -0x70(%rsp), %xmm0 paddq %xmm0, %xmm1 movdqa %xmm1, -0x40(%rsp) psubq %xmm9, %xmm0 movdqa %xmm0, -0x70(%rsp) movdqa -0x50(%rsp), %xmm0 movdqa %xmm0, %xmm4 paddq %xmm8, %xmm4 psubq %xmm0, %xmm8 movdqa -0x10(%rsp), %xmm1 movdqa %xmm1, %xmm14 movdqa -0x60(%rsp), %xmm0 paddq %xmm0, %xmm14 psubq %xmm1, %xmm0 movdqa %xmm0, -0x60(%rsp) movdqa %xmm14, %xmm15 paddq %xmm13, %xmm15 movdqa %xmm4, %xmm9 paddq %xmm11, %xmm9 movdqa 0x1e548(%rip), %xmm5 # 0x4b010 paddq %xmm5, %xmm9 paddq %xmm5, %xmm15 psrlq $0x2, %xmm15 psrlq $0x2, %xmm9 shufps $0x88, %xmm15, %xmm9 # xmm9 = xmm9[0,2],xmm15[0,2] movdqa -0x40(%rsp), %xmm15 paddq %xmm12, %xmm15 movdqa -0x20(%rsp), %xmm1 movdqa %xmm1, %xmm0 paddq %xmm10, %xmm0 paddq %xmm5, %xmm0 paddq %xmm5, %xmm15 psrlq $0x2, %xmm15 psrlq $0x2, %xmm0 shufps $0x88, %xmm15, %xmm0 # xmm0 = xmm0[0,2],xmm15[0,2] pslld $0x10, %xmm0 psrad $0x10, %xmm0 pslld $0x10, %xmm9 psrad $0x10, %xmm9 packssdw %xmm0, %xmm9 movdqu %xmm9, (%rdi) psubq %xmm14, %xmm13 psubq %xmm4, %xmm11 psubq -0x40(%rsp), %xmm12 psubq %xmm1, %xmm10 paddq %xmm5, %xmm10 paddq %xmm5, %xmm12 paddq %xmm5, %xmm11 paddq %xmm5, %xmm13 psrlq $0x2, %xmm13 psrlq $0x2, %xmm11 shufps $0x88, %xmm13, %xmm11 # xmm11 = xmm11[0,2],xmm13[0,2] psrlq $0x2, %xmm12 psrlq $0x2, %xmm10 shufps $0x88, %xmm12, %xmm10 # xmm10 = xmm10[0,2],xmm12[0,2] pslld $0x10, %xmm10 psrad $0x10, %xmm10 pslld $0x10, %xmm11 psrad $0x10, %xmm11 packssdw %xmm10, %xmm11 movdqu %xmm11, 0x40(%rdi) movdqa %xmm7, %xmm15 movdqa -0x60(%rsp), %xmm1 paddq %xmm1, %xmm15 movdqa %xmm3, %xmm14 paddq %xmm8, %xmm14 movdqa %xmm6, %xmm13 paddq -0x70(%rsp), %xmm13 movdqa %xmm2, %xmm12 paddq -0x80(%rsp), %xmm12 movdqa 0x40570(%rip), %xmm0 # 0x6d150 movdqa %xmm12, %xmm4 pmuludq %xmm0, %xmm4 psrlq $0x20, %xmm12 pmuludq %xmm0, %xmm12 movdqa %xmm13, %xmm9 pmuludq %xmm0, %xmm9 psrlq $0x20, %xmm13 pmuludq %xmm0, %xmm13 movdqa %xmm14, %xmm10 
pmuludq %xmm0, %xmm10 psrlq $0x20, %xmm14 pmuludq %xmm0, %xmm14 movdqa %xmm15, %xmm11 pmuludq %xmm0, %xmm11 psrlq $0x20, %xmm15 pmuludq %xmm0, %xmm15 movdqa 0x40525(%rip), %xmm0 # 0x6d160 movdqa %xmm7, %xmm5 pmuludq %xmm0, %xmm5 psrlq $0x20, %xmm7 pmuludq %xmm0, %xmm7 psllq $0x20, %xmm7 paddq %xmm5, %xmm7 movdqa %xmm3, %xmm5 pmuludq %xmm0, %xmm5 psrlq $0x20, %xmm3 pmuludq %xmm0, %xmm3 psllq $0x20, %xmm3 paddq %xmm5, %xmm3 movdqa %xmm6, %xmm5 pmuludq %xmm0, %xmm5 psrlq $0x20, %xmm6 pmuludq %xmm0, %xmm6 psllq $0x20, %xmm6 paddq %xmm5, %xmm6 movdqa %xmm2, %xmm5 pmuludq %xmm0, %xmm5 psrlq $0x20, %xmm2 pmuludq %xmm0, %xmm2 psllq $0x20, %xmm2 paddq %xmm5, %xmm2 psllq $0x20, %xmm15 movdqa 0x404bf(%rip), %xmm0 # 0x6d170 paddq %xmm0, %xmm11 paddq %xmm15, %xmm11 psllq $0x20, %xmm14 paddq %xmm0, %xmm10 paddq %xmm14, %xmm10 psllq $0x20, %xmm13 paddq %xmm0, %xmm9 paddq %xmm13, %xmm9 psllq $0x20, %xmm12 paddq %xmm0, %xmm4 paddq %xmm12, %xmm4 paddq %xmm10, %xmm3 paddq %xmm11, %xmm7 psrlq $0xf, %xmm7 psrlq $0xf, %xmm3 shufps $0x88, %xmm7, %xmm3 # xmm3 = xmm3[0,2],xmm7[0,2] paddq %xmm4, %xmm2 paddq %xmm9, %xmm6 psrlq $0xf, %xmm6 psrlq $0xf, %xmm2 shufps $0x88, %xmm6, %xmm2 # xmm2 = xmm2[0,2],xmm6[0,2] pslld $0x10, %xmm2 psrad $0x10, %xmm2 pslld $0x10, %xmm3 psrad $0x10, %xmm3 packssdw %xmm2, %xmm3 movdqu %xmm3, 0x20(%rdi) movdqa 0x40442(%rip), %xmm0 # 0x6d180 movdqa %xmm1, %xmm2 pmuludq %xmm0, %xmm2 psrlq $0x20, %xmm1 pmuludq %xmm0, %xmm1 psllq $0x20, %xmm1 paddq %xmm2, %xmm1 movdqa %xmm1, %xmm5 movdqa %xmm8, %xmm2 pmuludq %xmm0, %xmm2 psrlq $0x20, %xmm8 pmuludq %xmm0, %xmm8 psllq $0x20, %xmm8 paddq %xmm2, %xmm8 movdqa -0x70(%rsp), %xmm1 movdqa %xmm1, %xmm2 pmuludq %xmm0, %xmm2 psrlq $0x20, %xmm1 pmuludq %xmm0, %xmm1 psllq $0x20, %xmm1 paddq %xmm2, %xmm1 movdqa %xmm1, %xmm3 movdqa -0x80(%rsp), %xmm1 movdqa %xmm1, %xmm2 pmuludq %xmm0, %xmm2 psrlq $0x20, %xmm1 pmuludq %xmm0, %xmm1 psllq $0x20, %xmm1 paddq %xmm2, %xmm1 paddq %xmm4, %xmm1 paddq %xmm9, %xmm3 paddq %xmm10, %xmm8 paddq 
%xmm11, %xmm5 psrlq $0xf, %xmm5 psrlq $0xf, %xmm8 shufps $0x88, %xmm5, %xmm8 # xmm8 = xmm8[0,2],xmm5[0,2] psrlq $0xf, %xmm3 psrlq $0xf, %xmm1 shufps $0x88, %xmm3, %xmm1 # xmm1 = xmm1[0,2],xmm3[0,2] pslld $0x10, %xmm1 psrad $0x10, %xmm1 pslld $0x10, %xmm8 psrad $0x10, %xmm8 packssdw %xmm1, %xmm8 movdqu %xmm8, 0x60(%rdi) movdqa 0x80(%rsp), %xmm10 movdqa %xmm10, %xmm0 paddq 0x40(%rsp), %xmm0 movdqa %xmm0, %xmm2 movdqa %xmm0, -0x20(%rsp) movdqa -0x30(%rsp), %xmm0 paddq 0x50(%rsp), %xmm0 movdqa %xmm0, -0x80(%rsp) paddq %xmm0, %xmm2 movdqa 0x40342(%rip), %xmm0 # 0x6d190 movdqa %xmm2, %xmm1 pmuludq %xmm0, %xmm1 psrlq $0x20, %xmm2 pmuludq %xmm0, %xmm2 psllq $0x20, %xmm2 paddq %xmm1, %xmm2 movdqa %xmm2, 0x190(%rsp) movdqa 0x90(%rsp), %xmm4 movdqa %xmm4, %xmm2 paddq 0x20(%rsp), %xmm2 movdqa %xmm2, -0x50(%rsp) movdqa 0xa0(%rsp), %xmm7 movdqa %xmm7, %xmm1 paddq 0x60(%rsp), %xmm1 movdqa %xmm1, -0x70(%rsp) paddq %xmm1, %xmm2 movdqa %xmm2, %xmm1 pmuludq %xmm0, %xmm1 psrlq $0x20, %xmm2 pmuludq %xmm0, %xmm2 psllq $0x20, %xmm2 paddq %xmm1, %xmm2 movdqa %xmm2, 0x180(%rsp) movdqa 0xb0(%rsp), %xmm13 movdqa %xmm13, %xmm15 paddq 0x10(%rsp), %xmm15 movdqa %xmm15, -0x60(%rsp) movdqa (%rsp), %xmm3 movdqa %xmm3, %xmm1 paddq 0x30(%rsp), %xmm1 movdqa %xmm1, -0x40(%rsp) paddq %xmm1, %xmm15 movdqa %xmm15, %xmm1 pmuludq %xmm0, %xmm1 psrlq $0x20, %xmm15 pmuludq %xmm0, %xmm15 psllq $0x20, %xmm15 paddq %xmm1, %xmm15 movdqa 0xc0(%rsp), %xmm2 movdqa %xmm2, %xmm9 movdqa 0xe0(%rsp), %xmm6 paddq %xmm6, %xmm9 movdqa %xmm9, 0x70(%rsp) movdqa 0xd0(%rsp), %xmm11 movdqa %xmm11, %xmm1 movdqa 0xf0(%rsp), %xmm5 paddq %xmm5, %xmm1 movdqa %xmm1, -0x10(%rsp) paddq %xmm1, %xmm9 movdqa %xmm9, %xmm1 pmuludq %xmm0, %xmm1 psrlq $0x20, %xmm9 pmuludq %xmm0, %xmm9 psllq $0x20, %xmm9 paddq %xmm1, %xmm9 movdqa %xmm10, %xmm1 movdqa %xmm10, %xmm12 movdqa 0x40205(%rip), %xmm0 # 0x6d1a0 pmuludq %xmm0, %xmm1 movdqa %xmm1, 0x80(%rsp) movdqa %xmm4, %xmm8 pmuludq %xmm0, %xmm4 movdqa %xmm4, 0x90(%rsp) movdqa %xmm13, %xmm1 pmuludq 
%xmm0, %xmm1 movdqa %xmm1, 0xb0(%rsp) movdqa %xmm2, %xmm4 pmuludq %xmm0, %xmm2 movdqa %xmm2, 0xc0(%rsp) movdqa -0x30(%rsp), %xmm1 movdqa %xmm1, %xmm10 movdqa 0x401c0(%rip), %xmm0 # 0x6d1b0 pmuludq %xmm0, %xmm1 movdqa %xmm1, -0x30(%rsp) movdqa %xmm7, %xmm14 pmuludq %xmm0, %xmm7 movdqa %xmm7, 0xa0(%rsp) movdqa %xmm3, %xmm7 pmuludq %xmm0, %xmm3 movdqa %xmm3, (%rsp) movdqa %xmm11, %xmm1 pmuludq %xmm0, %xmm1 movdqa %xmm1, 0xd0(%rsp) movdqa 0x40(%rsp), %xmm2 paddq %xmm2, %xmm10 movdqa 0x20(%rsp), %xmm1 paddq %xmm1, %xmm14 movdqa 0x10(%rsp), %xmm3 paddq %xmm3, %xmm7 paddq %xmm6, %xmm11 movdqa 0x40168(%rip), %xmm0 # 0x6d1c0 pmuludq %xmm0, %xmm2 movdqa %xmm2, 0x40(%rsp) pmuludq %xmm0, %xmm1 movdqa %xmm1, 0x20(%rsp) pmuludq %xmm0, %xmm3 movdqa %xmm3, 0x10(%rsp) pmuludq %xmm0, %xmm6 movdqa %xmm6, 0xe0(%rsp) movdqa 0x50(%rsp), %xmm2 paddq %xmm2, %xmm12 movdqa 0x60(%rsp), %xmm1 paddq %xmm1, %xmm8 movdqa 0x30(%rsp), %xmm3 paddq %xmm3, %xmm13 paddq %xmm5, %xmm4 movdqa 0x40120(%rip), %xmm0 # 0x6d1d0 pmuludq %xmm0, %xmm2 movdqa %xmm2, 0x50(%rsp) pmuludq %xmm0, %xmm1 movdqa %xmm1, 0x60(%rsp) pmuludq %xmm0, %xmm3 movdqa %xmm3, 0x30(%rsp) pmuludq %xmm0, %xmm5 movdqa %xmm5, 0xf0(%rsp) movdqa 0x400fd(%rip), %xmm1 # 0x6d1e0 movdqa %xmm4, %xmm2 pmuludq %xmm1, %xmm2 movdqa %xmm4, %xmm3 movdqa %xmm4, %xmm5 psrlq $0x20, %xmm3 movdqa 0x400f0(%rip), %xmm0 # 0x6d1f0 pmuludq %xmm0, %xmm3 paddq %xmm2, %xmm3 movdqa %xmm3, 0x160(%rsp) movdqa %xmm13, %xmm2 pmuludq %xmm1, %xmm2 movdqa %xmm13, %xmm3 psrlq $0x20, %xmm3 pmuludq %xmm0, %xmm3 paddq %xmm2, %xmm3 movdqa %xmm3, 0x140(%rsp) movdqa %xmm8, %xmm2 pmuludq %xmm1, %xmm2 movdqa %xmm8, %xmm3 movdqa %xmm8, %xmm4 psrlq $0x20, %xmm3 pmuludq %xmm0, %xmm3 paddq %xmm2, %xmm3 movdqa %xmm3, 0x120(%rsp) movdqa %xmm12, %xmm6 pmuludq %xmm1, %xmm6 movdqa %xmm12, %xmm8 psrlq $0x20, %xmm8 pmuludq %xmm0, %xmm8 paddq %xmm6, %xmm8 pmuludq %xmm0, %xmm5 movdqa %xmm5, 0x110(%rsp) pmuludq %xmm0, %xmm13 pmuludq %xmm0, %xmm4 movdqa %xmm4, 0x100(%rsp) pmuludq %xmm0, %xmm12 
movdqa %xmm11, %xmm6 pmuludq %xmm1, %xmm6 movdqa %xmm11, %xmm2 psrlq $0x20, %xmm2 movdqa 0x40045(%rip), %xmm0 # 0x6d200 pmuludq %xmm0, %xmm2 paddq %xmm6, %xmm2 movdqa %xmm2, 0x170(%rsp) movdqa %xmm7, %xmm6 pmuludq %xmm1, %xmm6 movdqa %xmm7, %xmm2 psrlq $0x20, %xmm2 pmuludq %xmm0, %xmm2 paddq %xmm6, %xmm2 movdqa %xmm2, 0x150(%rsp) movdqa %xmm14, %xmm6 pmuludq %xmm1, %xmm6 movdqa %xmm14, %xmm2 psrlq $0x20, %xmm2 pmuludq %xmm0, %xmm2 paddq %xmm6, %xmm2 movdqa %xmm2, 0x130(%rsp) movdqa %xmm10, %xmm5 pmuludq %xmm1, %xmm5 movdqa %xmm10, %xmm6 psrlq $0x20, %xmm6 pmuludq %xmm0, %xmm6 paddq %xmm5, %xmm6 pmuludq %xmm0, %xmm11 pmuludq %xmm0, %xmm7 pmuludq %xmm0, %xmm14 pmuludq %xmm0, %xmm10 movdqa -0x20(%rsp), %xmm3 movdqa %xmm3, %xmm5 pmuludq %xmm1, %xmm5 movdqa %xmm3, %xmm0 psrlq $0x20, %xmm0 movdqa 0x3ffb1(%rip), %xmm2 # 0x6d210 pmuludq %xmm2, %xmm0 paddq %xmm5, %xmm0 psllq $0x20, %xmm0 pmuludq %xmm2, %xmm3 paddq %xmm0, %xmm3 movdqa %xmm3, -0x20(%rsp) movdqa -0x50(%rsp), %xmm3 movdqa %xmm3, %xmm0 pmuludq %xmm1, %xmm0 movdqa %xmm3, %xmm5 psrlq $0x20, %xmm5 pmuludq %xmm2, %xmm5 paddq %xmm0, %xmm5 psllq $0x20, %xmm5 pmuludq %xmm2, %xmm3 paddq %xmm5, %xmm3 movdqa %xmm3, -0x50(%rsp) movdqa -0x60(%rsp), %xmm3 movdqa %xmm3, %xmm0 pmuludq %xmm1, %xmm0 movdqa %xmm3, %xmm5 psrlq $0x20, %xmm5 pmuludq %xmm2, %xmm5 paddq %xmm0, %xmm5 psllq $0x20, %xmm5 pmuludq %xmm2, %xmm3 paddq %xmm5, %xmm3 movdqa %xmm3, -0x60(%rsp) movdqa 0x70(%rsp), %xmm4 movdqa %xmm4, %xmm0 pmuludq %xmm1, %xmm0 movdqa %xmm4, %xmm5 psrlq $0x20, %xmm5 pmuludq %xmm2, %xmm5 paddq %xmm0, %xmm5 pmuludq %xmm2, %xmm4 psllq $0x20, %xmm5 paddq %xmm5, %xmm4 movdqa -0x80(%rsp), %xmm0 movdqa %xmm0, %xmm2 pmuludq %xmm1, %xmm2 movdqa %xmm0, %xmm5 psrlq $0x20, %xmm5 movdqa 0x3fef7(%rip), %xmm3 # 0x6d220 pmuludq %xmm3, %xmm5 paddq %xmm2, %xmm5 psllq $0x20, %xmm5 pmuludq %xmm3, %xmm0 paddq %xmm5, %xmm0 movdqa %xmm0, -0x80(%rsp) movdqa -0x70(%rsp), %xmm2 movdqa %xmm2, %xmm0 pmuludq %xmm1, %xmm0 movdqa %xmm2, %xmm5 psrlq $0x20, %xmm5 
pmuludq %xmm3, %xmm5 paddq %xmm0, %xmm5 psllq $0x20, %xmm5 pmuludq %xmm3, %xmm2 paddq %xmm5, %xmm2 movdqa %xmm2, -0x70(%rsp) movdqa -0x40(%rsp), %xmm0 movdqa %xmm0, %xmm2 pmuludq %xmm1, %xmm2 movdqa %xmm0, %xmm5 psrlq $0x20, %xmm5 pmuludq %xmm3, %xmm5 paddq %xmm2, %xmm5 psllq $0x20, %xmm5 pmuludq %xmm3, %xmm0 paddq %xmm5, %xmm0 movdqa %xmm0, %xmm5 movdqa -0x10(%rsp), %xmm0 pmuludq %xmm0, %xmm1 movdqa %xmm0, %xmm2 psrlq $0x20, %xmm2 pmuludq %xmm3, %xmm2 paddq %xmm1, %xmm2 pmuludq %xmm3, %xmm0 psllq $0x20, %xmm2 paddq %xmm2, %xmm0 paddq %xmm9, %xmm4 movdqa %xmm4, 0x70(%rsp) paddq %xmm9, %xmm0 movdqa %xmm0, -0x10(%rsp) movdqa -0x60(%rsp), %xmm2 paddq %xmm15, %xmm2 movdqa %xmm2, -0x60(%rsp) paddq %xmm15, %xmm5 movdqa %xmm5, -0x40(%rsp) movdqa 0x180(%rsp), %xmm0 movdqa -0x50(%rsp), %xmm3 paddq %xmm0, %xmm3 movdqa %xmm3, -0x50(%rsp) movdqa -0x70(%rsp), %xmm1 paddq %xmm0, %xmm1 movdqa %xmm1, -0x70(%rsp) movdqa 0x190(%rsp), %xmm0 movdqa -0x20(%rsp), %xmm15 paddq %xmm0, %xmm15 movdqa -0x80(%rsp), %xmm1 paddq %xmm0, %xmm1 movdqa %xmm1, -0x80(%rsp) psllq $0x20, %xmm8 movdqa 0x3fd14(%rip), %xmm5 # 0x6d170 paddq %xmm5, %xmm12 paddq %xmm8, %xmm12 movdqa 0x120(%rsp), %xmm0 psllq $0x20, %xmm0 movdqa 0x100(%rsp), %xmm8 paddq %xmm5, %xmm8 paddq %xmm0, %xmm8 movdqa 0x140(%rsp), %xmm0 psllq $0x20, %xmm0 paddq %xmm5, %xmm13 paddq %xmm0, %xmm13 movdqa 0x160(%rsp), %xmm0 psllq $0x20, %xmm0 movdqa 0x110(%rsp), %xmm9 paddq %xmm5, %xmm9 paddq %xmm0, %xmm9 movdqa 0xc0(%rsp), %xmm0 paddq %xmm9, %xmm0 movdqa 0xb0(%rsp), %xmm1 paddq %xmm13, %xmm1 paddq %xmm2, %xmm1 paddq %xmm4, %xmm0 psrlq $0xf, %xmm0 psrlq $0xf, %xmm1 shufps $0x88, %xmm0, %xmm1 # xmm1 = xmm1[0,2],xmm0[0,2] movdqa 0x90(%rsp), %xmm2 paddq %xmm8, %xmm2 movdqa 0x80(%rsp), %xmm0 paddq %xmm12, %xmm0 paddq %xmm15, %xmm0 paddq %xmm3, %xmm2 psrlq $0xf, %xmm2 psrlq $0xf, %xmm0 shufps $0x88, %xmm2, %xmm0 # xmm0 = xmm0[0,2],xmm2[0,2] pslld $0x10, %xmm0 psrad $0x10, %xmm0 pslld $0x10, %xmm1 psrad $0x10, %xmm1 packssdw %xmm0, %xmm1 movdqu 
%xmm1, 0x70(%rdi) psllq $0x20, %xmm6 paddq %xmm5, %xmm10 paddq %xmm6, %xmm10 movdqa 0x130(%rsp), %xmm0 psllq $0x20, %xmm0 paddq %xmm5, %xmm14 paddq %xmm0, %xmm14 movdqa 0x150(%rsp), %xmm0 psllq $0x20, %xmm0 paddq %xmm5, %xmm7 paddq %xmm0, %xmm7 paddq %xmm5, %xmm11 movdqa 0x170(%rsp), %xmm0 psllq $0x20, %xmm0 paddq %xmm0, %xmm11 movdqa 0xd0(%rsp), %xmm1 paddq %xmm11, %xmm1 movdqa (%rsp), %xmm0 paddq %xmm7, %xmm0 movdqa -0x40(%rsp), %xmm4 paddq %xmm4, %xmm0 movdqa -0x10(%rsp), %xmm6 paddq %xmm6, %xmm1 psrlq $0xf, %xmm1 psrlq $0xf, %xmm0 shufps $0x88, %xmm1, %xmm0 # xmm0 = xmm0[0,2],xmm1[0,2] movdqa 0xa0(%rsp), %xmm2 paddq %xmm14, %xmm2 movdqa -0x30(%rsp), %xmm1 paddq %xmm10, %xmm1 movdqa -0x80(%rsp), %xmm3 paddq %xmm3, %xmm1 movdqa -0x70(%rsp), %xmm5 paddq %xmm5, %xmm2 psrlq $0xf, %xmm2 psrlq $0xf, %xmm1 shufps $0x88, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[0,2] pslld $0x10, %xmm1 psrad $0x10, %xmm1 pslld $0x10, %xmm0 psrad $0x10, %xmm0 packssdw %xmm1, %xmm0 movdqu %xmm0, 0x50(%rdi) paddq 0xe0(%rsp), %xmm11 paddq 0x10(%rsp), %xmm7 paddq 0x20(%rsp), %xmm14 paddq 0x40(%rsp), %xmm10 paddq %xmm15, %xmm10 paddq -0x50(%rsp), %xmm14 paddq -0x60(%rsp), %xmm7 paddq 0x70(%rsp), %xmm11 psrlq $0xf, %xmm11 psrlq $0xf, %xmm7 shufps $0x88, %xmm11, %xmm7 # xmm7 = xmm7[0,2],xmm11[0,2] psrlq $0xf, %xmm14 psrlq $0xf, %xmm10 shufps $0x88, %xmm14, %xmm10 # xmm10 = xmm10[0,2],xmm14[0,2] pslld $0x10, %xmm10 psrad $0x10, %xmm10 pslld $0x10, %xmm7 psrad $0x10, %xmm7 packssdw %xmm10, %xmm7 movdqu %xmm7, 0x30(%rdi) paddq 0xf0(%rsp), %xmm9 paddq 0x30(%rsp), %xmm13 paddq 0x60(%rsp), %xmm8 paddq 0x50(%rsp), %xmm12 paddq %xmm3, %xmm12 paddq %xmm5, %xmm8 paddq %xmm4, %xmm13 paddq %xmm6, %xmm9 psrlq $0xf, %xmm9 psrlq $0xf, %xmm13 shufps $0x88, %xmm9, %xmm13 # xmm13 = xmm13[0,2],xmm9[0,2] psrlq $0xf, %xmm8 psrlq $0xf, %xmm12 shufps $0x88, %xmm8, %xmm12 # xmm12 = xmm12[0,2],xmm8[0,2] pslld $0x10, %xmm12 psrad $0x10, %xmm12 pslld $0x10, %xmm13 psrad $0x10, %xmm13 packssdw %xmm12, %xmm13 movdqu %xmm13, 
0x10(%rdi) addq $0x1a8, %rsp # imm = 0x1A8 popq %rbx popq %r14 popq %r15 popq %rbp retq
/rui314[P]libjpeg-turbo/jfdctint.c
glfwInputMonitor
// Notifies shared code of a monitor connection or disconnection.
//
// monitor:   the monitor that was connected or disconnected
// action:    GLFW_CONNECTED or GLFW_DISCONNECTED
// placement: _GLFW_INSERT_FIRST or _GLFW_INSERT_LAST (used on connection only)
void _glfwInputMonitor(_GLFWmonitor* monitor, int action, int placement)
{
    if (action == GLFW_CONNECTED)
    {
        // Grow the monitor array through a temporary so the original
        // pointer is neither leaked nor dereferenced if realloc fails;
        // the count is only bumped once the allocation has succeeded
        _GLFWmonitor** monitors =
            realloc(_glfw.monitors,
                    sizeof(_GLFWmonitor*) * ((size_t) _glfw.monitorCount + 1));
        if (!monitors)
            return;

        _glfw.monitors = monitors;
        _glfw.monitorCount++;

        if (placement == _GLFW_INSERT_FIRST)
        {
            // Shift existing entries up by one (no-op for the first monitor)
            memmove(_glfw.monitors + 1,
                    _glfw.monitors,
                    ((size_t) _glfw.monitorCount - 1) * sizeof(_GLFWmonitor*));
            _glfw.monitors[0] = monitor;
        }
        else
            _glfw.monitors[_glfw.monitorCount - 1] = monitor;
    }
    else if (action == GLFW_DISCONNECTED)
    {
        int i;
        _GLFWwindow* window;

        // Detach every window that is full screen on the lost monitor,
        // restoring it to windowed mode at its current frame position
        for (window = _glfw.windowListHead;  window;  window = window->next)
        {
            if (window->monitor == monitor)
            {
                int width, height, xoff, yoff;
                _glfwPlatformGetWindowSize(window, &width, &height);
                _glfwPlatformSetWindowMonitor(window, NULL, 0, 0, width, height, 0);
                _glfwPlatformGetWindowFrameSize(window, &xoff, &yoff, NULL, NULL);
                _glfwPlatformSetWindowPos(window, xoff, yoff);
            }
        }

        // Remove the monitor from the array, closing the gap
        for (i = 0;  i < _glfw.monitorCount;  i++)
        {
            if (_glfw.monitors[i] == monitor)
            {
                _glfw.monitorCount--;
                memmove(_glfw.monitors + i,
                        _glfw.monitors + i + 1,
                        ((size_t) _glfw.monitorCount - i) * sizeof(_GLFWmonitor*));
                break;
            }
        }
    }

    if (_glfw.callbacks.monitor)
        _glfw.callbacks.monitor((GLFWmonitor*) monitor, action);

    // Freed last so the user callback may still inspect the monitor
    if (action == GLFW_DISCONNECTED)
        _glfwFreeMonitor(monitor);
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x18, %rsp movl %esi, %ebp movq %rdi, %rbx cmpl $0x40001, %esi # imm = 0x40001 je 0x1098d cmpl $0x40002, %ebp # imm = 0x40002 jne 0x10a22 leaq 0x3cf06(%rip), %rax # 0x4d7d8 movq 0x408(%rax), %r14 testq %r14, %r14 je 0x1094d leaq 0x10(%rsp), %r12 leaq 0xc(%rsp), %r13 leaq 0x8(%rsp), %r15 cmpq %rbx, 0x50(%r14) jne 0x10945 movq %r14, %rdi leaq 0x14(%rsp), %rsi movq %r12, %rdx callq 0x1891f movl 0x14(%rsp), %r8d movl 0x10(%rsp), %r9d movl $0x0, (%rsp) movq %r14, %rdi xorl %esi, %esi xorl %edx, %edx xorl %ecx, %ecx callq 0x19481 movq %r14, %rdi movq %r13, %rsi movq %r15, %rdx xorl %ecx, %ecx xorl %r8d, %r8d callq 0x18bee movl 0xc(%rsp), %esi movl 0x8(%rsp), %edx movq %r14, %rdi callq 0x18802 movq (%r14), %r14 testq %r14, %r14 jne 0x108ed leaq 0x3ce84(%rip), %rsi # 0x4d7d8 movslq 0x418(%rsi), %rax testq %rax, %rax jle 0x10a22 movq 0x410(%rsi), %rdi leaq (,%rax,8), %rdx xorl %ecx, %ecx cmpq %rbx, (%rdi,%rcx) je 0x10a03 addq $0x8, %rcx cmpq %rcx, %rdx jne 0x10975 jmp 0x10a22 movl %edx, %r14d leaq 0x3ce41(%rip), %r12 # 0x4d7d8 movslq 0x418(%r12), %rax leaq 0x1(%rax), %rcx movl %ecx, 0x418(%r12) movq 0x410(%r12), %rdi leaq 0x8(,%rax,8), %rsi callq 0x5320 movq %rax, %r15 movq %rax, 0x410(%r12) testl %r14d, %r14d je 0x109df movslq 0x418(%r12), %rax movq %rbx, -0x8(%r15,%rax,8) jmp 0x10a22 movq %r15, %rdi addq $0x8, %rdi movslq 0x418(%r12), %rax leaq -0x8(,%rax,8), %rdx movq %r15, %rsi callq 0x5350 movq %rbx, (%r15) jmp 0x10a22 decl %eax movl %eax, 0x418(%rsi) leaq (%rdi,%rcx), %rsi addq $0x8, %rsi addq %rcx, %rdi subq %rcx, %rdx addq $-0x8, %rdx callq 0x5350 leaq 0x3cdaf(%rip), %rax # 0x4d7d8 movq 0x20648(%rax), %rax testq %rax, %rax je 0x10a3c movq %rbx, %rdi movl %ebp, %esi callq *%rax cmpl $0x40002, %ebp # imm = 0x40002 jne 0x10a4c movq %rbx, %rdi callq 0x10a5b addq $0x18, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq
/wjakob[P]glfw/src/monitor.c
inja::InjaError::InjaError(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>> const&)
// Exception payload for inja errors: the base runtime_error message is
// built as "[inja.exception.<type>] <message>", and the type/message
// strings are stored separately for structured access. The source
// location is initialized to {0, 0} (line 0, column 0).
explicit InjaError(const std::string& type, const std::string& message)
    : std::runtime_error("[inja.exception." + type + "] " + message), type(type), message(message), location({0, 0}) {}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x68, %rsp movq %rdx, %r14 movq %rsi, %r15 movq %rdi, %rbx leaq 0x283b5(%rip), %rsi # 0x320ce leaq 0x48(%rsp), %r12 movq %r12, %rdi movq %r15, %rdx callq 0x9e34 leaq 0x2e0c4(%rip), %rdx # 0x37df4 leaq 0x8(%rsp), %rdi movq %r12, %rsi callq 0x9b4b leaq 0x28(%rsp), %rdi leaq 0x8(%rsp), %rsi movq %r14, %rdx callq 0x9e15 leaq 0x28(%rsp), %rsi movq %rbx, %rdi callq 0x6480 leaq 0x28(%rsp), %rdi callq 0x66e8 leaq 0x8(%rsp), %rdi callq 0x66e8 leaq 0x48(%rsp), %rdi callq 0x66e8 leaq 0x4638f(%rip), %rax # 0x50110 movq %rax, (%rbx) leaq 0x10(%rbx), %r12 movq %r12, %rdi movq %r15, %rsi callq 0x6500 leaq 0x30(%rbx), %rdi movq %r14, %rsi callq 0x6500 xorps %xmm0, %xmm0 movups %xmm0, 0x50(%rbx) addq $0x68, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %r14 movq %r12, %rdi callq 0x66e8 jmp 0x9dc2 movq %rax, %r14 movq %rbx, %rdi callq 0x6410 jmp 0x9df7 movq %rax, %r14 leaq 0x28(%rsp), %rdi callq 0x66e8 jmp 0x9dde movq %rax, %r14 leaq 0x8(%rsp), %rdi callq 0x66e8 jmp 0x9ded movq %rax, %r14 leaq 0x48(%rsp), %rdi callq 0x66e8 movq %r14, %rdi callq 0x6630 nop
/pantor[P]inja/include/inja/exceptions.hpp
nlohmann::json_abi_v3_11_3::detail::parser<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>> nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>::parser<nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>>(nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>, std::function<bool (int, nlohmann::json_abi_v3_11_3::detail::parse_event_t, nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>&)>, bool, bool)
// Factory helper: forwards an input adapter, an optional parse-event
// callback, and the exception/comment policy flags to the detail parser
// bound to this basic_json specialization.
// NOTE(review): `adapter` and `cb` are moved from; callers must not
// reuse them afterwards.
static ::nlohmann::detail::parser<basic_json, InputAdapterType> parser(
    InputAdapterType adapter,
    detail::parser_callback_t<basic_json>cb = nullptr,
    const bool allow_exceptions = true,
    const bool ignore_comments = false
)
{
    return ::nlohmann::detail::parser<basic_json, InputAdapterType>(std::move(adapter),
            std::move(cb), allow_exceptions, ignore_comments);
}
pushq %rbx subq $0x20, %rsp movq %rsi, %rax movq %rdi, %rbx leaq 0x30(%rsp), %rsi xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) andq $0x0, 0x10(%rsp) movq 0x18(%rax), %rdi movq %rdi, 0x18(%rsp) movq 0x10(%rax), %rdi testq %rdi, %rdi je 0x9f22 movups (%rax), %xmm1 addq $0x10, %rax movaps %xmm1, (%rsp) movq %rdi, 0x10(%rsp) movups %xmm0, (%rax) movzbl %dl, %eax movzbl %cl, %r8d movq %rsp, %rdx movq %rbx, %rdi movl %eax, %ecx callq 0xabbc movq %rsp, %rdi callq 0x9ce2 movq %rbx, %rax addq $0x20, %rsp popq %rbx retq movq %rax, %rbx movq %rsp, %rdi callq 0x9ce2 movq %rbx, %rdi callq 0x6630
/pantor[P]inja/third_party/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>::json_value::json_value(nlohmann::json_abi_v3_11_3::detail::value_t)
// Constructs the union member matching value type `t`, default
// initialized: an empty container/string for the reference types,
// zero/false for the arithmetic types, and a null pointer for
// null/discarded values.
json_value(value_t t)
{
    switch (t)
    {
        case value_t::object:
        {
            object = create<object_t>();
            break;
        }

        case value_t::array:
        {
            array = create<array_t>();
            break;
        }

        case value_t::string:
        {
            string = create<string_t>("");
            break;
        }

        case value_t::binary:
        {
            binary = create<binary_t>();
            break;
        }

        case value_t::boolean:
        {
            boolean = static_cast<boolean_t>(false);
            break;
        }

        case value_t::number_integer:
        {
            number_integer = static_cast<number_integer_t>(0);
            break;
        }

        case value_t::number_unsigned:
        {
            number_unsigned = static_cast<number_unsigned_t>(0);
            break;
        }

        case value_t::number_float:
        {
            number_float = static_cast<number_float_t>(0.0);
            break;
        }

        case value_t::null:
        {
            object = nullptr;  // silence warning, see #821
            break;
        }

        case value_t::discarded:
        default:
        {
            object = nullptr;  // silence warning, see #821
            // This branch is unreachable for value_t::null (handled
            // above); the check only guards against a corrupted `t`.
            if (JSON_HEDLEY_UNLIKELY(t == value_t::null))
            {
                JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.11.3", nullptr)); // LCOV_EXCL_LINE
            }
            break;
        }
    }
}
pushq %rbx movq %rdi, %rbx cmpl $0x8, %esi ja 0xa47d movl %esi, %eax leaq 0x26b90(%rip), %rcx # 0x31004 movslq (%rcx,%rax,4), %rax addq %rcx, %rax jmpq *%rax andq $0x0, (%rbx) jmp 0xa4ac callq 0xa4ae jmp 0xa4a9 movb $0x0, (%rbx) jmp 0xa4ac callq 0xa502 jmp 0xa4a9 leaq 0x2bdcd(%rip), %rdi # 0x3626a callq 0xa544 jmp 0xa4a9 callq 0xa5d0 movq %rax, (%rbx) popq %rbx retq
/pantor[P]inja/third_party/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_3::detail::lexer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>>::lexer(nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>&&, bool)
// Takes ownership of the input adapter and caches the current locale's
// decimal point character so number lexing can normalize it later.
explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) noexcept
    : ia(std::move(adapter))
    , ignore_comments(ignore_comments_)
    , decimal_point_char(static_cast<char_int_type>(get_decimal_point()))
{}
pushq %rbx movq %rdi, %rbx movups (%rsi), %xmm0 movups 0x10(%rsi), %xmm1 movups %xmm1, 0x10(%rdi) movups %xmm0, (%rdi) movb %dl, 0x20(%rdi) orl $-0x1, 0x24(%rdi) xorl %eax, %eax movb %al, 0x28(%rdi) leaq 0x70(%rdi), %rcx xorps %xmm0, %xmm0 movups %xmm0, 0x30(%rdi) movups %xmm0, 0x40(%rdi) movups %xmm0, 0x50(%rdi) movq %rcx, 0x60(%rdi) andq $0x0, 0x68(%rdi) movb %al, 0x70(%rdi) leaq 0x2b606(%rip), %rax # 0x3626a movq %rax, 0x80(%rdi) movups %xmm0, 0x88(%rdi) andq $0x0, 0x98(%rdi) callq 0xacb6 movsbl %al, %eax movl %eax, 0xa0(%rbx) popq %rbx retq
/pantor[P]inja/third_party/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_3::detail::lexer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>>::reset()
/// Discards all buffered token state and seeds the raw token string
/// with the character currently held in `current`.
void reset() noexcept
{
    // The two buffers are independent; empty both before re-seeding.
    token_string.clear();
    token_buffer.clear();
    token_string.push_back(char_traits<char_type>::to_char_type(current));
}
pushq %rax movq %rdi, %rax andq $0x0, 0x68(%rdi) movq 0x60(%rdi), %rcx movb $0x0, (%rcx) addq $0x48, %rdi movq 0x48(%rax), %rcx cmpq %rcx, 0x50(%rax) je 0xba78 movq %rcx, 0x50(%rax) movb 0x24(%rax), %al leaq 0x7(%rsp), %rsi movb %al, (%rsi) callq 0xb8ee popq %rax retq movq %rax, %rdi callq 0x7a5d nop
/pantor[P]inja/third_party/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_3::detail::lexer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>>::get_codepoint()
int get_codepoint() { // this function only makes sense after reading `\u` JSON_ASSERT(current == 'u'); int codepoint = 0; const auto factors = { 12u, 8u, 4u, 0u }; for (const auto factor : factors) { get(); if (current >= '0' && current <= '9') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x30u) << factor); } else if (current >= 'A' && current <= 'F') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x37u) << factor); } else if (current >= 'a' && current <= 'f') { codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x57u) << factor); } else { return -1; } } JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF); return codepoint; }
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x10, %rsp cmpl $0x75, 0x24(%rdi) jne 0xbb1b movq %rdi, %rbx movaps 0x25612(%rip), %xmm0 # 0x310c0 movaps %xmm0, (%rsp) xorl %r15d, %r15d pushq $-0x1 popq %r12 xorl %ebp, %ebp cmpq $0x10, %r15 je 0xbaff movl (%rsp,%r15), %r14d movq %rbx, %rdi callq 0xb734 movl 0x24(%rbx), %eax leal -0x30(%rax), %edx cmpl $0xa, %edx jb 0xbaf2 leal -0x41(%rax), %ecx cmpl $0x5, %ecx ja 0xbae5 addl $-0x37, %eax jmp 0xbaf0 leal -0x61(%rax), %ecx cmpl $0x5, %ecx ja 0xbb16 addl $-0x57, %eax movl %eax, %edx movl %r14d, %ecx shll %cl, %edx addl %edx, %ebp addq $0x4, %r15 jmp 0xbabb cmpl $0x10000, %ebp # imm = 0x10000 jae 0xbb3a movl %ebp, %eax addq $0x10, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq movl %r12d, %ebp jmp 0xbb07 leaq 0x27af4(%rip), %rdi # 0x33616 leaq 0x265c5(%rip), %rsi # 0x320ee leaq 0x27af5(%rip), %rcx # 0x33625 movl $0x1d78, %edx # imm = 0x1D78 callq 0x6270 leaq 0x27bf9(%rip), %rdi # 0x3373a leaq 0x265a6(%rip), %rsi # 0x320ee leaq 0x27ad6(%rip), %rcx # 0x33625 movl $0x1d92, %edx # imm = 0x1D92 callq 0x6270 nop
/pantor[P]inja/third_party/include/nlohmann/json.hpp
nlohmann::json_abi_v3_11_3::detail::lexer<nlohmann::json_abi_v3_11_3::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>>, bool, long, unsigned long, double, std::allocator, nlohmann::json_abi_v3_11_3::adl_serializer, std::vector<unsigned char, std::allocator<unsigned char>>, void>, nlohmann::json_abi_v3_11_3::detail::iterator_input_adapter<std::istreambuf_iterator<char, std::char_traits<char>>>>::next_byte_in_range(std::initializer_list<int>)
bool next_byte_in_range(std::initializer_list<char_int_type> ranges) { JSON_ASSERT(ranges.size() == 2 || ranges.size() == 4 || ranges.size() == 6); add(current); for (auto range = ranges.begin(); range != ranges.end(); ++range) { get(); if (JSON_HEDLEY_LIKELY(*range <= current && current <= *(++range))) // NOLINT(bugprone-inc-dec-in-conditions) { add(current); } else { error_message = "invalid string: ill-formed UTF-8 byte"; return false; } } return true; }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax cmpq $0x6, %rdx ja 0xbbdf movq %rdx, %rbx pushq $0x54 popq %rax btq %rdx, %rax jae 0xbbdf movq %rsi, %r15 movq %rdi, %r14 movsbl 0x24(%rdi), %esi leaq 0x60(%rdi), %r12 movq %r12, %rdi callq 0x6320 shlq $0x2, %rbx testq %rbx, %rbx je 0xbbcd movq %r14, %rdi callq 0xb734 movl 0x24(%r14), %eax cmpl %eax, (%r15) jg 0xbbbf cmpl 0x4(%r15), %eax jg 0xbbbf movsbl %al, %esi movq %r12, %rdi callq 0x6320 addq $0x8, %r15 addq $-0x8, %rbx jmp 0xbb8e leaq 0x27a2a(%rip), %rax # 0x335f0 movq %rax, 0x80(%r14) testq %rbx, %rbx sete %al addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq leaq 0x27b7f(%rip), %rdi # 0x33765 leaq 0x26501(%rip), %rsi # 0x320ee leaq 0x27bb0(%rip), %rcx # 0x337a4 movl $0x1da7, %edx # imm = 0x1DA7 callq 0x6270
/pantor[P]inja/third_party/include/nlohmann/json.hpp
inja::Renderer::visit(inja::ExtendsStatementNode const&)
// Handles an `extends` statement: renders the named parent template in
// place of the current output and stops rendering the child template.
void visit(const ExtendsStatementNode& node) {
    const auto included_template_it = template_storage.find(node.file);

    if (included_template_it != template_storage.end()) {
      const Template* parent_template = &included_template_it->second;
      // Render the parent with the same data and additional context.
      render_to(*output_stream, *parent_template, *data_input, &additional_data);
      // Tell the caller to abandon the remainder of the child template.
      break_rendering = true;
    } else if (config.throw_at_missing_includes) {
      throw_renderer_error("extends '" + node.file + "' not found", node);
    }
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x48, %rsp movq %rsi, %r14 movq %rdi, %rbx movq 0x10(%rdi), %rdi leaq 0x10(%rsi), %r15 movq %r15, %rsi callq 0x228ba movq 0x10(%rbx), %rcx addq $0x8, %rcx cmpq %rcx, %rax je 0x16a1e addq $0x40, %rax movq 0x60(%rbx), %rcx movq 0x68(%rbx), %rsi leaq 0x70(%rbx), %r8 movq %rbx, %rdi movq %rax, %rdx callq 0x141cc movb $0x1, 0x140(%rbx) jmp 0x16a73 cmpb $0x1, 0x8(%rbx) jne 0x16a73 leaq 0x2060f(%rip), %rsi # 0x3703a leaq 0x28(%rsp), %r12 movq %r12, %rdi movq %r15, %rdx callq 0x9e34 leaq 0x1eb39(%rip), %rdx # 0x3557b leaq 0x8(%rsp), %rdi movq %r12, %rsi callq 0x9b4b leaq 0x8(%rsp), %rsi movq %rbx, %rdi movq %r14, %rdx callq 0x1a1b4 leaq 0x8(%rsp), %rdi callq 0x66e8 leaq 0x28(%rsp), %rdi callq 0x66e8 addq $0x48, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x8(%rsp), %rdi callq 0x66e8 jmp 0x16a91 movq %rax, %rbx leaq 0x28(%rsp), %rdi callq 0x66e8 movq %rbx, %rdi callq 0x6630 nop
/pantor[P]inja/include/inja/renderer.hpp