name
string | code
string | asm
string | file
string |
|---|---|---|---|
stbi__gif_load_next(stbi__context*, stbi__gif*, int*, int, unsigned char*)
|
// Decode the next frame of a GIF stream into g->out (RGBA, 4 bytes/pixel).
//   s        - input stream
//   g        - decoder state; out/background/history persist across frames
//   comp     - receives component count from the header (first call only)
//   req_comp - unused here (format conversion happens in the caller)
//   two_back - pixel data of the frame before the previous one, needed for
//              disposal method 3 ("restore to previous"); may be NULL
// Returns g->out on success, a non-NULL marker (the stream pointer) when
// the GIF trailer (0x3B) is reached, or NULL on error (failure reason set
// via stbi__errpuc).
static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back)
{
   int dispose;
   int first_frame;
   int pi;
   int pcount;
   STBI_NOTUSED(req_comp);

   // on first frame, any non-written pixels get the background colour (non-transparent)
   first_frame = 0;
   if (g->out == 0) {
      // first call: parse the header and allocate the working buffers
      if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header
      if (!stbi__mad3sizes_valid(4, g->w, g->h, 0))
         return stbi__errpuc("too large", "GIF image is too large");
      pcount = g->w * g->h;
      g->out = (stbi_uc *) stbi__malloc(4 * pcount);
      g->background = (stbi_uc *) stbi__malloc(4 * pcount);
      g->history = (stbi_uc *) stbi__malloc(pcount);
      if (!g->out || !g->background || !g->history)
         return stbi__errpuc("outofmem", "Out of memory");
      // NOTE(review): on partial allocation failure the successful buffers
      // are not freed here — presumably the caller releases g's buffers on
      // error; confirm against the call site.

      // image is treated as "transparent" at the start - ie, nothing overwrites the current background;
      // background colour is only used for pixels that are not rendered first frame, after that "background"
      // color refers to the color that was there the previous frame.
      memset(g->out, 0x00, 4 * pcount);
      memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent)
      memset(g->history, 0x00, pcount);        // pixels that were affected previous frame
      first_frame = 1;
   } else {
      // second frame - how do we dispose of the previous one?
      dispose = (g->eflags & 0x1C) >> 2; // disposal method bits from the graphic control extension

      pcount = g->w * g->h;

      if ((dispose == 3) && (two_back == 0)) {
         dispose = 2; // if I don't have an image to revert back to, default to the old background
      }

      if (dispose == 3) { // use previous graphic
         for (pi = 0; pi < pcount; ++pi) {
            if (g->history[pi]) {
               memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 );
            }
         }
      } else if (dispose == 2) {
         // restore what was changed last frame to background before that frame;
         for (pi = 0; pi < pcount; ++pi) {
            if (g->history[pi]) {
               memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 );
            }
         }
      } else {
         // This is a non-disposal case either way, so just
         // leave the pixels as is, and they will become the new background
         // 1: do not dispose
         // 0: not specified.
      }

      // background is what out is after the undoing of the previous frame;
      memcpy( g->background, g->out, 4 * g->w * g->h );
   }

   // clear my history; pixels written this frame are re-marked below
   memset( g->history, 0x00, g->w * g->h );

   // process blocks until we hit an image descriptor or the trailer
   for (;;) {
      int tag = stbi__get8(s);
      switch (tag) {
         case 0x2C: /* Image Descriptor */
         {
            stbi__int32 x, y, w, h;
            stbi_uc *o;

            // sub-image rectangle within the logical screen
            x = stbi__get16le(s);
            y = stbi__get16le(s);
            w = stbi__get16le(s);
            h = stbi__get16le(s);
            if (((x + w) > (g->w)) || ((y + h) > (g->h)))
               return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");

            // precompute byte offsets for the raster decoder
            g->line_size = g->w * 4;
            g->start_x = x * 4;
            g->start_y = y * g->line_size;
            g->max_x = g->start_x + w * 4;
            g->max_y = g->start_y + h * g->line_size;
            g->cur_x = g->start_x;
            g->cur_y = g->start_y;

            // if the width of the specified rectangle is 0, that means
            // we may not see *any* pixels or the image is malformed;
            // to make sure this is caught, move the current y down to
            // max_y (which is what out_gif_code checks).
            if (w == 0)
               g->cur_y = g->max_y;

            g->lflags = stbi__get8(s);

            if (g->lflags & 0x40) {
               g->step = 8 * g->line_size; // first interlaced spacing
               g->parse = 3;
            } else {
               g->step = g->line_size;
               g->parse = 0;
            }

            if (g->lflags & 0x80) {
               // local color table present: parse it, honoring the
               // current transparency index if transparency is enabled
               stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
               g->color_table = (stbi_uc *) g->lpal;
            } else if (g->flags & 0x80) {
               // no local table: fall back to the global color table
               g->color_table = (stbi_uc *) g->pal;
            } else
               return stbi__errpuc("missing color table", "Corrupt GIF");

            o = stbi__process_gif_raster(s, g);
            if (!o) return NULL;

            // if this was the first frame,
            pcount = g->w * g->h;
            if (first_frame && (g->bgindex > 0)) {
               // if first frame, any pixel not drawn to gets the background color
               for (pi = 0; pi < pcount; ++pi) {
                  if (g->history[pi] == 0) {
                     g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be;
                     memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 );
                  }
               }
            }

            return o;
         }

         case 0x21: // Comment Extension.
         {
            int len;
            int ext = stbi__get8(s);
            if (ext == 0xF9) { // Graphic Control Extension.
               len = stbi__get8(s);
               if (len == 4) {
                  g->eflags = stbi__get8(s);
                  g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.

                  // unset old transparent
                  if (g->transparent >= 0) {
                     g->pal[g->transparent][3] = 255;
                  }
                  if (g->eflags & 0x01) {
                     g->transparent = stbi__get8(s);
                     if (g->transparent >= 0) {
                        g->pal[g->transparent][3] = 0;
                     }
                  } else {
                     // don't need transparent
                     stbi__skip(s, 1);
                     g->transparent = -1;
                  }
               } else {
                  stbi__skip(s, len);
                  break;
               }
            }
            // drain any remaining sub-blocks of this extension
            while ((len = stbi__get8(s)) != 0) {
               stbi__skip(s, len);
            }
            break;
         }

         case 0x3B: // gif stream termination code
            return (stbi_uc *) s; // using '1' causes warning on some compilers

         default:
            return stbi__errpuc("unknown code", "Corrupt GIF");
      }
   }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq 0x8(%rsi), %r13
movq %rsi, %r14
movq %rdi, %rbx
pushq $0x2
popq %rdi
testq %r13, %r13
je 0x5e5af
movl $0x302, %eax # imm = 0x302
bextrl %eax, 0x30(%r14), %esi
movl 0x4(%r14), %eax
imull (%r14), %eax
cmpl $0x3, %esi
movl %esi, %edx
cmovel %edi, %edx
testq %rcx, %rcx
cmovnel %esi, %edx
cmpl $0x3, %edx
je 0x5e67f
cmpl $0x2, %edx
jne 0x5e6a9
movl %eax, %ecx
sarl $0x1f, %ecx
andnl %eax, %ecx, %eax
xorl %ecx, %ecx
cmpq %rcx, %rax
je 0x5e6a9
movq 0x18(%r14), %rdx
cmpb $0x0, (%rdx,%rcx)
je 0x5e5aa
movq 0x10(%r14), %rsi
movq 0x8(%r14), %rdx
movl (%rsi,%rcx,4), %esi
movl %esi, (%rdx,%rcx,4)
incq %rcx
jmp 0x5e589
movq %rbx, %rdi
movq %r14, %rsi
xorl %ecx, %ecx
callq 0x5eb89
testl %eax, %eax
je 0x5e8bc
movl (%r14), %ebp
movl 0x4(%r14), %r15d
pushq $0x4
popq %rdi
movl %ebp, %esi
movl %r15d, %edx
xorl %ecx, %ecx
callq 0x5b9b3
testl %eax, %eax
je 0x5e8ac
imull %ebp, %r15d
movq %r13, 0x20(%rsp)
leal (,%r15,4), %eax
movslq %eax, %r13
movq %r13, %rdi
callq 0x81b0
movq %r13, %rdi
movq %rax, %rbp
movq %rax, 0x8(%r14)
callq 0x81b0
movslq %r15d, %r15
movq %rax, 0x10(%rsp)
movq %rax, 0x10(%r14)
movq %r15, %rdi
callq 0x81b0
leaq 0x2d9f(%rip), %rcx # 0x613c7
movq %rax, 0x18(%r14)
testq %rbp, %rbp
je 0x5e8b3
cmpq $0x0, 0x10(%rsp)
movq %rax, %r12
sete %al
testq %r12, %r12
sete %dl
orb %al, %dl
jne 0x5e8b3
movq %rbp, %rdi
xorl %esi, %esi
movq %r13, %rdx
callq 0x80b0
movq 0x10(%rsp), %rdi
xorl %esi, %esi
movq %r13, %rdx
callq 0x80b0
movq %r12, %rdi
xorl %esi, %esi
movq %r15, %rdx
callq 0x80b0
movq 0x20(%rsp), %r13
jmp 0x5e6d4
movl %eax, %edx
sarl $0x1f, %edx
andnl %eax, %edx, %eax
xorl %edx, %edx
cmpq %rdx, %rax
je 0x5e6a9
movq 0x18(%r14), %rsi
cmpb $0x0, (%rsi,%rdx)
je 0x5e6a4
movq 0x8(%r14), %rsi
movl (%rcx,%rdx,4), %edi
movl %edi, (%rsi,%rdx,4)
incq %rdx
jmp 0x5e68b
movslq (%r14), %rax
movslq 0x4(%r14), %rdx
movq 0x8(%r14), %rsi
movq 0x10(%r14), %rdi
imulq %rax, %rdx
shlq $0x2, %rdx
callq 0x80e0
movslq (%r14), %rax
movslq 0x4(%r14), %r15
movq 0x18(%r14), %r12
imulq %rax, %r15
movq %r12, %rdi
xorl %esi, %esi
movq %r15, %rdx
callq 0x80b0
leaq 0x34(%r14), %r15
pushq $0x1
popq %rbp
movq %rbx, %rdi
callq 0x5b739
cmpb $0x21, %al
jne 0x5e79f
movq %rbx, %rdi
callq 0x5b739
cmpb $-0x7, %al
jne 0x5e78e
movq %rbx, %rdi
callq 0x5b739
cmpb $0x4, %al
jne 0x5e75c
movq %rbx, %rdi
callq 0x5b739
movzbl %al, %eax
movq %rbx, %rdi
movl %eax, 0x30(%r14)
callq 0x5e186
imull $0xa, %eax, %eax
movl %eax, 0x8868(%r14)
movslq 0x2c(%r14), %rax
testq %rax, %rax
js 0x5e744
movb $-0x1, 0x3(%r15,%rax,4)
testb $0x1, 0x30(%r14)
jne 0x5e76c
movq %rbx, %rdi
movl %ebp, %esi
callq 0x5b95d
orl $-0x1, 0x2c(%r14)
jmp 0x5e78e
movzbl %al, %esi
movq %rbx, %rdi
callq 0x5b95d
jmp 0x5e6e8
movq %rbx, %rdi
callq 0x5b739
movzbl %al, %eax
movl %eax, 0x2c(%r14)
movb $0x0, 0x37(%r14,%rax,4)
jmp 0x5e78e
movzbl %al, %esi
movq %rbx, %rdi
callq 0x5b95d
movq %rbx, %rdi
callq 0x5b739
testb %al, %al
jne 0x5e783
jmp 0x5e6e8
movzbl %al, %eax
cmpl $0x3b, %eax
je 0x5e8be
cmpl $0x2c, %eax
jne 0x5e8a3
movq %rbx, %rdi
movq %r13, 0x20(%rsp)
callq 0x5e186
movq %rbx, %rdi
movl %eax, %r12d
callq 0x5e186
movq %rbx, %rdi
movl %eax, %r13d
callq 0x5e186
movq %rbx, %rdi
movl %eax, %ebp
callq 0x5e186
movl (%r14), %edx
leal (%rbp,%r12), %esi
leaq 0x2dd0(%rip), %rcx # 0x615c0
cmpl %edx, %esi
jg 0x5e8b3
addl %r13d, %eax
cmpl 0x4(%r14), %eax
jg 0x5e8b3
shll $0x2, %edx
shll $0x2, %r12d
shll $0x2, %esi
movq %rbx, %rdi
movl %edx, 0x8864(%r14)
imull %edx, %r13d
imull %eax, %edx
testl %ebp, %ebp
movl %r12d, 0x884c(%r14)
movl %r13d, 0x8850(%r14)
movl %esi, 0x8854(%r14)
movl %edx, 0x8858(%r14)
cmovnel %r13d, %edx
movl %r12d, 0x885c(%r14)
movl %edx, 0x8860(%r14)
callq 0x5b739
movzbl %al, %ecx
movl %ecx, 0x8848(%r14)
movl %ecx, %edx
shll $0x19, %edx
movl 0x8864(%r14), %esi
sarl $0x1f, %edx
testb $0x40, %cl
leal (,%rsi,8), %edi
cmovel %esi, %edi
andl $0x3, %edx
movl %edi, 0x8844(%r14)
movl %edx, 0x8840(%r14)
testb %cl, %cl
js 0x5e8d0
testb $-0x80, 0x20(%r14)
pushq $0x2
popq %rbp
jne 0x5e8fc
leaq 0x2d34(%rip), %rcx # 0x615d5
jmp 0x5e8b3
leaq 0x2d3f(%rip), %rcx # 0x615e9
jmp 0x5e8b3
leaq 0x2b0a(%rip), %rcx # 0x613bd
movq %rcx, %fs:-0x10
xorl %ebx, %ebx
movq %rbx, %rax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x434(%r14), %r15
andb $0x7, %al
pushq $0x2
popq %rbp
testb $0x1, 0x30(%r14)
shlxl %eax, %ebp, %edx
jne 0x5e8ed
pushq $-0x1
popq %rcx
jmp 0x5e8f1
movl 0x2c(%r14), %ecx
movq %rbx, %rdi
movq %r15, %rsi
callq 0x5ecad
movq %rbx, %rdi
movq %r15, 0x8838(%r14)
callq 0x5b739
cmpb $0xc, %al
ja 0x5e8bc
movzbl %al, %edx
pushq $0x1
popq %rcx
shlxl %eax, %ecx, %r12d
xorl %ecx, %ecx
cmpq %rcx, %r12
je 0x5e940
orw $-0x1, 0x834(%r14,%rcx,4)
movb %cl, 0x836(%r14,%rcx,4)
movb %cl, 0x837(%r14,%rcx,4)
incq %rcx
jmp 0x5e91c
shlxl %eax, %ebp, %eax
movl %edx, %ecx
movl %r12d, %esi
incl %esi
incl %ecx
leal 0x2(%r12), %edx
xorl %edi, %edi
decl %eax
movl %esi, 0x2c(%rsp)
pushq $-0x1
popq %rsi
movl %ecx, 0x38(%rsp)
movl %eax, 0x34(%rsp)
movl %edx, 0x30(%rsp)
xorl %r8d, %r8d
xorl %r15d, %r15d
xorl %ebp, %ebp
movq %r12, 0x40(%rsp)
movl %eax, 0x1c(%rsp)
movl %ecx, %eax
movl %esi, 0x3c(%rsp)
movl %edx, 0xc(%rsp)
movq %rax, 0x10(%rsp)
movb %dil, 0x8(%rsp)
movl %r8d, %r13d
movl %r15d, %r12d
subl 0x10(%rsp), %r15d
jge 0x5e9d1
testl %ebp, %ebp
jne 0x5e9b3
movq %rbx, %rdi
callq 0x5b739
testb %al, %al
je 0x5eb07
movzbl %al, %ebp
movq %rbx, %rdi
decl %ebp
callq 0x5b739
movzbl %al, %eax
shlxl %r12d, %eax, %eax
addl $0x8, %r12d
orl %eax, %r13d
movl %r12d, %r15d
jmp 0x5e995
movq 0x10(%rsp), %rax
movl 0x38(%rsp), %ecx
movl 0x30(%rsp), %edx
movb $0x1, %dil
sarxl %eax, %r13d, %r8d
andl 0x1c(%rsp), %r13d
movl 0x34(%rsp), %eax
pushq $-0x1
popq %rsi
movq 0x40(%rsp), %r12
cmpl %r12d, %r13d
je 0x5e977
cmpl 0x2c(%rsp), %r13d
je 0x5eaee
movl 0xc(%rsp), %edx
leaq 0x2be8(%rip), %rcx # 0x615fe
leaq 0x2bfe(%rip), %rax # 0x6161b
cmpl %edx, %r13d
cmovgq %rax, %rcx
jg 0x5e8b3
movb 0x8(%rsp), %al
xorb $0x1, %al
testb $0x1, %al
jne 0x5e8b3
movl 0x3c(%rsp), %ecx
testl %ecx, %ecx
js 0x5ea86
cmpl $0x1fff, %edx # imm = 0x1FFF
jg 0x5eb71
movslq %edx, %rax
incl %edx
movw %cx, 0x834(%r14,%rax,4)
movl %ecx, %ecx
movb 0x836(%r14,%rcx,4), %cl
movb %cl, 0x836(%r14,%rax,4)
cmpl %edx, %r13d
je 0x5ea7c
movslq %r13d, %rcx
movb 0x836(%r14,%rcx,4), %cl
movb %cl, 0x837(%r14,%rax,4)
jmp 0x5ea8f
cmpl %edx, %r13d
je 0x5eb7d
movzwl %r13w, %esi
movq %r14, %rdi
movl %edx, 0xc(%rsp)
movl %r8d, 0x8(%rsp)
callq 0x5ed13
movl 0x1c(%rsp), %r8d
movl 0xc(%rsp), %r9d
movq 0x10(%rsp), %rdi
testl %r8d, %r9d
leal 0x1(%rdi), %esi
sete %cl
cmpl $0x1000, %r9d # imm = 0x1000
setl %dl
pushq $-0x1
popq %rax
shlxl %esi, %eax, %eax
testb %cl, %dl
movl %r9d, %edx
notl %eax
cmovnel %esi, %edi
movl %r13d, %esi
cmovel %r8d, %eax
movl 0x8(%rsp), %r8d
movl %edi, %ecx
movb $0x1, %dil
jmp 0x5e977
movq %rbx, %rdi
movl %ebp, %esi
callq 0x5b95d
movq %rbx, %rdi
callq 0x5b739
movzbl %al, %ebp
testb %al, %al
jne 0x5eaee
movq 0x8(%r14), %rbx
testq %rbx, %rbx
je 0x5e8bc
cmpq $0x0, 0x20(%rsp)
jne 0x5e8be
cmpl $0x0, 0x24(%r14)
jle 0x5e8be
movl 0x4(%r14), %eax
imull (%r14), %eax
movl %eax, %ecx
sarl $0x1f, %ecx
andnl %eax, %ecx, %eax
xorl %ecx, %ecx
cmpq %rcx, %rax
je 0x5e8be
movq 0x18(%r14), %rdx
cmpb $0x0, (%rdx,%rcx)
jne 0x5eb6c
movslq 0x24(%r14), %rdx
movb $-0x1, 0x37(%r14,%rdx,4)
movslq 0x24(%r14), %rsi
movq 0x8(%r14), %rdx
movl 0x34(%r14,%rsi,4), %esi
movl %esi, (%rdx,%rcx,4)
incq %rcx
jmp 0x5eb3f
leaq 0x2a94(%rip), %rcx # 0x6160c
jmp 0x5e8b3
leaq 0x2a97(%rip), %rcx # 0x6161b
jmp 0x5e8b3
|
/laurinpaech[P]Haswellhof/src/base/stb_image.h
|
stbi__readval(stbi__context*, int, unsigned char*)
|
// Read the channel bytes selected by the bit mask in 'channel' into dest.
// Bit 0x80 selects dest[0], 0x40 selects dest[1], and so on; unselected
// slots are left untouched.  Returns dest, or NULL (via stbi__errpuc) if
// the stream ends early.
static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest)
{
   int idx = 0;
   int bit = 0x80;
   while (idx < 4) {
      if (channel & bit) {
         if (stbi__at_eof(s))
            return stbi__errpuc("bad file", "PIC file too short");
         dest[idx] = stbi__get8(s);
      }
      ++idx;
      bit >>= 1;
   }
   return dest;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl $0x80, %r15d
movq %rdx, %rbx
movl %esi, %ebp
movq %rdi, %r14
xorl %r12d, %r12d
cmpq $0x4, %r12
je 0x5eeb0
testl %ebp, %r15d
je 0x5ee96
movq %r14, %rdi
callq 0x5b6ff
testl %eax, %eax
jne 0x5ee9e
movq %r14, %rdi
callq 0x5b739
movb %al, (%rbx,%r12)
incq %r12
shrl %r15d
jmp 0x5ee73
leaq 0x27fa(%rip), %rax # 0x6169f
xorl %ebx, %ebx
movq %rax, %fs:-0x10
movq %rbx, %rax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/laurinpaech[P]Haswellhof/src/base/stb_image.h
|
stbi__pnm_skip_whitespace(stbi__context*, char*)
|
// Advance *c past PNM whitespace and '#' comments.  On entry *c holds the
// current character; on exit it holds the first non-whitespace,
// non-comment character (or the last character read before EOF).
static void stbi__pnm_skip_whitespace(stbi__context *s, char *c)
{
   while (1) {
      // eat plain whitespace
      while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) {
         *c = (char) stbi__get8(s);
      }
      if (stbi__at_eof(s)) {
         return;
      }
      if (*c != '#') {
         return;
      }
      // '#' starts a comment: consume through end of line
      while (!stbi__at_eof(s) && *c != '\n' && *c != '\r') {
         *c = (char) stbi__get8(s);
      }
   }
}
|
pushq %r15
pushq %r14
pushq %rbx
movabsq $0x100003e00, %r15 # imm = 0x100003E00
movq %rsi, %rbx
movq %rdi, %r14
movq %r14, %rdi
callq 0x5b6ff
testl %eax, %eax
je 0x5f040
movq %r14, %rdi
callq 0x5b6ff
testl %eax, %eax
jne 0x5f05b
cmpb $0x23, (%rbx)
jne 0x5f05b
movq %r14, %rdi
callq 0x5b6ff
testl %eax, %eax
jne 0x5effe
movzbl (%rbx), %eax
cmpl $0xa, %eax
je 0x5effe
cmpl $0xd, %eax
je 0x5effe
movq %r14, %rdi
callq 0x5b739
movb %al, (%rbx)
jmp 0x5f01b
movzbl (%rbx), %eax
cmpq $0x20, %rax
ja 0x5f00a
btq %rax, %r15
jae 0x5f00a
movq %r14, %rdi
callq 0x5b739
movb %al, (%rbx)
jmp 0x5effe
popq %rbx
popq %r14
popq %r15
retq
|
/laurinpaech[P]Haswellhof/src/base/stb_image.h
|
stbi__hdr_gettoken(stbi__context*, char*)
|
// Read one newline-terminated token from z into buffer (capacity
// STBI__HDR_BUFLEN).  If the line is longer than the buffer, the stored
// token is truncated and the remainder of the line is discarded.  The
// result is always NUL-terminated; returns buffer.
static char *stbi__hdr_gettoken(stbi__context *z, char *buffer)
{
   int count = 0;
   char ch = (char) stbi__get8(z);
   for (;;) {
      if (stbi__at_eof(z) || ch == '\n')
         break;
      buffer[count] = ch;
      ++count;
      if (count == STBI__HDR_BUFLEN - 1) {
         // buffer full: flush the rest of the line
         while (!stbi__at_eof(z) && stbi__get8(z) != '\n')
            ;
         break;
      }
      ch = (char) stbi__get8(z);
   }
   buffer[count] = 0;
   return buffer;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
xorl %r15d, %r15d
movq %r14, %rdi
callq 0x5b739
movq %r14, %rdi
movl %eax, %ebp
callq 0x5b6ff
cmpb $0xa, %bpl
je 0x5f0f8
testl %eax, %eax
jne 0x5f0f8
movb %bpl, (%rbx,%r15)
incq %r15
cmpq $0x3ff, %r15 # imm = 0x3FF
jne 0x5f0ae
movl $0x3ff, %r15d # imm = 0x3FF
movq %r14, %rdi
callq 0x5b6ff
testl %eax, %eax
jne 0x5f0f8
movq %r14, %rdi
callq 0x5b739
cmpb $0xa, %al
jne 0x5f0e0
movb $0x0, (%rbx,%r15)
movq %rbx, %rax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/laurinpaech[P]Haswellhof/src/base/stb_image.h
|
oonf_viewer_telnet_help
|
enum oonf_telnet_result
oonf_viewer_telnet_help(
  struct autobuf *out, const char *cmd, const char *parameter, struct oonf_viewer_template *template, size_t count)
{
  const char *rest;

  /* strip the command word itself; a NULL remainder is acceptable */
  rest = str_hasnextword(parameter, cmd);

  /* emit our own help header, then the per-template help output */
  abuf_appendf(out, "%s command:\n", cmd);
  oonf_viewer_print_help(out, rest, template, count);

  return TELNET_RESULT_ACTIVE;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq %r8, %rbx
movq %rcx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq %rdx, %rdi
callq 0x10e0
movq %rax, %r13
leaq 0xa1e(%rip), %rsi # 0x2123
movq %r12, %rdi
movq %r15, %rdx
xorl %eax, %eax
callq 0x1080
movq %r12, %rdi
movq %r13, %rsi
movq %r14, %rdx
movq %rbx, %rcx
callq 0x1040
xorl %eax, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/OLSR[P]OONF/src/base/oonf_viewer.c
|
curlx_sltoui
|
/*
** Narrow a non-negative long to unsigned int by masking with
** CURL_MASK_UINT.  Debug builds assert the value is non-negative and
** (on platforms where long is wider than int) in range.
*/
unsigned int curlx_sltoui(long slnum)
{
  unsigned int masked;

#ifdef __INTEL_COMPILER
#  pragma warning(push)
#  pragma warning(disable:810) /* conversion may lose significant bits */
#endif

  DEBUGASSERT(slnum >= 0);
#if (SIZEOF_INT < CURL_SIZEOF_LONG)
  DEBUGASSERT((unsigned long) slnum <= (unsigned long) CURL_MASK_UINT);
#endif
  masked = (unsigned int)(slnum & (long) CURL_MASK_UINT);

#ifdef __INTEL_COMPILER
#  pragma warning(pop)
#endif
  return masked;
}
|
movq %rdi, %rax
retq
|
/pelya[P]curl/lib/warnless.c
|
curlx_uitosi
|
/*
** Narrow an unsigned int to (non-negative) int by masking with
** CURL_MASK_SINT.  Debug builds assert the value fits.
*/
int curlx_uitosi(unsigned int uinum)
{
  int masked;

#ifdef __INTEL_COMPILER
#  pragma warning(push)
#  pragma warning(disable:810) /* conversion may lose significant bits */
#endif

  DEBUGASSERT(uinum <= (unsigned int) CURL_MASK_SINT);
  masked = (int) (uinum & (unsigned int) CURL_MASK_SINT);

#ifdef __INTEL_COMPILER
#  pragma warning(pop)
#endif
  return masked;
}
|
movl %edi, %eax
andl $0x7fffffff, %eax # imm = 0x7FFFFFFF
retq
|
/pelya[P]curl/lib/warnless.c
|
nni_reap_sys_init
|
// Initialize the reap subsystem: clear the exit flag, set up the mutex
// and both condition variables, then start the worker thread.
// Returns 0 on success, or the nni_thr_init error code.
int
nni_reap_sys_init(void)
{
	int rv;

	reap_exit = false;
	nni_mtx_init(&reap_mtx);
	nni_cv_init(&reap_work_cv, &reap_mtx);
	nni_cv_init(&reap_empty_cv, &reap_mtx);

	// If this fails, we don't fail init, instead we will try to
	// start up at reap time.
	// NOTE(review): the comment above says a failure here should not
	// fail init, but a nonzero rv *is* returned to the caller —
	// confirm which behavior is intended.
	if ((rv = nni_thr_init(&reap_thr, reap_worker, NULL)) != 0) {
		return (rv);
	}
	nni_thr_run(&reap_thr);
	return (0);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
pushq %rax
movb $0x0, 0x4723d(%rip) # 0x60f48
leaq 0x4718e(%rip), %rbx # 0x60ea0
movq %rbx, %rdi
callq 0x1d66c
leaq 0x471b7(%rip), %rdi # 0x60ed8
movq %rbx, %rsi
callq 0x1d694
leaq 0x471e0(%rip), %rdi # 0x60f10
movq %rbx, %rsi
callq 0x1d694
leaq 0x47211(%rip), %rdi # 0x60f50
leaq 0x20(%rip), %rsi # 0x19d66
xorl %edx, %edx
callq 0x1d6e8
testl %eax, %eax
jne 0x19d5f
leaq 0x471f8(%rip), %rdi # 0x60f50
callq 0x1d848
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/nanomsg[P]nng/src/core/reap.c
|
reap_worker
|
// Body of the reap thread: repeatedly drains deferred-destruction nodes
// from the registered reap lists and invokes each list's callback on them.
// reap_mtx is held while scanning lists; a list's pending node chain is
// detached under the lock and then processed with the lock released, so
// callbacks can themselves schedule more reap work.
static void
reap_worker(void *unused)
{
	NNI_ARG_UNUSED(unused);
	nni_thr_set_name(NULL, "nng:reap2");
	nni_mtx_lock(&reap_mtx);
	for (;;) {
		nni_reap_list *list;
		bool reaped = false;

		for (list = reap_list; list != NULL; list = list->rl_next) {
			nni_reap_node *node;
			size_t offset;
			nni_cb func;

			if ((node = list->rl_nodes) == NULL) {
				continue;
			}

			reaped = true;
			offset = list->rl_offset;
			func = list->rl_func;
			list->rl_nodes = NULL;

			// We process our list of nodes while not holding
			// the lock.
			nni_mtx_unlock(&reap_mtx);
			while (node != NULL) {
				void *ptr;
				// rl_offset locates the reap node inside its
				// containing object; rn_next is read before
				// calling func, presumably because func may
				// free the node's storage.
				ptr = ((char *) node) - offset;
				node = node->rn_next;
				func(ptr);
			}
			nni_mtx_lock(&reap_mtx);
		}

		if (!reaped) {
			// Nothing was pending this pass: signal waiters on
			// the "empty" cv, then either exit (if requested)
			// or sleep until more work arrives.
			reap_empty = true;
			nni_cv_wake(&reap_empty_cv);
			if (reap_exit) {
				nni_mtx_unlock(&reap_mtx);
				return;
			}
			nni_cv_wait(&reap_work_cv);
		}
	}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
leaq 0x2abac(%rip), %rsi # 0x44927
xorl %edi, %edi
callq 0x1d91e
leaq 0x47117(%rip), %rbx # 0x60ea0
movq %rbx, %rdi
callq 0x1d680
movq 0x47130(%rip), %r15 # 0x60ec8
testq %r15, %r15
je 0x19e04
xorl %eax, %eax
movq 0x8(%r15), %r12
testq %r12, %r12
je 0x19dea
movq 0x18(%r15), %r14
xorl %eax, %eax
movq %rax, 0x8(%r15)
movq 0x10(%r15), %rax
xorl %r13d, %r13d
subq %rax, %r13
movq %rbx, %rdi
callq 0x1d68a
movq (%r12), %rbx
addq %r13, %r12
movq %r12, %rdi
callq *%r14
movq %rbx, %r12
testq %rbx, %rbx
jne 0x19dc4
leaq 0x470c0(%rip), %rbx # 0x60ea0
movq %rbx, %rdi
callq 0x1d680
movb $0x1, %al
movq (%r15), %r15
testq %r15, %r15
jne 0x19d9f
testb $0x1, %al
je 0x19e04
movq 0x470cb(%rip), %r15 # 0x60ec8
xorl %eax, %eax
testq %r15, %r15
jne 0x19d9f
movb $0x1, 0x470c5(%rip) # 0x60ed0
leaq 0x470fe(%rip), %rdi # 0x60f10
callq 0x1d6d4
cmpb $0x1, 0x4712a(%rip) # 0x60f48
je 0x19e31
leaq 0x470b1(%rip), %rdi # 0x60ed8
callq 0x1d6a8
jmp 0x19d91
leaq 0x47068(%rip), %rdi # 0x60ea0
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x1d68a
|
/nanomsg[P]nng/src/core/reap.c
|
nng_str_sockaddr
|
// Format a socket address for display, dispatching on the address family
// to the matching per-family formatter (which writes into buf of size
// bufsz).  Families without a formatter — including NNG_AF_UNSPEC —
// produce the constant string "unknown".
const char *
nng_str_sockaddr(const nng_sockaddr *sa, char *buf, size_t bufsz)
{
	if (sa->s_family == NNG_AF_INPROC) {
		return (str_sa_inproc(&sa->s_inproc, buf, bufsz));
	}
	if (sa->s_family == NNG_AF_INET) {
		return (str_sa_inet(&sa->s_in, buf, bufsz));
	}
	if (sa->s_family == NNG_AF_INET6) {
		return (str_sa_inet6(&sa->s_in6, buf, bufsz));
	}
	if (sa->s_family == NNG_AF_IPC) {
		return (str_sa_ipc(&sa->s_ipc, buf, bufsz));
	}
	if (sa->s_family == NNG_AF_ABSTRACT) {
		return (str_sa_abstract(&sa->s_abstract, buf, bufsz));
	}
	return ("unknown");
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x30, %rsp
movzwl (%rdi), %eax
decl %eax
cmpl $0x4, %eax
ja 0x1a012
movq %rdx, %r14
movq %rsi, %rbx
movq %rdi, %r15
leaq 0x2a9eb(%rip), %rcx # 0x44934
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
addq $0x2, %r15
leaq 0x2a9ff(%rip), %rdx # 0x4495c
jmp 0x1a000
addq $0x4, %r15
leaq 0x2aa9f(%rip), %rdx # 0x44a0c
jmp 0x1a000
movzbl 0x4(%r15), %ecx
movzbl 0x5(%r15), %r8d
movzbl 0x6(%r15), %r9d
movzbl 0x7(%r15), %r10d
movzwl 0x2(%r15), %eax
rolw $0x8, %ax
movzwl %ax, %r11d
leaq 0x2a9cd(%rip), %rdx # 0x44967
movq %rbx, %rdi
movq %r14, %rsi
xorl %eax, %eax
pushq %r11
pushq %r10
callq 0x90b0
addq $0x10, %rsp
jmp 0x1a040
movl 0x14(%r15), %r12d
leaq 0x4(%r15), %rdi
leaq -0x50(%rbp), %rsi
callq 0x1a06e
testl %r12d, %r12d
je 0x1a01b
movl 0x14(%r15), %r8d
movzwl 0x2(%r15), %eax
rolw $0x8, %ax
movzwl %ax, %r9d
leaq 0x2a994(%rip), %rdx # 0x44976
leaq -0x50(%rbp), %rcx
movq %rbx, %rdi
movq %r14, %rsi
xorl %eax, %eax
callq 0x90b0
jmp 0x1a040
addq $0x2, %r15
leaq 0x2d578(%rip), %rdx # 0x47578
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rcx
xorl %eax, %eax
callq 0x90b0
jmp 0x1a040
leaq 0x2a93b(%rip), %rbx # 0x44954
jmp 0x1a040
movzwl 0x2(%r15), %eax
rolw $0x8, %ax
movzwl %ax, %r8d
leaq 0x2a953(%rip), %rdx # 0x44982
leaq -0x50(%rbp), %rcx
movq %rbx, %rdi
movq %r14, %rsi
xorl %eax, %eax
callq 0x90b0
movq %rbx, %rax
addq $0x30, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/core/sockaddr.c
|
nng_sockaddr_port
|
// Extract the port from an INET or INET6 socket address (converted from
// the stored big-endian representation via NNI_GET16); other families
// yield 0.
uint32_t
nng_sockaddr_port(const nng_sockaddr *sa)
{
	uint16_t p;

	if (sa->s_family == NNG_AF_INET) {
		NNI_GET16(&sa->s_in.sa_port, p);
		return (p);
	}
	if (sa->s_family == NNG_AF_INET6) {
		NNI_GET16(&sa->s_in6.sa_port, p);
		return (p);
	}
	return (0);
}
|
pushq %rbp
movq %rsp, %rbp
movl (%rdi), %ecx
addl $-0x3, %ecx
xorl %eax, %eax
cmpw $0x1, %cx
ja 0x1a06c
movzwl 0x2(%rdi), %eax
rolw $0x8, %ax
movzwl %ax, %eax
popq %rbp
retq
|
/nanomsg[P]nng/src/core/sockaddr.c
|
sock_get_fd
|
// Fetch the pollable file descriptor for one direction of the socket.
// flag selects send vs. receive; if the protocol does not advertise that
// direction in its flags, NNG_ENOTSUP is returned.
static int
sock_get_fd(nni_sock *s, unsigned flag, int *fdp)
{
	nni_pollable *p;
	int           rv;

	if ((flag & nni_sock_flags(s)) == 0) {
		return (NNG_ENOTSUP);
	}

	rv = (flag == NNI_PROTO_FLAG_SND)
	    ? nni_msgq_get_sendable(s->s_uwq, &p)
	    : nni_msgq_get_recvable(s->s_urq, &p);

	if (rv == 0) {
		rv = nni_pollable_getfd(p, fdp);
	}
	return (rv);
}
|
testl %esi, 0xac(%rdi)
je 0x1a288
pushq %rbp
movq %rsp, %rbp
pushq %rbx
pushq %rax
movq %rdx, %rbx
cmpl $0x2, %esi
jne 0x1a28e
movq 0xc8(%rdi), %rdi
leaq -0x10(%rbp), %rsi
callq 0x27b70
jmp 0x1a29e
movl $0x9, %eax
retq
movq 0xd0(%rdi), %rdi
leaq -0x10(%rbp), %rsi
callq 0x27b3d
testl %eax, %eax
jne 0x1a2ae
movq -0x10(%rbp), %rdi
movq %rbx, %rsi
callq 0x27c62
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
|
/nanomsg[P]nng/src/core/socket.c
|
nni_sock_open
|
// Create a new socket for the given protocol and register it in the
// global socket table.  On success *sockp receives the socket and 0 is
// returned; on failure an NNG error code is returned and *sockp is left
// untouched.
int
nni_sock_open(nni_sock **sockp, const nni_proto *proto)
{
	nni_sock *s = NULL;
	int       rv;

	if (proto->proto_version != NNI_PROTOCOL_VERSION) {
		// unsupported protocol version
		return (NNG_ENOTSUP);
	}

	if ((rv = nni_sock_create(&s, proto)) != 0) {
		return (rv);
	}

	nni_mtx_lock(&sock_lk);
	if ((rv = nni_id_alloc32(&sock_ids, &s->s_id, s)) != 0) {
		// id allocation failed: drop the lock before tearing the
		// socket back down
		nni_mtx_unlock(&sock_lk);
		sock_destroy(s);
		return (rv);
	} else {
		// registered: make it visible and let the protocol run its
		// open hook before releasing the lock
		nni_list_append(&sock_list, s);
		s->s_sock_ops.sock_open(s->s_data);
		*sockp = s;
	}
	nni_mtx_unlock(&sock_lk);

#ifdef NNG_ENABLE_STATS
	// Set up basic stat values. The socket id wasn't
	// known at stat creation time, so we set it now.
	nni_stat_set_id(&s->st_id, (int) s->s_id);
	nni_stat_set_id(&s->st_root, (int) s->s_id);

	// Add our stats chain.
	nni_stat_register(&s->st_root);
#endif

	return (0);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movl $0x9, %r15d
cmpl $0x50520003, (%rsi) # imm = 0x50520003
jne 0x1a820
movq %rsi, %r12
movq %rdi, %r13
movq 0x30(%rsi), %rax
movl $0x558, %edi # imm = 0x558
addq (%rax), %rdi
callq 0x1e787
movq %rax, %rbx
xorl %eax, %eax
testq %rbx, %rbx
je 0x1a748
leaq 0x558(%rbx), %rcx
movq %rcx, 0xb8(%rbx)
movaps 0x2a5f4(%rip), %xmm0 # 0x44a20
movups %xmm0, 0x1a8(%rbx)
movq $0x0, 0x1b8(%rbx)
movl %eax, 0xa8(%rbx)
movl %eax, 0xb0(%rbx)
movups 0x8(%r12), %xmm0
movups %xmm0, 0xd8(%rbx)
movups 0x18(%r12), %xmm0
movups %xmm0, 0xe8(%rbx)
movl 0x28(%r12), %eax
movl %eax, 0xac(%rbx)
leaq 0x128(%rbx), %rdi
movq 0x30(%r12), %rsi
movl $0xa, %ecx
rep movsq (%rsi), %es:(%rdi)
movq 0x38(%r12), %rax
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups 0x20(%rax), %xmm2
movups %xmm2, 0x118(%rbx)
movups %xmm1, 0x108(%rbx)
movups %xmm0, 0xf8(%rbx)
movw $0x0, 0x220(%rbx)
movq 0x40(%r12), %rax
testq %rax, %rax
je 0x1a4db
movups (%rax), %xmm0
movups 0x10(%rax), %xmm1
movups 0x20(%rax), %xmm2
movups %xmm2, 0x198(%rbx)
movups %xmm1, 0x188(%rbx)
movups %xmm0, 0x178(%rbx)
cmpq $0x0, 0x140(%rbx)
jne 0x1a506
leaq 0x29a74(%rip), %rdi # 0x43f60
leaq 0x2a53d(%rip), %rsi # 0x44a30
leaq 0x2a6b9(%rip), %rcx # 0x44bb3
movl $0x212, %edx # imm = 0x212
xorl %eax, %eax
callq 0x19375
movq %r13, -0x38(%rbp)
cmpq $0x0, 0x148(%rbx)
jne 0x1a535
leaq 0x29a45(%rip), %rdi # 0x43f60
leaq 0x2a50e(%rip), %rsi # 0x44a30
leaq 0x2a6aa(%rip), %rcx # 0x44bd3
movl $0x213, %edx # imm = 0x213
xorl %eax, %eax
callq 0x19375
leaq 0x208(%rbx), %rdi
xorps %xmm0, %xmm0
movups %xmm0, (%rbx)
xorl %esi, %esi
callq 0x17560
leaq 0x1f0(%rbx), %rdi
movl $0x98, %esi
callq 0x17560
leaq 0x1c0(%rbx), %rdi
movl $0x80, %esi
callq 0x17560
leaq 0x1d8(%rbx), %rdi
movl $0x70, %esi
callq 0x17560
leaq 0x10(%rbx), %r15
movq %r15, %rdi
callq 0x1d66c
leaq 0x228(%rbx), %rdi
callq 0x1d66c
leaq 0x38(%rbx), %rdi
movq %r15, %rsi
callq 0x1d694
leaq 0x70(%rbx), %rdi
leaq 0x46a3d(%rip), %rsi # 0x60fe8
callq 0x1d694
leaq 0x298(%rbx), %r15
leaq 0x43252(%rip), %rsi # 0x5d810
movq %r15, %rdi
callq 0x1c516
leaq 0x2d8(%rbx), %r12
leaq 0x4325c(%rip), %rsi # 0x5d830
movq %r12, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r12, %rsi
callq 0x1c3e0
leaq 0x318(%rbx), %r13
leaq 0x4325b(%rip), %rsi # 0x5d850
movq %r13, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r13, %rsi
callq 0x1c3e0
leaq 0x358(%rbx), %r14
leaq 0x4325a(%rip), %rsi # 0x5d870
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x398(%rbx), %r14
leaq 0x43259(%rip), %rsi # 0x5d890
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x3d8(%rbx), %r14
leaq 0x43258(%rip), %rsi # 0x5d8b0
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x518(%rbx), %r14
leaq 0x43257(%rip), %rsi # 0x5d8d0
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x4d8(%rbx), %r14
leaq 0x43256(%rip), %rsi # 0x5d8f0
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x498(%rbx), %r14
leaq 0x43255(%rip), %rsi # 0x5d910
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x458(%rbx), %r14
leaq 0x43254(%rip), %rsi # 0x5d930
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
leaq 0x418(%rbx), %r14
leaq 0x43253(%rip), %rsi # 0x5d950
movq %r14, %rdi
callq 0x1c516
movq %r15, %rdi
movq %r14, %rsi
callq 0x1c3e0
movl 0xa8(%rbx), %esi
movq %r12, %rdi
callq 0x1c58b
movq 0xe0(%rbx), %rsi
movq %r13, %rdi
callq 0x1c59e
leaq 0xc8(%rbx), %rdi
xorl %esi, %esi
callq 0x27484
testl %eax, %eax
je 0x1a750
movl %eax, %r15d
movq -0x38(%rbp), %r13
jmp 0x1a770
movl $0x2, %r15d
jmp 0x1a778
leaq 0xd0(%rbx), %rdi
movl $0x1, %esi
callq 0x27484
testl %eax, %eax
movq -0x38(%rbp), %r13
je 0x1a832
movl %eax, %r15d
movq %rbx, %rdi
callq 0x1a8ff
xorl %ebx, %ebx
testl %r15d, %r15d
jne 0x1a820
leaq 0x4685e(%rip), %rdi # 0x60fe8
callq 0x1d680
leaq 0xa8(%rbx), %rsi
leaq 0x44dcb(%rip), %rdi # 0x5f568
movq %rbx, %rdx
callq 0x17231
testl %eax, %eax
je 0x1a7c2
movl %eax, %r15d
leaq 0x46835(%rip), %rdi # 0x60fe8
callq 0x1d68a
movq %rbx, %rdi
callq 0x1a8ff
jmp 0x1a820
leaq 0x44dd7(%rip), %rdi # 0x5f5a0
movq %rbx, %rsi
callq 0x1759e
movq 0xb8(%rbx), %rdi
callq *0x140(%rbx)
movq %rbx, (%r13)
leaq 0x467ff(%rip), %rdi # 0x60fe8
callq 0x1d68a
leaq 0x2d8(%rbx), %rdi
movl 0xa8(%rbx), %esi
callq 0x1c58b
movl 0xa8(%rbx), %esi
addq $0x298, %rbx # imm = 0x298
movq %rbx, %rdi
callq 0x1c58b
movq %rbx, %rdi
callq 0x1c420
xorl %r15d, %r15d
movl %r15d, %eax
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x1a8(%rbx), %r14
leaq 0x1ac(%rbx), %r12
movq %rbx, %r15
addq $0x1b8, %r15 # imm = 0x1B8
movq 0xb8(%rbx), %rdi
movq %rbx, %rsi
callq *0x130(%rbx)
leaq 0x27fd3(%rip), %rsi # 0x42834
movl $0x4, %ecx
movq %rbx, %rdi
movq %r14, %rdx
movl $0x4, %r8d
callq 0x1b2af
leaq 0x27f49(%rip), %rsi # 0x427c7
movl $0x4, %ecx
movq %rbx, %rdi
movq %r12, %rdx
movl $0x4, %r8d
callq 0x1b2af
leaq 0x28158(%rip), %rsi # 0x429f3
movl $0x8, %ecx
movq %rbx, %rdi
movq %r15, %rdx
movl $0x3, %r8d
callq 0x1b2af
leaq -0x29(%rbp), %r14
movb $0x1, (%r14)
leaq 0x2a334(%rip), %rsi # 0x44bf4
movl $0x1, %ecx
movq %rbx, %rdi
movq %r14, %rdx
movl $0x1, %r8d
callq 0x1b2af
movb $0x0, (%r14)
leaq 0x2a31f(%rip), %rsi # 0x44c00
movl $0x1, %ecx
movq %rbx, %rdi
movq %r14, %rdx
movl $0x1, %r8d
callq 0x1b2af
xorl %r15d, %r15d
jmp 0x1a77a
|
/nanomsg[P]nng/src/core/socket.c
|
sock_destroy
|
// Final teardown of a socket.  Order matters: stats are unregistered
// first, then the protocol's private state is finalized, then the message
// queues and synchronization primitives, and finally the socket memory
// itself is released.
static void
sock_destroy(nni_sock *s)
{
#ifdef NNG_ENABLE_STATS
	nni_stat_unregister(&s->st_root);
#endif

	// The protocol needs to clean up its state.
	if (s->s_data != NULL) {
		s->s_sock_ops.sock_fini(s->s_data);
	}

	nni_msgq_fini(s->s_urq);
	nni_msgq_fini(s->s_uwq);
	nni_cv_fini(&s->s_close_cv);
	nni_cv_fini(&s->s_cv);
	nni_mtx_fini(&s->s_mx);
	nni_mtx_fini(&s->s_pipe_cbs_mtx);
	nni_free(s, s->s_size);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
pushq %rax
movq %rdi, %rbx
addq $0x298, %rdi # imm = 0x298
callq 0x1c4a6
movq 0xb8(%rbx), %rdi
testq %rdi, %rdi
je 0x1a926
callq *0x138(%rbx)
movq 0xd0(%rbx), %rdi
callq 0x27532
movq 0xc8(%rbx), %rdi
callq 0x27532
leaq 0x70(%rbx), %rdi
callq 0x1d69e
leaq 0x38(%rbx), %rdi
callq 0x1d69e
leaq 0x10(%rbx), %rdi
callq 0x1d676
leaq 0x228(%rbx), %rdi
callq 0x1d676
movq 0xc0(%rbx), %rsi
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %rbp
jmp 0x1e7a2
|
/nanomsg[P]nng/src/core/socket.c
|
nni_sock_remove_listener
|
// Detach a listener from its owning socket's listener list (under the
// socket mutex) and release the hold the socket had on it.
void
nni_sock_remove_listener(nni_listener *l)
{
	nni_sock *sock = l->l_sock;

	nni_mtx_lock(&sock->s_mx);
	NNI_ASSERT(nni_list_node_active(&l->l_node));
	nni_list_node_remove(&l->l_node);
	nni_mtx_unlock(&sock->s_mx);

	// also drop the hold from the socket
	nni_listener_rele(l);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq 0x90(%rdi), %r14
addq $0x10, %r14
movq %r14, %rdi
callq 0x1d680
leaq 0x80(%rbx), %r15
movq %r15, %rdi
callq 0x17779
testl %eax, %eax
jne 0x1b0dc
leaq 0x28e9e(%rip), %rdi # 0x43f60
leaq 0x29967(%rip), %rsi # 0x44a30
leaq 0x29a33(%rip), %rcx # 0x44b03
movl $0x376, %edx # imm = 0x376
xorl %eax, %eax
callq 0x19375
movq %r15, %rdi
callq 0x17788
movq %r14, %rdi
callq 0x1d68a
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x17dad
|
/nanomsg[P]nng/src/core/socket.c
|
nni_sock_add_dialer
|
// Attach dialer d to socket s.  Takes a hold on the dialer, seeds the
// dialer's endpoint options from the socket's current values, then links
// the dialer into the socket's dialer list.  Returns 0 on success or an
// NNG error code; on any failure path the dialer hold is released.
int
nni_sock_add_dialer(nni_sock *s, nni_dialer *d)
{
	int rv;

	// grab a hold on the dialer for the socket
	if ((rv = nni_dialer_hold(d)) != 0) {
		return (rv);
	}

	// copy initial values for some options from socket
	for (int i = 0; ep_options[i].eo_name != NULL; i++) {
		uint64_t             val; // big enough
		const nni_ep_option *o = &ep_options[i];

		rv = nni_sock_getopt(s, o->eo_name, &val, NULL, o->eo_type);
		if (rv == 0) {
			rv = nni_dialer_setopt(
			    d, o->eo_name, &val, 0, o->eo_type);
		}
		// NNG_ENOTSUP just means this option does not apply to one
		// side or the other; any other error aborts the add.
		if (rv != 0 && rv != NNG_ENOTSUP) {
			nni_dialer_rele(d);
			return (rv);
		}
	}

	nni_mtx_lock(&s->s_mx);
	if (s->s_closing) {
		// socket is shutting down: drop our hold and bail
		nni_mtx_unlock(&s->s_mx);
		nni_dialer_rele(d);
		return (NNG_ECLOSED);
	}
	nni_list_append(&s->s_dialers, d);
#ifdef NNG_ENABLE_STATS
	nni_stat_inc(&s->st_dialers, 1);
#endif
	nni_mtx_unlock(&s->s_mx);
	return (0);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdi, %rbx
movq %rsi, -0x30(%rbp)
movq %rsi, %rdi
callq 0x164b3
testl %eax, %eax
jne 0x1b22d
movq %rbx, -0x48(%rbp)
leaq 0x29091(%rip), %r13 # 0x441c2
movl $0x10, %r12d
movq $0x0, -0x40(%rbp)
leaq 0x425ca(%rip), %r14 # 0x5d710
movq %r14, %rbx
movl -0x8(%r12,%r14), %r14d
movq -0x48(%rbp), %rdi
movq %r13, %rsi
leaq -0x50(%rbp), %rdx
xorl %ecx, %ecx
movl %r14d, %r8d
callq 0x1afce
movl %eax, %r15d
testl %eax, %eax
jne 0x1b182
movq -0x30(%rbp), %rdi
movq %r13, %rsi
leaq -0x50(%rbp), %rdx
xorl %ecx, %ecx
movl %r14d, %r8d
callq 0x16708
movl %eax, %r15d
testl %r15d, %r15d
je 0x1b19f
cmpl $0x9, %r15d
movq %rbx, %r14
je 0x1b1a2
movq -0x30(%rbp), %rdi
callq 0x164f4
movl %r15d, -0x34(%rbp)
jmp 0x1b1a2
movq %rbx, %r14
cmpl $0x9, %r15d
je 0x1b1ad
testl %r15d, %r15d
jne 0x1b1c5
movq (%r12,%r14), %r13
testq %r13, %r13
sete %al
movq %rax, -0x40(%rbp)
addq $0x10, %r12
cmpq $0x40, %r12
jne 0x1b146
testb $0x1, -0x40(%rbp)
movq -0x48(%rbp), %rbx
movl -0x34(%rbp), %eax
je 0x1b22d
leaq 0x10(%rbx), %r15
movq %r15, %rdi
callq 0x1d680
cmpb $0x1, 0x220(%rbx)
jne 0x1b1ff
movq %r15, %rdi
callq 0x1d68a
movq -0x30(%rbp), %rdi
callq 0x164f4
movl $0x7, %eax
jmp 0x1b22d
leaq 0x1d8(%rbx), %rdi
movq -0x30(%rbp), %rsi
callq 0x1759e
addq $0x358, %rbx # imm = 0x358
movl $0x1, %esi
movq %rbx, %rdi
callq 0x1c551
movq %r15, %rdi
callq 0x1d68a
xorl %eax, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/core/socket.c
|
nni_sock_remove_dialer
|
// Detach a dialer from its owning socket's dialer list and release the
// reference the socket held on it.  Mirror of nni_sock_remove_listener;
// the dialer must currently be linked (asserted).
void
nni_sock_remove_dialer(nni_dialer *d)
{
	nni_sock *s = d->d_sock;
	nni_mtx_lock(&s->s_mx);
	NNI_ASSERT(nni_list_node_active(&d->d_node));
	nni_list_node_remove(&d->d_node);
	nni_mtx_unlock(&s->s_mx);
	// also drop the hold from the socket
	nni_dialer_rele(d);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq 0x80(%rdi), %r14
addq $0x10, %r14
movq %r14, %rdi
callq 0x1d680
leaq 0x70(%rbx), %r15
movq %r15, %rdi
callq 0x17779
testl %eax, %eax
jne 0x1b28d
leaq 0x28ced(%rip), %rdi # 0x43f60
leaq 0x297b6(%rip), %rsi # 0x44a30
leaq 0x298a3(%rip), %rcx # 0x44b24
movl $0x3ac, %edx # imm = 0x3AC
xorl %eax, %eax
callq 0x19375
movq %r15, %rdi
callq 0x17788
movq %r14, %rdi
callq 0x1d68a
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x164f4
|
/nanomsg[P]nng/src/core/socket.c
|
nni_sock_set_pipe_cb
|
// Install (or clear, by passing cb == NULL) the user's pipe-event
// callback for event `ev`, then recompute s_want_evs so the socket
// knows whether any event callback at all remains registered.
// Out-of-range event ids are silently ignored.
void
nni_sock_set_pipe_cb(nni_sock *s, int ev, nng_pipe_cb cb, void *arg)
{
	if ((ev > NNG_PIPE_EV_NONE) && (ev < NNG_PIPE_EV_NUM)) {
		nni_mtx_lock(&s->s_pipe_cbs_mtx);
		s->s_pipe_cbs[ev].cb_fn = cb;
		s->s_pipe_cbs[ev].cb_arg = arg;
		s->s_want_evs = false;
		// Scan the whole table: other events may still have
		// callbacks installed.  (Reuses `ev` as the loop index;
		// the original value is not needed past this point.)
		for (ev = NNG_PIPE_EV_NONE; ev < NNG_PIPE_EV_NUM; ev++) {
			if (s->s_pipe_cbs[ev].cb_fn != NULL) {
				s->s_want_evs = true;
			}
		}
		nni_mtx_unlock(&s->s_pipe_cbs_mtx);
	}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl %esi, %r13d
leal -0x1(%r13), %eax
cmpl $0x2, %eax
ja 0x1b3d7
movq %rcx, %r15
movq %rdx, %r12
movq %rdi, %rbx
leaq 0x228(%rdi), %r14
movq %r14, %rdi
callq 0x1d680
leaq 0x250(%rbx), %rax
movl %r13d, %ecx
shlq $0x4, %rcx
movq %r12, 0x250(%rbx,%rcx)
movq %r15, 0x258(%rbx,%rcx)
movb $0x0, 0x290(%rbx)
xorl %ecx, %ecx
cmpq $0x0, (%rax,%rcx)
je 0x1b3b7
movb $0x1, 0x290(%rbx)
addq $0x10, %rcx
cmpq $0x40, %rcx
jne 0x1b3a9
movq %r14, %rdi
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x1d68a
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/core/socket.c
|
nni_ctx_find
|
// Look up a context by id and take a reference on it.  On success *cp
// receives the context with its refcount bumped; returns NNG_ECLOSED if
// the id is unknown, or if the context or its socket is closed.
int
nni_ctx_find(nni_ctx **cp, uint32_t id)
{
	int rv = 0;
	nni_ctx *ctx;

	nni_mtx_lock(&sock_lk);
	if ((ctx = nni_id_get(&ctx_ids, id)) != NULL) {
		// We refuse a reference if either the socket is
		// closed, or the context is closed.  (If the socket
		// is closed, and we are only getting the reference so
		// we can close it, then we still allow.  In the case
		// the only valid operation will be to close the
		// socket.)
		if (ctx->c_closed || ctx->c_sock->s_closed) {
			rv = NNG_ECLOSED;
		} else {
			ctx->c_ref++;
			*cp = ctx;
		}
	} else {
		rv = NNG_ECLOSED;
	}
	nni_mtx_unlock(&sock_lk);
	return (rv);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
movl %esi, %r14d
movq %rdi, %rbx
leaq 0x45bee(%rip), %rdi # 0x60fe8
callq 0x1d680
movl %r14d, %esi
leaq 0x441af(%rip), %rdi # 0x5f5b8
callq 0x16c3d
movl $0x7, %r14d
testq %rax, %rax
je 0x1b435
cmpb $0x0, 0x58(%rax)
jne 0x1b435
movq 0x10(%rax), %rcx
cmpb $0x0, 0x221(%rcx)
jne 0x1b435
incl 0x5c(%rax)
movq %rax, (%rbx)
xorl %r14d, %r14d
leaq 0x45bac(%rip), %rdi # 0x60fe8
callq 0x1d68a
movl %r14d, %eax
popq %rbx
popq %r14
popq %rbp
retq
|
/nanomsg[P]nng/src/core/socket.c
|
nni_ctx_rele
|
// Drop one reference on a context.  When the last reference is released
// AND the context has been marked closed, remove it from the global id
// map and its socket's context list, wake anyone waiting in socket
// close, and destroy it.  Otherwise this is just a refcount decrement.
void
nni_ctx_rele(nni_ctx *ctx)
{
	nni_sock *sock = ctx->c_sock;
	nni_mtx_lock(&sock_lk);
	ctx->c_ref--;
	if ((ctx->c_ref > 0) || (!ctx->c_closed)) {
		// Either still have an active reference, or not
		// actually closing yet.
		nni_mtx_unlock(&sock_lk);
		return;
	}

	// Remove us from the hash, so we can't be found any more.
	// This allows our ID to be reused later, although the system
	// tries to avoid ID reuse.
	nni_id_remove(&ctx_ids, ctx->c_id);
	nni_list_remove(&sock->s_ctxs, ctx);
	nni_cv_wake(&sock->s_close_cv);
	nni_mtx_unlock(&sock_lk);
	nni_ctx_destroy(ctx);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
movq %rdi, %rbx
movq 0x10(%rdi), %r14
leaq 0x45b80(%rip), %rdi # 0x60fe8
callq 0x1d680
decl 0x5c(%rbx)
jne 0x1b4ca
cmpb $0x0, 0x58(%rbx)
je 0x1b4ca
movl 0x60(%rbx), %esi
leaq 0x44136(%rip), %rdi # 0x5f5b8
callq 0x16d6c
leaq 0x208(%r14), %rdi
movq %rbx, %rsi
callq 0x17726
addq $0x70, %r14
movq %r14, %rdi
callq 0x1d6d4
leaq 0x45b3f(%rip), %rdi # 0x60fe8
callq 0x1d68a
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x1b4ba
callq *0x28(%rbx)
movq 0x50(%rbx), %rsi
movq %rbx, %rdi
popq %rbx
popq %r14
popq %rbp
jmp 0x1e7a2
leaq 0x45b17(%rip), %rdi # 0x60fe8
popq %rbx
popq %r14
popq %rbp
jmp 0x1d68a
|
/nanomsg[P]nng/src/core/socket.c
|
nni_pipe_start
|
// Start a newly created pipe by handing it to whichever endpoint owns
// it (exactly one of listener/dialer must be set; both asserted).
void
nni_pipe_start(nni_pipe *p)
{
	// exactly one of these must be set.
	NNI_ASSERT(p->p_listener == NULL || p->p_dialer == NULL);
	NNI_ASSERT(p->p_listener != NULL || p->p_dialer != NULL);

	// NB: starting the pipe can actually cause the pipe
	// to be deallocated before this returns (if it is rejected)
	if (p->p_listener) {
		listener_start_pipe(p->p_listener, p);
	} else if (p->p_dialer) {
		dialer_start_pipe(p->p_dialer, p);
	}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x90, %rsp
movq %rdi, %rbx
cmpq $0x0, 0xc8(%rdi)
je 0x1bbcd
cmpq $0x0, 0xc0(%rbx)
je 0x1bbcd
leaq 0x283ad(%rip), %rdi # 0x43f60
leaq 0x28e76(%rip), %rsi # 0x44a30
leaq 0x28f98(%rip), %rcx # 0x44b59
movl $0x602, %edx # imm = 0x602
xorl %eax, %eax
callq 0x19375
cmpq $0x0, 0xc8(%rbx)
jne 0x1bc02
cmpq $0x0, 0xc0(%rbx)
jne 0x1bc02
leaq 0x28378(%rip), %rdi # 0x43f60
leaq 0x28e41(%rip), %rsi # 0x44a30
leaq 0x28f90(%rip), %rcx # 0x44b86
movl $0x603, %edx # imm = 0x603
xorl %eax, %eax
callq 0x19375
movq 0xc8(%rbx), %r15
testq %r15, %r15
je 0x1bc81
movq 0x90(%r15), %r14
leaq 0x5d8(%r15), %rdi
movl $0x1, %esi
callq 0x1c551
leaq 0x3d8(%r14), %rdi
movl $0x1, %esi
callq 0x1c551
movq %rbx, %rdi
movl $0x1, %esi
callq 0x1bee8
movq %rbx, %rdi
callq 0x19545
testb %al, %al
je 0x1bd70
addq $0x818, %r15 # imm = 0x818
movl $0x1, %esi
movq %r15, %rdi
callq 0x1c551
addq $0x518, %r14 # imm = 0x518
movl $0x1, %esi
movq %r14, %rdi
callq 0x1c551
jmp 0x1bdc9
movq 0xc0(%rbx), %r15
testq %r15, %r15
je 0x1bed8
movq 0x80(%r15), %r14
leaq 0x10(%r14), %r12
movq %r12, %rdi
callq 0x1d680
movq %rbx, 0x88(%r15)
movl 0x478(%r15), %eax
movl %eax, 0x474(%r15)
movq %r12, %rdi
callq 0x1d68a
leaq 0x3d8(%r14), %rdi
movl $0x1, %esi
callq 0x1c551
leaq 0x610(%r15), %rdi
movl $0x1, %esi
callq 0x1c551
movq %rbx, %rdi
movl $0x1, %esi
callq 0x1bee8
movq %rbx, %rdi
callq 0x19545
testb %al, %al
je 0x1bd87
addq $0x890, %r15 # imm = 0x890
movl $0x1, %esi
movq %r15, %rdi
callq 0x1c551
addq $0x518, %r14 # imm = 0x518
movl $0x1, %esi
movq %r14, %rdi
callq 0x1c551
callq 0x18328
cmpl $0x7, %eax
jb 0x1bed0
movq %rbx, %rdi
callq 0x19a43
movl %eax, %r14d
leaq -0xb0(%rbp), %rsi
movq %rbx, %rdi
callq 0x19b24
leaq 0x28f74(%rip), %rdi # 0x44ccb
leaq 0x28f7c(%rip), %rsi # 0x44cda
movl %r14d, %edx
movq %rax, %rcx
xorl %eax, %eax
callq 0x183f3
jmp 0x1bed0
movq 0x90(%rbx), %rdi
callq *0x68(%rbx)
testl %eax, %eax
je 0x1bde0
addq $0x818, %r15 # imm = 0x818
jmp 0x1bda0
movq 0x90(%rbx), %rdi
callq *0x68(%rbx)
testl %eax, %eax
je 0x1be53
addq $0x890, %r15 # imm = 0x890
movl $0x1, %esi
movq %r15, %rdi
callq 0x1c551
addq $0x518, %r14 # imm = 0x518
movl $0x1, %esi
movq %r14, %rdi
callq 0x1c551
movq %rbx, %rdi
callq 0x1950b
movq %rbx, %rdi
addq $0x90, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0x194ce
leaq 0x100(%rbx), %r15
movl (%rbx), %esi
movq %r15, %rdi
callq 0x1c58b
leaq 0x140(%rbx), %rdi
movl (%rbx), %esi
callq 0x1c58b
movq %r15, %rdi
callq 0x1c420
movq %rbx, %rdi
movl $0x2, %esi
callq 0x1bee8
callq 0x18328
cmpl $0x7, %eax
jb 0x1bed0
movq %rbx, %rdi
callq 0x194df
movl %eax, %r15d
movl 0xa8(%r14), %r14d
leaq -0xb0(%rbp), %rsi
movq %rbx, %rdi
callq 0x19b24
leaq 0x28e4e(%rip), %rdi # 0x44c98
leaq 0x28e52(%rip), %rsi # 0x44ca3
jmp 0x1bec0
leaq 0x100(%rbx), %r15
movl (%rbx), %esi
movq %r15, %rdi
callq 0x1c58b
leaq 0x140(%rbx), %rdi
movl (%rbx), %esi
callq 0x1c58b
movq %r15, %rdi
callq 0x1c420
movq %rbx, %rdi
movl $0x2, %esi
callq 0x1bee8
callq 0x18328
cmpl $0x7, %eax
jb 0x1bed0
movq %rbx, %rdi
callq 0x194df
movl %eax, %r15d
movl 0xa8(%r14), %r14d
leaq -0xb0(%rbp), %rsi
movq %rbx, %rdi
callq 0x19b24
leaq 0x28e51(%rip), %rdi # 0x44d0a
leaq 0x28e56(%rip), %rsi # 0x44d16
movl %r15d, %edx
movl %r14d, %ecx
movq %rax, %r8
xorl %eax, %eax
callq 0x183f3
movq %rbx, %rdi
callq 0x194ce
addq $0x90, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/core/socket.c
|
xreq0_putq_cb
|
// Completion callback for the put-queue aio (message being pushed to
// the socket's upper receive queue).  On failure the message is dropped
// and the pipe closed; on success the next receive is started.
static void
xreq0_putq_cb(void *arg)
{
	xreq0_pipe *p = arg;

	if (nni_aio_result(&p->aio_putq) != 0) {
		// putq failed; the message is still ours to free.
		nni_msg_free(nni_aio_get_msg(&p->aio_putq));
		nni_aio_set_msg(&p->aio_putq, NULL);
		nni_pipe_close(p->pipe);
		return;
	}
	nni_aio_set_msg(&p->aio_putq, NULL);
	nni_pipe_recv(p->pipe, &p->aio_recv);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
movq %rdi, %rbx
leaq 0x568(%rdi), %r14
movq %r14, %rdi
callq 0x14dab
testl %eax, %eax
je 0x21e09
movq %r14, %rdi
callq 0x14d42
movq %rax, %rdi
callq 0x188d9
movq %r14, %rdi
xorl %esi, %esi
callq 0x14d35
movq (%rbx), %rdi
popq %rbx
popq %r14
popq %rbp
jmp 0x1950b
movq %r14, %rdi
xorl %esi, %esi
callq 0x14d35
movq (%rbx), %rdi
addq $0x3a0, %rbx # imm = 0x3A0
movq %rbx, %rsi
popq %rbx
popq %r14
popq %rbp
jmp 0x194e7
|
/nanomsg[P]nng/src/sp/protocol/reqrep0/xreq.c
|
nuts_clock
|
// Millisecond timestamp for test timing.
// NOTE(review): "CLOCK_MONTONIC" in the #elif below is a typo for
// CLOCK_MONOTONIC, so that branch is dead code and the gettimeofday()
// fallback is what actually compiles on POSIX systems (the disassembly
// paired with this snippet calls time()/gettimeofday(), confirming it).
// The fallback is not monotonic and can go backwards if the wall clock
// is stepped; it returns 0 to deliberately fail timing tests then.
uint64_t
nuts_clock(void)
{
#ifdef _WIN32
	return (GetTickCount64());
#elif defined(CLOCK_MONTONIC)
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	uint64_t val;

	val = ts.tv_sec;
	val *= 1000;
	val += ts.tv_nsec / 1000000;
	return (val);
#else
	// Baseline captured on first call so the returned values stay
	// small.  NOTE(review): unsynchronized static init — presumably
	// fine for single-threaded test startup; confirm if called
	// concurrently.
	static time_t epoch;
	struct timeval tv;

	if (epoch == 0) {
		epoch = time(NULL);
	}
	gettimeofday(&tv, NULL);
	if (tv.tv_sec < epoch) {
		// Broken clock.
		// This will force all other timing tests to fail
		return (0);
	}
	tv.tv_sec -= epoch;
	return (
	    ((uint64_t) (tv.tv_sec) * 1000) + (uint64_t) (tv.tv_usec / 1000));
#endif
	// NOTE(review): this #include block looks stranded inside the
	// function body (both branches above return before it) —
	// presumably a merge/paste artifact; verify against upstream.
#ifdef _WIN32
#else
#include <fcntl.h>
#include <unistd.h>
#endif
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
subq $0x10, %rsp
cmpq $0x0, 0x3d2e9(%rip) # 0x61210
jne 0x23f37
xorl %edi, %edi
callq 0x9800
movq %rax, 0x3d2d9(%rip) # 0x61210
xorl %ebx, %ebx
leaq -0x20(%rbp), %r14
movq %r14, %rdi
xorl %esi, %esi
callq 0x9210
movq (%r14), %rcx
subq 0x3d2bf(%rip), %rcx # 0x61210
jl 0x23f80
movq %rcx, -0x20(%rbp)
movabsq $0x20c49ba5e353f7cf, %rax # imm = 0x20C49BA5E353F7CF
imulq -0x18(%rbp)
movq %rdx, %rbx
imulq $0x3e8, %rcx, %rax # imm = 0x3E8
movq %rdx, %rcx
shrq $0x3f, %rcx
sarq $0x7, %rbx
addq %rcx, %rbx
addq %rax, %rbx
movq %rbx, %rax
addq $0x10, %rsp
popq %rbx
popq %r14
popq %rbp
retq
|
/nanomsg[P]nng/src/testing/util.c
|
nuts_poll_fd
|
// Return true iff the descriptor is readable right now (poll with a
// zero timeout).  A poll error (negative return) falls through both
// switch cases and reports false.
bool
nuts_poll_fd(int fd)
{
#ifdef _WIN32
	struct pollfd pfd;
	pfd.fd = (SOCKET) fd;
	pfd.events = POLLRDNORM;
	pfd.revents = 0;

	switch (WSAPoll(&pfd, 1, 0)) {
	case 0:
		return (false);
	case 1:
		return (true);
	}
#else
	struct pollfd pfd;
	pfd.fd = fd;
	pfd.events = POLLRDNORM;
	pfd.revents = 0;

	switch (poll(&pfd, 1, 0)) {
	case 0:
		return (false);
	case 1:
		return (true);
	}
#endif
	return (false);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
leaq -0x8(%rbp), %rax
movl %edi, (%rax)
movl $0x40, 0x4(%rax)
movl $0x1, %esi
movq %rax, %rdi
xorl %edx, %edx
callq 0x9440
cmpl $0x1, %eax
sete %al
addq $0x10, %rsp
popq %rbp
retq
|
/nanomsg[P]nng/src/testing/util.c
|
nuts_sleep
|
// Sleep for (at least) msec milliseconds.  On POSIX with a monotonic
// clock, nanosleep is retried with the remaining time so signal
// interrupts do not cut the sleep short.
void
nuts_sleep(int msec)
{
#ifdef _WIN32
	Sleep(msec);
#elif defined(CLOCK_MONOTONIC)
	struct timespec ts;

	ts.tv_sec = msec / 1000;
	ts.tv_nsec = (msec % 1000) * 1000000;

	// Do this in a loop, so that interrupts don't actually wake us.
	while (ts.tv_sec || ts.tv_nsec) {
		// nanosleep(&ts, &ts) writes the unslept remainder back
		// into ts, so an EINTR just resumes where we left off.
		if (nanosleep(&ts, &ts) == 0) {
			break;
		}
	}
#else
	poll(NULL, 0, msec);
#endif
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
subq $0x18, %rsp
movslq %edi, %rax
imulq $0x10624dd3, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $0x3f, %rdx
sarq $0x26, %rcx
addl %edx, %ecx
movslq %ecx, %rcx
leaq -0x18(%rbp), %rbx
movq %rcx, (%rbx)
imull $0x3e8, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
imull $0xf4240, %eax, %eax # imm = 0xF4240
cltq
movq %rax, 0x8(%rbx)
movq -0x10(%rbp), %rax
orq -0x18(%rbp), %rax
je 0x24029
movq %rbx, %rdi
movq %rbx, %rsi
callq 0x9600
testl %eax, %eax
jne 0x24010
addq $0x18, %rsp
popq %rbx
popq %rbp
retq
|
/nanomsg[P]nng/src/testing/util.c
|
nuts_has_ipv6
|
// Probe for working IPv6 support by trying to open a UDP endpoint bound
// to the loopback address ::1 (port 0).  Returns true iff the open
// succeeds; the endpoint is closed again immediately.
bool
nuts_has_ipv6(void)
{
	nng_sockaddr sa = { 0 };
	nng_udp *u;
	int rv;

	sa.s_in6.sa_family = NNG_AF_INET6;
	sa.s_in6.sa_port = 0;
	// all-zero address with last byte 1 == ::1 (loopback)
	memset(sa.s_in6.sa_addr, 0, 16);
	sa.s_in6.sa_addr[15] = 1;

	rv = nng_udp_open(&u, &sa);
	if (rv == 0) {
		nng_udp_close(u);
	}
	return (rv == 0 ? 1 : 0);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %rbx
subq $0x98, %rsp
leaq -0x98(%rbp), %rbx
movl $0x88, %edx
movq %rbx, %rdi
xorl %esi, %esi
callq 0x90a0
movw $0x4, (%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x2(%rbx)
movw $0x0, 0x12(%rbx)
movb $0x1, 0x13(%rbx)
leaq -0x10(%rbp), %rdi
movq %rbx, %rsi
callq 0x148e6
movl %eax, %ebx
testl %eax, %eax
jne 0x24083
movq -0x10(%rbp), %rdi
callq 0x148f0
testl %ebx, %ebx
sete %al
addq $0x98, %rsp
popq %rbx
popq %rbp
retq
|
/nanomsg[P]nng/src/testing/util.c
|
ipc_ep_init_listener
|
// Transport hook: initialize an IPC endpoint in listener mode.  Sets up
// the common endpoint state with the accept callback, records the owning
// listener, and allocates the underlying stream listener for the URL.
// Returns 0 on success or the stream allocation error.
static int
ipc_ep_init_listener(void *arg, nng_url *url, nni_listener *listener)
{
	ipc_ep *ep = arg;
	int rv;
	nni_sock *sock = nni_listener_sock(listener);

	ipc_ep_init(ep, sock, ipc_ep_accept_cb);
	ep->nlistener = listener;

	if ((rv = nng_stream_listener_alloc_url(&ep->listener, url)) != 0) {
		return (rv);
	}
#ifdef NNG_ENABLE_STATS
	nni_listener_add_stat(listener, &ep->st_rcv_max);
#endif
	return (0);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
movq %rdx, %rdi
callq 0x17fd5
leaq 0x1d9(%rip), %rdx # 0x2b306
movq %r14, %rdi
movq %rax, %rsi
callq 0x2ae1e
movq %rbx, 0x48(%r14)
leaq 0x40(%r14), %rdi
movq %r15, %rsi
callq 0x36282
testl %eax, %eax
jne 0x2b160
addq $0x420, %r14 # imm = 0x420
movq %rbx, %rdi
movq %r14, %rsi
callq 0x181c7
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/sp/transport/ipc/ipc.c
|
tcptran_dialer_setopt
|
// Set an option on a TCP dialer endpoint.  The underlying stream dialer
// gets first refusal; if it does not recognize the option (NNG_ENOTSUP)
// the transport-level option table is consulted instead.
static int
tcptran_dialer_setopt(
    void *arg, const char *name, const void *buf, size_t sz, nni_type t)
{
	tcptran_ep *ep = arg;
	int rv;

	rv = nni_stream_dialer_set(ep->dialer, name, buf, sz, t);
	if (rv == NNG_ENOTSUP) {
		rv = nni_setopt(tcptran_ep_opts, name, ep, buf, sz, t);
	}
	return (rv);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl %r8d, %ebx
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r13
movq %rdi, %r12
movq 0x410(%rdi), %rdi
callq 0x361af
cmpl $0x9, %eax
jne 0x2c3ca
leaq 0x31a18(%rip), %rdi # 0x5ddc0
movq %r13, %rsi
movq %r12, %rdx
movq %r15, %rcx
movq %r14, %r8
movl %ebx, %r9d
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x19256
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_ep_init
|
// Common initialization for a TCP transport endpoint (shared by dialer
// and listener setup): mutex, pipe lists, cached protocol id, the two
// aios (connect completion and negotiation timer), and the rcv_max stat.
static void
tcptran_ep_init(tcptran_ep *ep, nni_sock *sock, void (*conn_cb)(void *))
{
	nni_mtx_init(&ep->mtx);
	NNI_LIST_INIT(&ep->waitpipes, tcptran_pipe, node);
	NNI_LIST_INIT(&ep->negopipes, tcptran_pipe, node);

	ep->proto = nni_sock_proto_id(sock);

	nni_aio_init(&ep->connaio, conn_cb, ep);
	nni_aio_init(&ep->timeaio, tcptran_timer_cb, ep);

#ifdef NNG_ENABLE_STATS
	static const nni_stat_info rcv_max_info = {
		.si_name   = "rcv_max",
		.si_desc   = "maximum receive size",
		.si_type   = NNG_STAT_LEVEL,
		.si_unit   = NNG_UNIT_BYTES,
		.si_atomic = true,
	};
	nni_stat_init(&ep->st_rcv_max, &rcv_max_info);
#endif
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
callq 0x1d66c
leaq 0x3e0(%rbx), %rdi
movl $0x28, %esi
callq 0x17560
leaq 0x3f8(%rbx), %rdi
movl $0x28, %esi
callq 0x17560
movq %r15, %rdi
callq 0x1ae0e
movw %ax, 0x28(%rbx)
leaq 0x50(%rbx), %rdi
movq %r14, %rsi
movq %rbx, %rdx
callq 0x149d4
leaq 0x218(%rbx), %rdi
leaq 0xc2(%rip), %rsi # 0x2c4fe
movq %rbx, %rdx
callq 0x149d4
addq $0x430, %rbx # imm = 0x430
leaq 0x3194e(%rip), %rsi # 0x5dda0
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x1c516
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_dial_cb
|
// Completion callback for an outbound TCP connect.  On success, wraps
// the new stream in a pipe and begins SP-header negotiation.  On any
// failure (connect error, endpoint closed, pipe allocation failure) the
// error is delivered straight to the user's pending aio, if any.
static void
tcptran_dial_cb(void *arg)
{
	tcptran_ep *ep = arg;
	nni_aio *aio = &ep->connaio;
	tcptran_pipe *p;
	int rv;
	nng_stream *conn;

	nni_mtx_lock(&ep->mtx);
	if ((rv = nni_aio_result(aio)) != 0) {
		goto error;
	}

	conn = nni_aio_get_output(aio, 0);
	if (ep->closed) {
		// Endpoint shut down while we were connecting; the new
		// stream is ours to dispose of.
		nng_stream_free(conn);
		rv = NNG_ECLOSED;
		goto error;
	}
	if ((rv = nni_pipe_alloc_dialer((void **) &p, ep->ndialer)) != 0) {
		nng_stream_free(conn);
		goto error;
	}

	tcptran_pipe_start(p, conn, ep);
	nni_mtx_unlock(&ep->mtx);
	return;

error:
	// Error connecting.  We need to pass this straight back
	// to the user.
	if ((aio = ep->useraio) != NULL) {
		ep->useraio = NULL;
		nni_aio_finish_error(aio, rv);
	}
	nni_mtx_unlock(&ep->mtx);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x50(%rdi), %r15
callq 0x1d680
movq %r15, %rdi
callq 0x14dab
movl %eax, %r14d
testl %eax, %eax
jne 0x2c4c1
movq %r15, %rdi
xorl %esi, %esi
callq 0x14d92
movq %rax, %r15
movl $0x7, %r14d
cmpb $0x0, 0x3a(%rbx)
jne 0x2c4b9
movq 0x428(%rbx), %rsi
leaq -0x20(%rbp), %rdi
callq 0x19568
movl %eax, %r14d
testl %eax, %eax
je 0x2c4ed
movq %r15, %rdi
callq 0x3603a
movq 0x48(%rbx), %rdi
testq %rdi, %rdi
je 0x2c4da
movq $0x0, 0x48(%rbx)
movl %r14d, %esi
callq 0x1515e
movq %rbx, %rdi
callq 0x1d68a
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq -0x20(%rbp), %rdi
movq %r15, %rsi
movq %rbx, %rdx
callq 0x2c537
jmp 0x2c4da
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_pipe_start
|
// Begin SP protocol negotiation on a freshly connected stream.  Builds
// the fixed 8-byte SP handshake header ("\0SP\0" + 16-bit protocol id +
// two zero bytes), queues it for send on the negotiation aio, and parks
// the pipe on the endpoint's negotiating list.
static void
tcptran_pipe_start(tcptran_pipe *p, nng_stream *conn, tcptran_ep *ep)
{
	nni_iov iov;

	p->conn = conn;
	p->ep = ep;
	p->proto = ep->proto;

	p->txlen[0] = 0;
	p->txlen[1] = 'S';
	p->txlen[2] = 'P';
	p->txlen[3] = 0;
	// protocol id in network (big-endian) byte order
	NNI_PUT16(&p->txlen[4], p->proto);
	NNI_PUT16(&p->txlen[6], 0);

	// both sides exchange exactly 8 header bytes
	p->gotrxhead = 0;
	p->gottxhead = 0;
	p->wantrxhead = 8;
	p->wanttxhead = 8;
	iov.iov_len = 8;
	iov.iov_buf = &p->txlen[0];
	nni_aio_set_iov(&p->negoaio, 1, &iov);
	nni_list_append(&ep->negopipes, p);

	nni_aio_set_timeout(&p->negoaio, 10000); // 10 sec timeout to negotiate
	nng_stream_send(p->conn, &p->negoaio);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
movq %rdx, %r14
movq %rdi, %rbx
movq %rsi, (%rdi)
movq %rdx, 0x38(%rdi)
movzwl 0x28(%rdx), %eax
movw %ax, 0x12(%rdi)
leaq 0x40(%rdi), %rcx
xorl %edx, %edx
movb %dl, 0x40(%rdi)
movb $0x53, 0x41(%rdi)
movb $0x50, 0x42(%rdi)
movb %dl, 0x43(%rdi)
movb %ah, 0x44(%rdi)
movb %al, 0x45(%rdi)
movb %dl, 0x46(%rdi)
movb %dl, 0x47(%rdi)
xorps %xmm0, %xmm0
movups %xmm0, 0x50(%rdi)
movl $0x8, %eax
movq %rax, 0x68(%rdi)
movq %rax, 0x60(%rdi)
leaq -0x28(%rbp), %rdx
movq %rax, 0x8(%rdx)
movq %rcx, (%rdx)
leaq 0x430(%rdi), %r15
movq %r15, %rdi
movl $0x1, %esi
callq 0x14b76
addq $0x3f8, %r14 # imm = 0x3F8
movq %r14, %rdi
movq %rbx, %rsi
callq 0x1759e
movq %r15, %rdi
movl $0x2710, %esi # imm = 0x2710
callq 0x14d11
movq (%rbx), %rdi
movq %r15, %rsi
callq 0x36048
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_ep_cancel
|
// Cancellation hook for a user's pending dial aio.  Only acts if the
// aio being cancelled is still the one registered on the endpoint;
// otherwise the operation already completed and there is nothing to do.
static void
tcptran_ep_cancel(nni_aio *aio, void *arg, int rv)
{
	tcptran_ep *ep = arg;

	nni_mtx_lock(&ep->mtx);
	if (ep->useraio == aio) {
		ep->useraio = NULL;
		nni_aio_finish_error(aio, rv);
	}
	nni_mtx_unlock(&ep->mtx);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
pushq %rax
movl %edx, %r14d
movq %rsi, %rbx
movq %rdi, %r15
movq %rsi, %rdi
callq 0x1d680
cmpq %r15, 0x48(%rbx)
jne 0x2c615
movq $0x0, 0x48(%rbx)
movq %r15, %rdi
movl %r14d, %esi
callq 0x1515e
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
jmp 0x1d68a
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_ep_get_recvmaxsz
|
// Option getter: copy the endpoint's maximum receive size out to the
// caller (type-checked by nni_copyout_size), under the endpoint lock.
static int
tcptran_ep_get_recvmaxsz(void *arg, void *v, size_t *szp, nni_opt_type t)
{
	tcptran_ep *ep = arg;
	int rv;

	nni_mtx_lock(&ep->mtx);
	rv = nni_copyout_size(ep->rcvmax, v, szp, t);
	nni_mtx_unlock(&ep->mtx);
	return (rv);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %ecx, %ebx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
callq 0x1d680
movq 0x30(%r12), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl %ebx, %ecx
callq 0x1915d
movl %eax, %ebx
movq %r12, %rdi
callq 0x1d68a
movl %ebx, %eax
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
tcptran_ep_set_recvmaxsz
|
// Option setter: validate and store the endpoint's maximum receive
// size (0..NNI_MAXSZ).  The stat is updated after the lock is dropped;
// it reflects the value just written.
static int
tcptran_ep_set_recvmaxsz(void *arg, const void *v, size_t sz, nni_opt_type t)
{
	tcptran_ep *ep = arg;
	size_t val;
	int rv;

	if ((rv = nni_copyin_size(&val, v, sz, 0, NNI_MAXSZ, t)) == 0) {
		nni_mtx_lock(&ep->mtx);
		ep->rcvmax = val;
		nni_mtx_unlock(&ep->mtx);
#ifdef NNG_ENABLE_STATS
		nni_stat_set_value(&ep->st_rcv_max, val);
#endif
	}
	return (rv);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r14
pushq %rbx
subq $0x10, %rsp
movl %ecx, %r9d
movq %rdi, %rbx
leaq -0x18(%rbp), %rdi
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
xorl %ecx, %ecx
callq 0x1907a
movl %eax, %r14d
testl %eax, %eax
jne 0x2c6bd
movq %rbx, %rdi
callq 0x1d680
movq -0x18(%rbp), %rax
movq %rax, 0x30(%rbx)
movq %rbx, %rdi
callq 0x1d68a
addq $0x430, %rbx # imm = 0x430
movq -0x18(%rbp), %rsi
movq %rbx, %rdi
callq 0x1c62d
movl %r14d, %eax
addq $0x10, %rsp
popq %rbx
popq %r14
popq %rbp
retq
|
/nanomsg[P]nng/src/sp/transport/tcp/tcp.c
|
nng_http_handler_alloc_directory
|
// Public wrapper: allocate an HTTP handler that serves a filesystem
// directory at the given URI.  Thin forward to the internal
// implementation when HTTP support is compiled in; NNG_ENOTSUP stub
// otherwise.
nng_err
nng_http_handler_alloc_directory(
    nng_http_handler **hp, const char *uri, const char *path)
{
#ifdef NNG_SUPP_HTTP
	return (nni_http_handler_init_directory(hp, uri, path));
#else
	NNI_ARG_UNUSED(hp);
	NNI_ARG_UNUSED(uri);
	NNI_ARG_UNUSED(path);
	return (NNG_ENOTSUP);
#endif
}
|
pushq %rbp
movq %rsp, %rbp
popq %rbp
jmp 0x3dbeb
|
/nanomsg[P]nng/src/supplemental/http/http_public.c
|
http_txn_cb
|
// State machine driving one HTTP client transaction.  Invoked each time
// the transaction's aio completes: after the request is sent it reads
// the response headers, then (depending on Transfer-Encoding /
// Content-Length) reads the body directly, reads chunks, or finishes.
// All failures funnel through `error`, which fails the user's aios and
// closes the connection.
static void
http_txn_cb(void *arg)
{
	http_txn *txn = arg;
	const char *str;
	char *end;
	nng_err rv;
	uint64_t len;
	nni_iov iov;
	char *dst;
	size_t sz;
	nni_http_chunk *chunk = NULL;

	nni_mtx_lock(&http_txn_lk);
	if ((rv = nni_aio_result(&txn->aio)) != NNG_OK) {
		http_txn_finish_aios(txn, rv);
		nni_mtx_unlock(&http_txn_lk);
		http_txn_fini(txn);
		return;
	}
	switch (txn->state) {

	case HTTP_SENDING:
		// Request fully written; now read the response.
		txn->state = HTTP_RECVING;
		nni_http_read_res(txn->conn, &txn->aio);
		nni_mtx_unlock(&http_txn_lk);
		return;

	case HTTP_RECVING:
		// Detect chunked encoding. You poor bastard. (Only if not
		// HEAD.)
		if ((strcmp(nni_http_get_method(txn->conn), "HEAD") != 0) &&
		    ((str = nni_http_get_header(
		          txn->conn, "Transfer-Encoding")) != NULL) &&
		    (strstr(str, "chunked") != NULL)) {

			if ((rv = nni_http_chunks_init(&txn->chunks, 0)) !=
			    NNG_OK) {
				goto error;
			}
			txn->state = HTTP_RECVING_CHUNKS;
			nni_http_read_chunks(
			    txn->conn, txn->chunks, &txn->aio);
			nni_mtx_unlock(&http_txn_lk);
			return;
		}

		if ((strcmp(nni_http_get_method(txn->conn), "HEAD") == 0) ||
		    ((str = nni_http_get_header(
		          txn->conn, "Content-Length")) == NULL) ||
		    ((len = (uint64_t) strtoull(str, &end, 10)) == 0) ||
		    (end == NULL) || (*end != '\0')) {
			// If no content-length, or HEAD (which per RFC
			// never transfers data), then we are done.
			http_txn_finish_aios(txn, 0);
			nni_mtx_unlock(&http_txn_lk);
			http_txn_fini(txn);
			return;
		}

		// Known content length: allocate the body buffer and
		// read it all in one go.
		if ((rv = nni_http_res_alloc_data(txn->res, (size_t) len)) !=
		    NNG_OK) {
			goto error;
		}
		nni_http_get_body(txn->conn, &iov.iov_buf, &iov.iov_len);
		nni_aio_set_iov(&txn->aio, 1, &iov);
		txn->state = HTTP_RECVING_BODY;
		nni_http_read_full(txn->conn, &txn->aio);
		nni_mtx_unlock(&http_txn_lk);
		return;

	case HTTP_RECVING_BODY:
		// All done!
		http_txn_finish_aios(txn, 0);
		nni_mtx_unlock(&http_txn_lk);
		http_txn_fini(txn);
		return;

	case HTTP_RECVING_CHUNKS:
		// All done, but now we need to coalesce the chunks, for
		// yet *another* copy.  Chunked transfers are such crap.
		sz = nni_http_chunks_size(txn->chunks);
		if ((rv = nni_http_res_alloc_data(txn->res, sz)) != 0) {
			goto error;
		}
		nni_http_get_body(txn->conn, (void **) &dst, &sz);
		while ((chunk = nni_http_chunks_iter(txn->chunks, chunk)) !=
		    NULL) {
			memcpy(dst, nni_http_chunk_data(chunk),
			    nni_http_chunk_size(chunk));
			dst += nni_http_chunk_size(chunk);
		}
		http_txn_finish_aios(txn, 0);
		nni_mtx_unlock(&http_txn_lk);
		http_txn_fini(txn);
		return;
	}

	// NOTE(review): an unknown state falls out of the switch into
	// `error` with rv still NNG_OK; presumably unreachable given the
	// state transitions above — confirm against the enum definition.
error:
	http_txn_finish_aios(txn, rv);
	nni_http_conn_close(txn->conn);
	nni_mtx_unlock(&http_txn_lk);
	http_txn_fini(txn);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x20, %rsp
movq %rdi, %rbx
leaq 0x27a68(%rip), %rdi # 0x61520
callq 0x1d680
movq %rbx, %rdi
callq 0x14dab
testl %eax, %eax
je 0x39afa
movq %rbx, %rdi
movl %eax, %esi
callq 0x39de1
leaq 0x27a46(%rip), %rdi # 0x61520
callq 0x1d68a
leaq 0x26e42(%rip), %rdi # 0x60928
movq %rbx, %rsi
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0x19c48
movl 0x200(%rbx), %eax
cmpq $0x3, %rax
ja 0x39c00
leaq 0xd2df(%rip), %rcx # 0x46df0
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movl $0x1, 0x200(%rbx)
movq 0x1e8(%rbx), %rdi
movq %rbx, %rsi
callq 0x3a8e1
leaq 0x279e6(%rip), %rdi # 0x61520
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
jmp 0x1d68a
movq 0x1e8(%rbx), %rdi
callq 0x3adec
leaq 0xd323(%rip), %rsi # 0x46e81
movq %rax, %rdi
callq 0x95d0
testl %eax, %eax
je 0x39c1b
movq 0x1e8(%rbx), %rdi
leaq 0xd054(%rip), %rsi # 0x46bd0
callq 0x3b811
testq %rax, %rax
je 0x39c1b
leaq 0xd051(%rip), %rsi # 0x46be2
movq %rax, %rdi
callq 0x9470
testq %rax, %rax
je 0x39c1b
leaq 0x1f8(%rbx), %rdi
xorl %esi, %esi
callq 0x39e94
testl %eax, %eax
jne 0x39bfc
movl $0x3, 0x200(%rbx)
movq 0x1e8(%rbx), %rdi
movq 0x1f8(%rbx), %rsi
movq %rbx, %rdx
callq 0x3a922
jmp 0x39b33
movq 0x1f8(%rbx), %rdi
callq 0x39f5d
movq %rax, -0x28(%rbp)
movq 0x1f0(%rbx), %rdi
movq %rax, %rsi
callq 0x3c9ba
testl %eax, %eax
je 0x39c8e
movl %eax, %esi
jmp 0x39c02
xorl %esi, %esi
movq %rbx, %rdi
callq 0x39de1
movq 0x1e8(%rbx), %rdi
callq 0x3a446
jmp 0x39ad3
movq 0x1e8(%rbx), %rdi
callq 0x3adec
leaq 0xd253(%rip), %rsi # 0x46e81
movq %rax, %rdi
callq 0x95d0
testl %eax, %eax
je 0x39c84
movq 0x1e8(%rbx), %rdi
leaq 0xcf79(%rip), %rsi # 0x46bc1
callq 0x3b811
testq %rax, %rax
je 0x39c84
leaq -0x28(%rbp), %r14
movq %rax, %rdi
movq %r14, %rsi
movl $0xa, %edx
callq 0x96b0
testq %rax, %rax
sete %dl
movq (%r14), %rcx
testq %rcx, %rcx
sete %sil
orb %dl, %sil
jne 0x39c84
cmpb $0x0, (%rcx)
je 0x39d32
movq %rbx, %rdi
xorl %esi, %esi
jmp 0x39ace
movq 0x1e8(%rbx), %rdi
leaq -0x38(%rbp), %rsi
leaq -0x28(%rbp), %rdx
callq 0x3b88e
movq 0x1f8(%rbx), %rdi
xorl %esi, %esi
callq 0x39f48
testq %rax, %rax
je 0x39d00
movq %rax, %r14
movq -0x38(%rbp), %r15
movq %r14, %rdi
callq 0x39f97
movq %rax, %r12
movq %r14, %rdi
callq 0x39f8d
movq %r15, %rdi
movq %r12, %rsi
movq %rax, %rdx
callq 0x97c0
movq %r14, %rdi
callq 0x39f8d
addq %rax, -0x38(%rbp)
movq 0x1f8(%rbx), %rdi
movq %r14, %rsi
callq 0x39f48
movq %rax, %r14
testq %rax, %rax
jne 0x39cb8
movq %rbx, %rdi
xorl %esi, %esi
callq 0x39de1
leaq 0x2780f(%rip), %rdi # 0x61520
callq 0x1d68a
leaq 0x26c0b(%rip), %rdi # 0x60928
movq %rbx, %rsi
callq 0x19c48
addq $0x20, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
movq 0x1f0(%rbx), %rdi
movq %rax, %rsi
callq 0x3c9ba
testl %eax, %eax
jne 0x39bfc
movq 0x1e8(%rbx), %rdi
leaq -0x30(%rbp), %rdx
leaq -0x38(%rbp), %r14
movq %r14, %rsi
callq 0x3b88e
movq %rbx, %rdi
movl $0x1, %esi
movq %r14, %rdx
callq 0x14b76
movl $0x2, 0x200(%rbx)
movq 0x1e8(%rbx), %rdi
movq %rbx, %rsi
callq 0x3a976
leaq 0x27790(%rip), %rdi # 0x61520
callq 0x1d68a
jmp 0x39d25
|
/nanomsg[P]nng/src/supplemental/http/http_client.c
|
nni_http_set_header
|
// Set (add or replace) a header on the outgoing side of the connection
// (the request when acting as a client, the response otherwise).  Header
// name matching is case-insensitive.  Returns NNG_OK, or NNG_ENOMEM if
// any required duplication fails; on failure existing state is intact.
static nng_err
http_set_header(nng_http *conn, const char *key, const char *val)
{
	nni_http_entity *data =
	    conn->client ? &conn->req.data : &conn->res.data;
	http_header *h;
	NNI_LIST_FOREACH (&data->hdrs, h) {
		if (nni_strcasecmp(key, h->name) == 0) {
			// Replace the value in place; duplicate first so
			// a failed allocation leaves the old value alone.
			char *news;
			if ((news = nni_strdup(val)) == NULL) {
				return (NNG_ENOMEM);
			}
			// static values are not heap-owned, so only free
			// the old value when it was allocated.
			// NOTE(review): static_value is not cleared here,
			// so a later replacement of this header would not
			// free `news` — possible leak; confirm upstream.
			if (!h->static_value) {
				nni_strfree(h->value);
				h->value = NULL;
			}
			h->value = news;
			return (NNG_OK);
		}
	}

	// Not present yet: allocate a fresh header node owning copies of
	// both the name and the value.
	if ((h = NNI_ALLOC_STRUCT(h)) == NULL) {
		return (NNG_ENOMEM);
	}
	h->alloc_header = true;
	if ((h->name = nni_strdup(key)) == NULL) {
		NNI_FREE_STRUCT(h);
		return (NNG_ENOMEM);
	}
	if ((h->value = nni_strdup(val)) == NULL) {
		nni_strfree(h->name);
		NNI_FREE_STRUCT(h);
		return (NNG_ENOMEM);
	}
	nni_list_append(&data->hdrs, h);
	return (NNG_OK);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %r14
movq %rsi, %r12
movq %rdi, %r13
callq 0x3b5eb
xorl %ebx, %ebx
testb %al, %al
jne 0x3b800
leaq 0x530(%r13), %r15
cmpb $0x0, 0x8e1(%r13)
leaq 0x408(%r13), %rax
cmovneq %rax, %r15
addq $0xf8, %r15
movq %r15, %rdi
callq 0x17571
testq %rax, %rax
je 0x3b772
movq %rax, %r13
movq (%r13), %rsi
movq %r12, %rdi
callq 0x1cf4a
testl %eax, %eax
je 0x3b7b7
movq %r15, %rdi
movq %r13, %rsi
callq 0x176dd
movq %rax, %r13
testq %rax, %rax
jne 0x3b74f
movl $0x28, %edi
callq 0x1e787
testq %rax, %rax
je 0x3b7fb
movq %rax, %r13
orb $0x4, 0x20(%rax)
movq %r12, %rdi
callq 0x1ce20
movq %rax, (%r13)
testq %rax, %rax
je 0x3b7ee
movq %r14, %rdi
callq 0x1ce20
movq %rax, 0x8(%r13)
testq %rax, %rax
je 0x3b7e5
movq %r15, %rdi
movq %r13, %rsi
callq 0x1759e
jmp 0x3b800
movq %r14, %rdi
callq 0x1ce20
testq %rax, %rax
je 0x3b7fb
movq %rax, %r14
testb $0x2, 0x20(%r13)
jne 0x3b7df
movq 0x8(%r13), %rdi
callq 0x1ce59
movq $0x0, 0x8(%r13)
movq %r14, 0x8(%r13)
jmp 0x3b800
movq (%r13), %rdi
callq 0x1ce59
movl $0x28, %esi
movq %r13, %rdi
callq 0x1e7a2
movl $0x2, %ebx
movl %ebx, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/nanomsg[P]nng/src/supplemental/http/http_conn.c
|
Am_Input_Char::operator==(Am_Input_Char) const
|
// Equality test for input characters.  Am_MOUSE_MOVED is special-cased:
// it never matches any other code (including ANY-* wildcards), so when
// exactly one side is Am_MOUSE_MOVED we can answer false without the
// full comparison; otherwise defer to helper_check_equal.
bool operator==(Am_Input_Char i) const
{
  //quick exit for mouse_moved == something else, for efficiency.
  // Mouse-moved doesn't match ANY-* so is an easy test
  if ((i.code == Am_MOUSE_MOVED && code != Am_MOUSE_MOVED) ||
      (code == Am_MOUSE_MOVED && i.code != Am_MOUSE_MOVED))
    return false;
  else
    return helper_check_equal(i);
}
|
pushq %rbp
movq %rsp, %rbp
subq $0x20, %rsp
movl %esi, -0x8(%rbp)
movq %rdi, -0x10(%rbp)
movq -0x10(%rbp), %rax
movq %rax, -0x20(%rbp)
movswl -0x8(%rbp), %eax
cmpl $0x103, %eax # imm = 0x103
jne 0x2db0
movq -0x20(%rbp), %rax
movswl (%rax), %eax
cmpl $0x103, %eax # imm = 0x103
jne 0x2dc9
movq -0x20(%rbp), %rax
movswl (%rax), %eax
cmpl $0x103, %eax # imm = 0x103
jne 0x2dcf
movswl -0x8(%rbp), %eax
cmpl $0x103, %eax # imm = 0x103
je 0x2dcf
movb $0x0, -0x1(%rbp)
jmp 0x2de6
movq -0x20(%rbp), %rdi
movl -0x8(%rbp), %eax
movl %eax, -0x14(%rbp)
movl -0x14(%rbp), %esi
callq 0x2040
andb $0x1, %al
movb %al, -0x1(%rbp)
movb -0x1(%rbp), %al
andb $0x1, %al
addq $0x20, %rsp
popq %rbp
retq
|
/ProgrammerArchaeology[P]openamulet/include/amulet/idefs.h
|
WebPAllocateDecBuffer
|
// Validate dimensions/options, apply optional cropping and scaling, then
// allocate the pixel planes of 'buffer'. Returns VP8_STATUS_OK on success,
// VP8_STATUS_INVALID_PARAM on bad arguments or out-of-frame crop window.
VP8StatusCode WebPAllocateDecBuffer(int width, int height,
                                    const WebPDecoderOptions* const options,
                                    WebPDecBuffer* const buffer) {
  VP8StatusCode result;
  // Reject a missing output descriptor or non-positive dimensions up front.
  if (buffer == NULL || width <= 0 || height <= 0) {
    return VP8_STATUS_INVALID_PARAM;
  }
  if (options != NULL) {  // Apply the decoding options first, if any.
    if (options->use_cropping) {
      const int crop_w = options->crop_width;
      const int crop_h = options->crop_height;
      // Crop offsets are snapped down to even values.
      const int left = options->crop_left & ~1;
      const int top = options->crop_top & ~1;
      if (!WebPCheckCropDimensions(width, height, left, top, crop_w, crop_h)) {
        return VP8_STATUS_INVALID_PARAM;  // crop window out of frame boundary
      }
      width = crop_w;
      height = crop_h;
    }
    if (options->use_scaling) {
#if !defined(WEBP_REDUCE_SIZE)
      int out_w = options->scaled_width;
      int out_h = options->scaled_height;
      if (!WebPRescalerGetScaledDimensions(width, height, &out_w, &out_h)) {
        return VP8_STATUS_INVALID_PARAM;
      }
      width = out_w;
      height = out_h;
#else
      return VP8_STATUS_INVALID_PARAM;  // rescaling not supported
#endif
    }
  }
  buffer->width = width;
  buffer->height = height;
  // Then, allocate buffer for real.
  result = AllocateBuffer(buffer);
  if (result != VP8_STATUS_OK) return result;
  // Vertical flip, when requested, is implemented via the stride trick.
  if (options != NULL && options->flip) {
    result = WebPFlipBuffer(buffer);
  }
  return result;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movl $0x2, %ebp
testl %esi, %esi
jle 0xa814
testl %edi, %edi
jle 0xa814
movq %rcx, %rbx
testq %rcx, %rcx
je 0xa814
movq %rdx, %r14
movl %esi, %r15d
testq %rdx, %rdx
je 0xa6ac
cmpl $0x0, 0x8(%r14)
je 0xa676
movl 0x14(%r14), %r12d
movl 0x18(%r14), %r13d
movl 0xc(%r14), %edx
movl 0x10(%r14), %ecx
andl $-0x2, %edx
andl $-0x2, %ecx
movl %r15d, %esi
movl %r12d, %r8d
movl %r13d, %r9d
callq 0x130c6
movl %r13d, %r15d
movl %r12d, %edi
testl %eax, %eax
je 0xa814
cmpl $0x0, 0x1c(%r14)
je 0xa6ac
movl 0x20(%r14), %eax
leaq 0xc(%rsp), %rdx
movl %eax, (%rdx)
movl 0x24(%r14), %eax
leaq 0x8(%rsp), %rcx
movl %eax, (%rcx)
movl %r15d, %esi
callq 0x30f7a
testl %eax, %eax
je 0xa814
movl 0xc(%rsp), %edi
movl 0x8(%rsp), %r15d
movl %edi, 0x4(%rbx)
movl %r15d, 0x8(%rbx)
movl (%rbx), %eax
testl %edi, %edi
setle %cl
testl %r15d, %r15d
setle %dl
orb %cl, %dl
cmpl $0xd, %eax
setae %cl
orb %dl, %cl
jne 0xa814
cmpl $0x0, 0xc(%rbx)
jg 0xa6dd
cmpq $0x0, 0x70(%rbx)
je 0xa719
testq %r14, %r14
sete %r15b
movq %rbx, %rdi
callq 0xaa4b
movl %eax, %ebp
testl %eax, %eax
setne %al
orb %r15b, %al
jne 0xa814
cmpl $0x0, 0x30(%r14)
je 0xa712
movq %rbx, %rdi
callq 0xa56c
movl %eax, %ebp
jmp 0xa814
xorl %ebp, %ebp
jmp 0xa814
movl %eax, %ecx
movl %edi, %r12d
leaq 0x49273(%rip), %rax # 0x53998
movzbl (%rcx,%rax), %r13d
movq %r13, %rax
imulq %r12, %rax
cmpq $0x7fffffff, %rax # imm = 0x7FFFFFFF
ja 0xa814
imull %edi, %r13d
movl %r15d, %eax
movq %r13, %rbp
imulq %rax, %rbp
cmpl $0xb, %ecx
movq %rcx, 0x20(%rsp)
jb 0xa784
leal 0x1(%rdi), %edx
shrl %edx
incl %r15d
shrl %r15d
movq %rdx, 0x10(%rsp)
imulq %rdx, %r15
imulq %rax, %r12
xorl %eax, %eax
cmpl $0xc, %ecx
cmovneq %rax, %r12
cmpq $0xc, %rcx
cmovel %edi, %eax
movq %rax, 0x18(%rsp)
jmp 0xa79c
xorl %r12d, %r12d
xorl %r15d, %r15d
movq $0x0, 0x18(%rsp)
movq $0x0, 0x10(%rsp)
leaq (%r12,%rbp), %rax
leaq (%rax,%r15,2), %rdi
movl $0x1, %esi
callq 0x3145c
testq %rax, %rax
je 0xa80f
movq %rax, 0x70(%rbx)
movq %rax, 0x10(%rbx)
movq 0x20(%rsp), %rdx
cmpl $0xb, %edx
jb 0xa825
movl %r13d, 0x30(%rbx)
movq %rbp, 0x40(%rbx)
addq %rbp, %rax
movq %rax, 0x18(%rbx)
movq 0x10(%rsp), %rsi
movl %esi, 0x34(%rbx)
movq %r15, 0x48(%rbx)
leaq (%rax,%r15), %rcx
movq %rcx, 0x20(%rbx)
movl %esi, 0x38(%rbx)
movq %r15, 0x50(%rbx)
cmpl $0xc, %edx
jne 0xa7fe
addq %r15, %r15
addq %r15, %rax
movq %rax, 0x28(%rbx)
movq %r12, 0x58(%rbx)
movq 0x18(%rsp), %rax
movl %eax, 0x3c(%rbx)
jmp 0xa6dd
movl $0x1, %ebp
movl %ebp, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl %r13d, 0x18(%rbx)
movq %rbp, 0x20(%rbx)
jmp 0xa6dd
|
/PKRoma[P]libwebp/src/dec/buffer_dec.c
|
WebPIAppend
|
// Append 'data_size' bytes to the incremental decoder's internal buffer and
// resume decoding. Only valid while the decoder is in the SUSPENDED state
// and has not been used with WebPIUpdate() (the remap API).
VP8StatusCode WebPIAppend(WebPIDecoder* idec,
                          const uint8_t* data, size_t data_size) {
  VP8StatusCode rc;
  if (idec == NULL || data == NULL) {
    return VP8_STATUS_INVALID_PARAM;
  }
  rc = IDecCheckStatus(idec);
  if (rc == VP8_STATUS_SUSPENDED) {
    if (!CheckMemBufferMode(&idec->mem_, MEM_MODE_APPEND)) {
      // Mixed calls between RemapMemBuffer and AppendToMemBuffer.
      rc = VP8_STATUS_INVALID_PARAM;
    } else if (!AppendToMemBuffer(idec, data, data_size)) {
      rc = VP8_STATUS_OUT_OF_MEMORY;
    } else {
      rc = IDecode(idec);
    }
  }
  return rc;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
testq %rdi, %rdi
sete %al
testq %rsi, %rsi
sete %cl
orb %al, %cl
movl $0x2, %ebp
jne 0xb13a
movq %rdx, %r14
movq %rdi, %rbx
movl (%rdi), %edx
xorl %eax, %eax
cmpl $0x6, %edx
setne %al
leal (%rax,%rax,4), %eax
cmpl $0x7, %edx
movl $0x3, %ebp
cmovnel %eax, %ebp
cmpl $0x5, %ebp
jne 0xb13a
movq %rsi, %r15
movl 0x128(%rbx), %eax
cmpl $0x1, %eax
je 0xb0dc
movl $0x2, %ebp
testl %eax, %eax
jne 0xb13a
movl $0x1, 0x128(%rbx)
movq 0x80(%rbx), %rax
movb $0x1, %cl
testl %edx, %edx
je 0xb103
cmpl $0x0, 0x78(%rbx)
jne 0xb103
cmpq $0x0, 0xb98(%rax)
je 0xb103
cmpl $0x0, 0xba8(%rax)
setne %cl
movq 0x148(%rbx), %r12
testq %r12, %r12
je 0xb11b
movq 0x130(%rbx), %rdx
addq %r12, %rdx
jmp 0xb11d
xorl %edx, %edx
movq %rdx, %rsi
testb %cl, %cl
jne 0xb12b
movq 0xb98(%rax), %rsi
movl $0x1, %ebp
movl $0xfffffff6, %eax # imm = 0xFFFFFFF6
cmpq %rax, %r14
jbe 0xb14b
movl %ebp, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq 0x138(%rbx), %r13
leaq (%r14,%r13), %rax
cmpq 0x140(%rbx), %rax
movq %rdx, 0x20(%rsp)
jbe 0xb1ef
subq 0x130(%rbx), %r13
subq %rsi, %rdx
movq %rdx, 0x18(%rsp)
addq %rdx, %r13
leaq (%r14,%r13), %rdi
addq $0xfff, %rdi # imm = 0xFFF
andq $-0x1000, %rdi # imm = 0xF000
movq %rsi, 0x8(%rsp)
movl $0x1, %esi
movq %rdi, 0x10(%rsp)
callq 0x3145c
testq %rax, %rax
je 0xb13a
movq %rax, %r12
movq 0x8(%rsp), %rsi
testq %rsi, %rsi
je 0xb1bd
movq %r12, %rdi
movq %r13, %rdx
callq 0x45d0
movq 0x148(%rbx), %rdi
callq 0x314a8
movq %r12, 0x148(%rbx)
movq 0x10(%rsp), %rax
movq %rax, 0x140(%rbx)
movq 0x18(%rsp), %rax
movq %rax, 0x130(%rbx)
movq %r13, 0x138(%rbx)
addq %r12, %r13
movq %r13, %rdi
movq %r15, %rsi
movq %r14, %rdx
callq 0x45d0
addq %r14, 0x138(%rbx)
movq 0x148(%rbx), %rsi
addq 0x130(%rbx), %rsi
subq 0x20(%rsp), %rsi
movq %rbx, %rdi
callq 0xbc19
movq %rbx, %rdi
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0xb235
|
/PKRoma[P]libwebp/src/dec/idec_dec.c
|
FinishDecoding
|
// Finalize an incremental decode: mark the decoder done, apply the optional
// vertical flip, and — if the user supplied an external output buffer —
// slow-copy the pixels into it and release the internal buffer.
static VP8StatusCode FinishDecoding(WebPIDecoder* const idec) {
  const WebPDecoderOptions* const opts = idec->params_.options;
  WebPDecBuffer* const out = idec->params_.output;
  idec->state_ = STATE_DONE;
  // The flip (stride trick) must happen before any pixel copy below.
  if (opts != NULL && opts->flip) {
    const VP8StatusCode flip_status = WebPFlipBuffer(out);
    if (flip_status != VP8_STATUS_OK) return flip_status;
  }
  if (idec->final_output_ != NULL) {
    WebPCopyDecBufferPixels(out, idec->final_output_);  // do the slow-copy
    WebPFreeDecBuffer(&idec->output_);
    *out = *idec->final_output_;
    idec->final_output_ = NULL;
  }
  return VP8_STATUS_OK;
}
|
pushq %rbp
pushq %r14
pushq %rbx
movq %rdi, %rbx
movq 0x8(%rdi), %r14
movq 0x30(%rdi), %rax
movl $0x6, (%rdi)
testq %rax, %rax
je 0xbd9d
cmpl $0x0, 0x30(%rax)
je 0xbd9d
movq %r14, %rdi
callq 0xa56c
movl %eax, %ebp
testl %eax, %eax
jne 0xbddc
movq 0x1d8(%rbx), %rsi
xorl %ebp, %ebp
testq %rsi, %rsi
je 0xbddc
movq %r14, %rdi
callq 0xa910
leaq 0x160(%rbx), %rdi
callq 0xa879
movq 0x1d8(%rbx), %rsi
movl $0xf, %ecx
movq %r14, %rdi
rep movsq (%rsi), %es:(%rdi)
movq $0x0, 0x1d8(%rbx)
movl %ebp, %eax
popq %rbx
popq %r14
popq %rbp
retq
nop
|
/PKRoma[P]libwebp/src/dec/idec_dec.c
|
CustomSetup
|
// Per-decode setup hook installed on the VP8Io: inspects the requested
// colorspace and wires up the emitter function pointers (p->emit,
// p->emit_alpha, p->emit_alpha_row) plus any scratch memory they need.
// Returns 1 on success, 0 on memory error or unsupported configuration.
static int CustomSetup(VP8Io* io) {
  WebPDecParams* const p = (WebPDecParams*)io->opaque;
  const WEBP_CSP_MODE colorspace = p->output->colorspace;
  const int is_rgb = WebPIsRGBMode(colorspace);
  const int is_alpha = WebPIsAlphaMode(colorspace);
  // Reset all emitters/scratch so a failed setup leaves a clean state.
  p->memory = NULL;
  p->emit = NULL;
  p->emit_alpha = NULL;
  p->emit_alpha_row = NULL;
  // NOTE(review): the mode hint is MODE_YUV when alpha IS present and
  // MODE_YUVA otherwise — looks inverted at first glance; confirm against
  // WebPIoInitFromOptions' contract before "fixing".
  if (!WebPIoInitFromOptions(p->options, io, is_alpha ? MODE_YUV : MODE_YUVA)) {
    return 0;
  }
  if (is_alpha && WebPIsPremultipliedMode(colorspace)) {
    WebPInitUpsamplers();
  }
  if (io->use_scaling) {
#if !defined(WEBP_REDUCE_SIZE)
    const int ok = is_rgb ? InitRGBRescaler(io, p) : InitYUVRescaler(io, p);
    if (!ok) {
      return 0;    // memory error
    }
#else
    return 0;   // rescaling support not compiled
#endif
  } else {
    if (is_rgb) {
      WebPInitSamplers();
      p->emit = EmitSampledRGB;   // default
      if (io->fancy_upsampling) {
#ifdef FANCY_UPSAMPLING
        // EmitFancyRGB keeps one luma row and one chroma row pair between
        // calls; allocate mb_w (tmp_y) + 2 * uv_width (tmp_u, tmp_v) bytes.
        const int uv_width = (io->mb_w + 1) >> 1;
        p->memory = WebPSafeMalloc(1ULL, (size_t)(io->mb_w + 2 * uv_width));
        if (p->memory == NULL) {
          return 0;   // memory error.
        }
        p->tmp_y = (uint8_t*)p->memory;
        p->tmp_u = p->tmp_y + io->mb_w;
        p->tmp_v = p->tmp_u + uv_width;
        p->emit = EmitFancyRGB;
        WebPInitUpsamplers();
#endif
      }
    } else {
      p->emit = EmitYUV;
    }
    if (is_alpha) {  // need transparency output
      // 4444 modes pack alpha into a nibble and need a dedicated emitter.
      p->emit_alpha =
          (colorspace == MODE_RGBA_4444 || colorspace == MODE_rgbA_4444) ?
              EmitAlphaRGBA4444
          : is_rgb ? EmitAlphaRGB
                   : EmitAlphaYUV;
      if (is_rgb) {
        WebPInitAlphaProcessing();
      }
    }
  }
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rdi, %rbx
movq 0x38(%rdi), %r13
movq (%r13), %rax
movl (%rax), %r12d
cmpl $0xc, %r12d
ja 0xbf37
movl $0x103a, %eax # imm = 0x103A
btl %r12d, %eax
jae 0xbf37
leaq 0x50(%r13), %rbp
xorps %xmm0, %xmm0
movups %xmm0, 0x60(%r13)
movups %xmm0, 0x50(%r13)
movl $0xb, %edx
xorl %r15d, %r15d
movq 0x28(%r13), %rdi
movq %rbx, %rsi
callq 0x1311c
xorl %edx, %edx
testl %eax, %eax
je 0xc44b
leal -0xb(%r12), %eax
cmpl $-0x4, %eax
setb %al
orb %r15b, %al
jne 0xbec9
callq 0x1744d
cmpl $0x0, 0x88(%rbx)
je 0xbefe
movq (%r13), %rdx
movl (%rdx), %eax
leal -0x1(%rax), %ecx
cmpl $0xa, %r12d
movq %rbp, 0x10(%rsp)
jbe 0xbf18
cmpl $0xc, %ecx
movq %rdx, 0x28(%rsp)
jae 0xbf62
movl $0x81d, %edx # imm = 0x81D
btl %ecx, %edx
jae 0xbf62
xorl %ecx, %ecx
jmp 0xbf6b
cmpl $0xa, %r12d
jbe 0xc3b4
leaq 0x7e0(%rip), %rax # 0xc6ef
movq %rax, 0x58(%r13)
jmp 0xc420
cmpl $0xc, %ecx
jae 0xc18a
movl $0x81d, %edx # imm = 0x81D
btl %ecx, %edx
jae 0xc18a
xorl %r12d, %r12d
jmp 0xc194
leal -0xb(%r12), %eax
xorl %r15d, %r15d
cmpl $-0x4, %eax
setb %r15b
leaq 0x50(%r13), %rbp
xorps %xmm0, %xmm0
movups %xmm0, 0x60(%r13)
movups %xmm0, 0x50(%r13)
movl %r15d, %edx
addl $0xb, %edx
jmp 0xbe9e
addl $-0xb, %eax
cmpl $-0x4, %eax
setb %cl
movl %ecx, 0x20(%rsp)
movslq 0x8c(%rbx), %r14
movl 0x90(%rbx), %eax
movq %rax, 0x30(%rsp)
leaq 0x1(%r14), %rax
movl 0xc(%rbx), %edx
movl %edx, 0x1c(%rsp)
movl 0x10(%rbx), %edx
movl %edx, 0x3c(%rsp)
leaq (%r14,%r14), %rsi
movq %rax, 0x8(%rsp)
andq $-0x2, %rax
movq %rax, 0x40(%rsp)
movq %rsi, 0x50(%rsp)
leaq (%rsi,%rax,2), %r15
leaq (,%r14,8), %rax
testb %cl, %cl
movl $0x0, %ebp
cmoveq %rax, %rbp
leaq 0x157(%rbp,%r15,4), %rax
leaq 0x1bf(%rbp,%r15,4), %rsi
cmovneq %rax, %rsi
movl $0x1, %edi
callq 0x3145c
xorl %edx, %edx
movq %rax, %r12
movq 0x10(%rsp), %rax
movq %r12, (%rax)
testq %r12, %r12
je 0xc44b
leaq (,%r15,4), %rax
addq %rbp, %rax
leaq (%r12,%rax), %rdi
addq $0x1f, %rdi
andq $-0x20, %rdi
movq %rdi, 0x30(%r13)
leaq 0x68(%rdi), %rax
movq %rax, 0x38(%r13)
leaq 0xd0(%rdi), %rax
movq %rax, 0x40(%r13)
leaq 0x138(%rdi), %rax
movl 0x20(%rsp), %ecx
testb %cl, %cl
movl $0x0, %ecx
cmoveq %rax, %rcx
movq %rcx, 0x48(%r13)
movl 0xc(%rbx), %esi
movl 0x10(%rbx), %edx
movq 0x28(%rsp), %r15
movq 0x10(%r15), %rcx
movl 0x30(%r15), %eax
subq $0x8, %rsp
movl %r14d, %r8d
movq 0x38(%rsp), %rbp
movl %ebp, %r9d
pushq %r12
pushq $0x1
pushq %rax
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
movq %r14, 0x48(%rsp)
movq 0x8(%rsp), %r8
sarl %r8d
leal 0x1(%rbp), %r9d
sarl %r9d
movl 0x1c(%rsp), %r14d
incl %r14d
sarl %r14d
movl 0x3c(%rsp), %ebp
incl %ebp
sarl %ebp
movq 0x38(%r13), %rdi
movq 0x18(%r15), %rcx
movl 0x34(%r15), %eax
movq 0x50(%rsp), %rdx
leaq (%r12,%rdx,4), %r12
subq $0x8, %rsp
movl %r14d, %esi
movl %ebp, %edx
movq %r8, 0x10(%rsp)
movl %r9d, 0x18(%rsp)
pushq %r12
pushq $0x1
pushq %rax
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
movq 0x40(%r13), %rdi
movq 0x20(%r15), %rcx
movl 0x38(%r15), %eax
movq 0x40(%rsp), %rdx
leaq (%r12,%rdx,4), %r10
subq $0x8, %rsp
movl %r14d, %esi
movl %ebp, %edx
movq 0x10(%rsp), %r8
movl 0x18(%rsp), %r9d
pushq %r10
pushq $0x1
pushq %rax
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
leaq 0xd0b(%rip), %rax # 0xce2a
movq %rax, 0x58(%r13)
movl $0x1, %edx
cmpb $0x0, 0x20(%rsp)
movq 0x48(%rsp), %r8
movq 0x30(%rsp), %r9
jne 0xc44b
movq 0x40(%rsp), %r10
addq %r10, %r10
movq 0x48(%r13), %rdi
movl 0xc(%rbx), %esi
movl 0x10(%rbx), %edx
movq 0x28(%rsp), %rax
movq 0x28(%rax), %rcx
movl 0x3c(%rax), %eax
leaq (%r12,%r10,4), %r10
subq $0x8, %rsp
pushq %r10
pushq $0x1
pushq %rax
callq 0x30e60
addq $0x20, %rsp
movl $0x0, %edx
testl %eax, %eax
je 0xc44b
leaq 0xd61(%rip), %rax # 0xcee6
jmp 0xc43d
addl $-0xb, %eax
cmpl $-0x4, %eax
setb %r12b
movslq 0x8c(%rbx), %r14
movl 0x90(%rbx), %eax
movl %eax, 0x8(%rsp)
movl 0xc(%rbx), %eax
movl %eax, 0x20(%rsp)
movl 0x10(%rbx), %eax
movl %eax, 0x30(%rsp)
leaq (%r14,%r14), %r15
movzbl %r12b, %eax
movl $0x4, %ecx
subq %rax, %rcx
movq %r15, 0x28(%rsp)
imulq %rcx, %r15
imulq $0x68, %rcx, %rax
imulq %r14, %rcx
leaq (%rcx,%r15,4), %rbp
leaq (%rax,%rbp), %rsi
addq $0x1f, %rsi
movl $0x1, %edi
callq 0x3145c
movq %rax, %r10
movq 0x10(%rsp), %rax
movq %r10, (%rax)
testq %r10, %r10
movl $0x0, %edx
je 0xc44b
leaq (%r10,%r15,4), %rcx
leaq (%r10,%rbp), %rdi
addq $0x1f, %rdi
andq $-0x20, %rdi
movq %rdi, 0x30(%r13)
leaq 0x68(%rdi), %rax
movq %rax, 0x38(%r13)
leaq 0x138(%rdi), %rax
movl %r12d, 0x1c(%rsp)
testb %r12b, %r12b
movl $0x0, %edx
cmoveq %rax, %rdx
leaq 0xd0(%rdi), %rax
movq %rax, 0x40(%r13)
movq %rdx, 0x48(%r13)
movl 0xc(%rbx), %esi
movl 0x10(%rbx), %edx
subq $0x8, %rsp
movq %rcx, %r12
movl %r14d, %r8d
movl 0x10(%rsp), %ebp
movl %ebp, %r9d
pushq %r10
pushq $0x1
pushq $0x0
movq %r10, 0x30(%rsp)
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
movq %r14, %r15
movl %ebp, %r9d
movl 0x20(%rsp), %r14d
incl %r14d
sarl %r14d
movl 0x30(%rsp), %ebp
incl %ebp
sarl %ebp
movq 0x38(%r13), %rdi
leaq (%r12,%r15), %rcx
movq 0x28(%rsp), %rax
movq 0x10(%rsp), %rdx
leaq (%rdx,%rax,4), %rax
subq $0x8, %rsp
movl %r14d, %esi
movl %ebp, %edx
movl %r15d, %r8d
pushq %rax
pushq $0x1
pushq $0x0
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
movq 0x40(%r13), %rdi
movq %r12, 0x20(%rsp)
leaq (%r12,%r15,2), %rcx
movq %r15, %rax
shlq $0x4, %rax
movl %r14d, %esi
movq 0x10(%rsp), %rdx
movq %rdx, %r14
addq %rdx, %rax
subq $0x8, %rsp
movl %ebp, %edx
movl %r15d, %r8d
movl 0x10(%rsp), %r9d
pushq %rax
pushq $0x1
pushq $0x0
callq 0x30e60
addq $0x20, %rsp
testl %eax, %eax
je 0xc3ad
leaq 0x6c0(%rip), %rax # 0xc9d7
movq %rax, 0x58(%r13)
callq 0x17c28
movl $0x1, %edx
cmpb $0x0, 0x1c(%rsp)
movl 0x8(%rsp), %r9d
jne 0xc44b
movq 0x48(%r13), %rdi
movl 0xc(%rbx), %esi
movl 0x10(%rbx), %edx
leal (%r15,%r15,2), %eax
cltq
movq 0x20(%rsp), %rcx
addq %rax, %rcx
leaq (%r15,%r15,2), %rax
leaq (%r14,%rax,8), %rax
subq $0x8, %rsp
movl %r15d, %r8d
pushq %rax
pushq $0x1
pushq $0x0
callq 0x30e60
addq $0x20, %rsp
movl $0x0, %edx
testl %eax, %eax
je 0xc44b
leaq 0x7fd(%rip), %rax # 0xcb7b
movq %rax, 0x60(%r13)
movq (%r13), %rax
movl (%rax), %eax
cmpl $0xa, %eax
leaq 0x857(%rip), %rcx # 0xcbe9
leaq 0x962(%rip), %rdx # 0xccfb
cmoveq %rcx, %rdx
cmpl $0x5, %eax
cmoveq %rcx, %rdx
movq %rdx, 0x68(%r13)
jmp 0xc441
xorl %edx, %edx
jmp 0xc44b
callq 0x1b7b2
leaq 0xd2(%rip), %rax # 0xc492
movq %rax, 0x58(%r13)
cmpl $0x0, 0x58(%rbx)
je 0xc420
movslq 0xc(%rbx), %rax
leaq 0x1(%rax), %r14
movq %r14, %rsi
andq $-0x2, %rsi
addq %rax, %rsi
movl $0x1, %edi
callq 0x3145c
movq %rax, (%rbp)
testq %rax, %rax
movl $0x0, %edx
je 0xc44b
sarl %r14d
movq %rax, 0x8(%r13)
movslq 0xc(%rbx), %rcx
addq %rax, %rcx
movq %rcx, 0x10(%r13)
movslq %r14d, %rax
addq %rcx, %rax
movq %rax, 0x18(%r13)
leaq 0xd6(%rip), %rax # 0xc4ed
movq %rax, 0x58(%r13)
callq 0x1744d
movl $0x1, %edx
testb %r15b, %r15b
jne 0xc44b
cmpl $0xa, %r12d
je 0xc436
cmpl $0x5, %r12d
jne 0xc45c
leaq 0x36a(%rip), %rax # 0xc7a7
movq %rax, 0x60(%r13)
callq 0x135d3
movl $0x1, %edx
movl %edx, %eax
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x4db(%rip), %rax # 0xc93e
leaq 0x40b(%rip), %rcx # 0xc875
cmpl $0xb, %r12d
cmovaeq %rax, %rcx
movq %rcx, 0x60(%r13)
jb 0xc441
jmp 0xc44b
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
EmitFancyRGB
|
// Emit RGB(A) rows using the "fancy" (bilinear) chroma upsampler. The
// upsampler produces output rows in pairs, so one unfinished row is carried
// over between calls in p->tmp_y/tmp_u/tmp_v (allocated in CustomSetup).
// Returns the number of output lines actually emitted.
static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
  int num_lines_out = io->mb_h;   // a priori guess
  const WebPRGBABuffer* const buf = &p->output->u.RGBA;
  uint8_t* dst = buf->rgba + (size_t)io->mb_y * buf->stride;
  WebPUpsampleLinePairFunc upsample = WebPUpsamplers[p->output->colorspace];
  const uint8_t* cur_y = io->y;
  const uint8_t* cur_u = io->u;
  const uint8_t* cur_v = io->v;
  // Chroma samples saved by the previous call (the memcpy at the bottom).
  const uint8_t* top_u = p->tmp_u;
  const uint8_t* top_v = p->tmp_v;
  int y = io->mb_y;
  const int y_end = io->mb_y + io->mb_h;
  const int mb_w = io->mb_w;
  const int uv_w = (mb_w + 1) / 2;
  if (y == 0) {
    // First line is special cased. We mirror the u/v samples at boundary.
    upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, mb_w);
  } else {
    // We can finish the left-over line from previous call.
    upsample(p->tmp_y, cur_y, top_u, top_v, cur_u, cur_v,
             dst - buf->stride, dst, mb_w);
    ++num_lines_out;
  }
  // Loop over each output pairs of row.
  for (; y + 2 < y_end; y += 2) {
    top_u = cur_u;
    top_v = cur_v;
    cur_u += io->uv_stride;
    cur_v += io->uv_stride;
    dst += 2 * buf->stride;
    cur_y += 2 * io->y_stride;
    upsample(cur_y - io->y_stride, cur_y,
             top_u, top_v, cur_u, cur_v,
             dst - buf->stride, dst, mb_w);
  }
  // move to last row
  cur_y += io->y_stride;
  if (io->crop_top + y_end < io->crop_bottom) {
    // Save the unfinished samples for next call (as we're not done yet).
    memcpy(p->tmp_y, cur_y, mb_w * sizeof(*p->tmp_y));
    memcpy(p->tmp_u, cur_u, uv_w * sizeof(*p->tmp_u));
    memcpy(p->tmp_v, cur_v, uv_w * sizeof(*p->tmp_v));
    // The fancy upsampler leaves a row unfinished behind
    // (except for the very last row)
    num_lines_out--;
  } else {
    // Process the very last row of even-sized picture
    if (!(y_end & 1)) {
      upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v,
               dst + buf->stride, NULL, mb_w);
    }
  }
  return num_lines_out;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movl 0x10(%rdi), %r13d
movq (%rsi), %rcx
movslq 0x8(%rdi), %r14
movslq 0x18(%rcx), %rax
movq %rax, %r15
imulq %r14, %r15
addq 0x10(%rcx), %r15
movslq 0xc(%rdi), %r11
movq %rcx, 0x28(%rsp)
movl (%rcx), %ecx
leaq 0x5d3a9(%rip), %rdx # 0x698d0
movq (%rdx,%rcx,8), %r10
movq 0x18(%rdi), %r12
movq 0x20(%rdi), %rbx
movq %rdi, 0x8(%rsp)
movq 0x28(%rdi), %r9
testq %r14, %r14
movq %r11, 0x10(%rsp)
movq %r10, 0x20(%rsp)
movq %rsi, 0x30(%rsp)
movq %r9, 0x18(%rsp)
je 0xc586
movq 0x18(%rsi), %rcx
movq 0x8(%rsi), %rdi
movq 0x10(%rsi), %rdx
movq %r10, %rbp
movq %r15, %r10
subq %rax, %r10
subq $0x8, %rsp
movq %r12, %rsi
movq %rbx, %r8
pushq %r11
pushq %r15
pushq %r10
callq *%rbp
addq $0x20, %rsp
leal 0x1(%r13), %eax
jmp 0xc5a8
subq $0x8, %rsp
movq %r12, %rdi
xorl %esi, %esi
movq %rbx, %rdx
movq %r9, %rcx
movq %rbx, %r8
pushq %r11
pushq $0x0
pushq %r15
callq *%r10
addq $0x20, %rsp
movl %r13d, %eax
movl %eax, 0x4(%rsp)
leal (%r14,%r13), %r8d
cmpl $0x3, %r13d
movq 0x28(%rsp), %rdi
jl 0xc639
addl $0x2, %r14d
movq 0x10(%rsp), %r11
movq 0x8(%rsp), %rdx
movq 0x18(%rsp), %rcx
movslq 0x34(%rdx), %r13
leaq (%rbx,%r13), %rbp
addq %rcx, %r13
movslq 0x18(%rdi), %rax
leaq (%r15,%rax,2), %r15
movslq 0x30(%rdx), %rdx
leaq (%r12,%rdx,2), %r12
movq %r12, %rdi
subq %rdx, %rdi
movq %r15, %r10
subq %rax, %r10
subq $0x8, %rsp
movq %r12, %rsi
movq %rbx, %rdx
movl %r8d, %ebx
movq %rbp, %r8
movq %r13, %r9
pushq %r11
pushq %r15
pushq %r10
callq *0x40(%rsp)
movl %ebx, %r8d
movq 0x28(%rsp), %rdx
movq 0x48(%rsp), %rdi
movq 0x30(%rsp), %r11
addq $0x20, %rsp
addl $0x2, %r14d
movq %r13, %rcx
movq %rbp, %rbx
cmpl %r8d, %r14d
jl 0xc5ce
jmp 0xc64b
movq %rbx, %rbp
movq 0x18(%rsp), %r13
movq 0x10(%rsp), %r11
movq 0x8(%rsp), %rdx
movslq 0x30(%rdx), %rsi
addq %r12, %rsi
movl 0x80(%rdx), %eax
addl %r8d, %eax
cmpl 0x84(%rdx), %eax
jge 0xc6ac
leal 0x1(%r11), %eax
shrl $0x1f, %eax
leal (%r11,%rax), %ebx
incl %ebx
sarl %ebx
movq 0x30(%rsp), %r14
movq 0x8(%r14), %rdi
movq %r11, %rdx
callq 0x45d0
movq 0x10(%r14), %rdi
movslq %ebx, %rbx
movq %rbp, %rsi
movq %rbx, %rdx
callq 0x45d0
movq 0x18(%r14), %rdi
movq %r13, %rsi
movq %rbx, %rdx
callq 0x45d0
movl 0x4(%rsp), %eax
decl %eax
jmp 0xc6e0
testb $0x1, %r8b
jne 0xc6dc
movslq 0x18(%rdi), %rax
addq %rax, %r15
subq $0x8, %rsp
movq %rsi, %rdi
xorl %esi, %esi
movq %rbp, %rdx
movq %r13, %rcx
movq %rbp, %r8
movq %r13, %r9
pushq %r11
pushq $0x0
pushq %r15
callq *0x40(%rsp)
addq $0x20, %rsp
movl 0x4(%rsp), %eax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
EmitAlphaRGBA4444
|
// Merge the decoded alpha plane into RGBA4444 output: each alpha sample is
// reduced to 4 bits and written into the low nibble of the pixel's second
// 16-bit half. Applies premultiplication afterwards when required.
static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
                             int expected_num_lines_out) {
  const uint8_t* src = io->a;
  if (src != NULL) {
    const int mb_w = io->mb_w;
    const WEBP_CSP_MODE colorspace = p->output->colorspace;
    const WebPRGBABuffer* const buf = &p->output->u.RGBA;
    int num_rows;
    const size_t start_y = GetAlphaSourceRow(io, &src, &num_rows);
    uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
#if (WEBP_SWAP_16BIT_CSP == 1)
    uint8_t* dst = base_rgba;
#else
    uint8_t* dst = base_rgba + 1;  // alpha nibble lives in the second byte
#endif
    uint32_t accum_mask = 0x0f;    // AND of every emitted 4-bit alpha value
    int x, y;
    for (y = 0; y < num_rows; ++y) {
      for (x = 0; x < mb_w; ++x) {
        const uint32_t a4 = src[x] >> 4;  // convert alpha to 4 bits
        dst[2 * x] = (dst[2 * x] & 0xf0) | a4;
        accum_mask &= a4;
      }
      src += io->width;
      dst += buf->stride;
    }
    (void)expected_num_lines_out;
    assert(expected_num_lines_out == num_rows);
    // Premultiply only if at least one pixel was not fully opaque.
    if (accum_mask != 0x0f && WebPIsPremultipliedMode(colorspace)) {
      WebPApplyAlphaMultiply4444(base_rgba, mb_w, num_rows, buf->stride);
    }
  }
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x10, %rsp
movq 0x98(%rdi), %rax
movq %rax, (%rsp)
testq %rax, %rax
je 0xc866
movq %rdi, %r14
movl 0xc(%rdi), %ebx
movq (%rsi), %r12
movl (%r12), %ebp
movq %rsp, %rsi
leaq 0xc(%rsp), %r15
movq %r15, %rdx
callq 0xd00a
movl (%r15), %edx
testl %edx, %edx
jle 0xc866
movslq %eax, %rdi
movslq 0x18(%r12), %rcx
imulq %rcx, %rdi
addq 0x10(%r12), %rdi
movq (%rsp), %rax
leaq 0x1(%rdi), %r8
movl $0xf, %esi
xorl %r9d, %r9d
testl %ebx, %ebx
jle 0xc83a
xorl %ecx, %ecx
movzbl (%rax,%rcx), %r10d
shrl $0x4, %r10d
movb (%r8,%rcx,2), %r11b
andb $-0x10, %r11b
orb %r10b, %r11b
movb %r11b, (%r8,%rcx,2)
andl %r10d, %esi
incq %rcx
cmpq %rcx, %rbx
jne 0xc812
movl 0x18(%r12), %ecx
movslq (%r14), %r10
addq %r10, %rax
movslq %ecx, %r10
addq %r10, %r8
incl %r9d
cmpl %edx, %r9d
jne 0xc80c
cmpl $0xf, %esi
je 0xc866
addl $-0xb, %ebp
cmpl $-0x4, %ebp
jb 0xc866
leaq 0x5cbe6(%rip), %rax # 0x69448
movl %ebx, %esi
callq *(%rax)
xorl %eax, %eax
addq $0x10, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
EmitAlphaYUV
|
// Copy the decoded alpha plane into the YUVA output's A plane, row by row.
// If the caller requested alpha but the stream carries none, the plane is
// filled with opaque (0xff) instead.
static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
                        int expected_num_lines_out) {
  const uint8_t* src = io->a;
  const WebPYUVABuffer* const buf = &p->output->u.YUVA;
  const int w = io->mb_w;
  const int h = io->mb_h;
  uint8_t* row = buf->a + (size_t)io->mb_y * buf->a_stride;
  int line;
  (void)expected_num_lines_out;
  assert(expected_num_lines_out == h);
  if (src != NULL) {
    for (line = 0; line < h; ++line) {
      memcpy(row, src, w * sizeof(*row));
      src += io->width;        // source rows use the full picture width
      row += buf->a_stride;
    }
  } else if (buf->a != NULL) {
    // the user requested alpha, but there is none, set it to opaque.
    FillAlphaPlane(row, w, h, buf->a_stride);
  }
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq 0x98(%rdi), %r12
movq (%rsi), %r13
movslq 0xc(%rdi), %rbx
movl 0x10(%rdi), %ebp
movq 0x28(%r13), %rax
movslq 0x8(%rdi), %r14
movslq 0x3c(%r13), %rdx
imulq %rdx, %r14
addq %rax, %r14
testq %r12, %r12
je 0xc99a
testl %ebp, %ebp
jle 0xc9c6
movq %rdi, %r15
movq %r14, %rdi
movq %r12, %rsi
movq %rbx, %rdx
callq 0x45d0
movslq (%r15), %rax
addq %rax, %r12
movslq 0x3c(%r13), %rax
addq %rax, %r14
decl %ebp
jne 0xc979
jmp 0xc9c6
testq %rax, %rax
setne %al
testl %ebp, %ebp
setg %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0xc9c6
movq %rdx, %r15
movq %r14, %rdi
movl $0xff, %esi
movq %rbx, %rdx
callq 0x40c0
addq %r15, %r14
decl %ebp
jne 0xc9af
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
EmitRescaledAlphaRGB
|
// Feed decoded alpha rows through the alpha rescaler and emit the rescaled
// rows via p->emit_alpha_row until 'expected_num_out_lines' have been
// produced. No-op when the stream has no alpha plane.
static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
                                int expected_num_out_lines) {
  if (io->a != NULL) {
    WebPRescaler* const scaler = p->scaler_a;
    int remaining = expected_num_out_lines;
    const int y_end = p->last_y + remaining;
    while (remaining > 0) {
      // scaler->src_y tracks how far into the alpha plane we have consumed.
      const int64_t row_offset = (int64_t)scaler->src_y - io->mb_y;
      WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y,
                         io->a + row_offset * io->width, io->width);
      remaining -= p->emit_alpha_row(p, y_end - remaining, remaining);
    }
  }
  return 0;
}
|
cmpq $0x0, 0x98(%rdi)
je 0xcbe6
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
movl %edx, %ebx
testl %edx, %edx
jle 0xcbde
movq %rsi, %r14
movq %rdi, %r15
movq 0x48(%rsi), %r12
movl 0x20(%rsi), %ebp
addl %ebx, %ebp
movslq 0x3c(%r12), %rax
movslq 0x8(%r15), %rsi
movq %rax, %rdx
subq %rsi, %rdx
subl %eax, %esi
addl 0x10(%r15), %esi
movslq (%r15), %rcx
imulq %rcx, %rdx
addq 0x98(%r15), %rdx
movq %r12, %rdi
callq 0x31015
movl %ebp, %esi
subl %ebx, %esi
movq %r14, %rdi
movl %ebx, %edx
callq *0x68(%r14)
subl %eax, %ebx
jg 0xcba2
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
xorl %eax, %eax
retq
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
EmitRescaledAlphaYUV
|
// Rescale the alpha plane into the YUVA output and un-multiply the Y plane
// against it. When the caller asked for alpha but the stream has none, the
// destination rows are filled with opaque instead.
static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
                                int expected_num_lines_out) {
  const WebPYUVABuffer* const buf = &p->output->u.YUVA;
  uint8_t* const dst_a = buf->a + (size_t)p->last_y * buf->a_stride;
  if (io->a == NULL) {
    if (buf->a != NULL) {
      // the user requested alpha, but there is none, set it to opaque.
      assert(p->last_y + expected_num_lines_out <= io->scaled_height);
      FillAlphaPlane(dst_a, io->scaled_width, expected_num_lines_out,
                     buf->a_stride);
    }
  } else {
    uint8_t* const dst_y = buf->y + (size_t)p->last_y * buf->y_stride;
    const int lines = Rescale(io->a, io->width, io->mb_h, p->scaler_a);
    assert(expected_num_lines_out == lines);
    if (lines > 0) {  // unmultiply the Y plane against the rescaled alpha
      WebPMultRows(dst_y, buf->y_stride, dst_a, buf->a_stride,
                   p->scaler_a->dst_width, lines, 1);
    }
  }
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq (%rsi), %r12
movq 0x28(%r12), %rcx
movslq 0x20(%rsi), %r13
movslq 0x3c(%r12), %r15
movq %r15, %rbx
imulq %r13, %rbx
addq %rcx, %rbx
movq 0x98(%rdi), %rax
testq %rax, %rax
je 0xcf6a
movq %rsi, %r14
movq 0x10(%r12), %r15
movslq 0x30(%r12), %rbp
movl (%rdi), %esi
movl 0x10(%rdi), %edx
movq 0x48(%r14), %rcx
movq %rax, %rdi
callq 0xcfaa
testl %eax, %eax
jle 0xcf99
imulq %r13, %rbp
addq %rbp, %r15
movl 0x30(%r12), %esi
movl 0x3c(%r12), %ecx
movq 0x48(%r14), %rdx
movl 0x34(%rdx), %r8d
movl $0x1, (%rsp)
movq %r15, %rdi
movq %rbx, %rdx
movl %eax, %r9d
callq 0x13583
jmp 0xcf99
movl %edx, %ebp
testq %rcx, %rcx
sete %al
testl %edx, %edx
setle %cl
orb %al, %cl
jne 0xcf99
movslq 0x8c(%rdi), %r14
movq %rbx, %rdi
movl $0xff, %esi
movq %r14, %rdx
callq 0x40c0
addq %r15, %rbx
decl %ebp
jne 0xcf82
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/io_dec.c
|
ParsePartitions
|
// Parse the VP8 token-partition layout: a 2-bit count in the header, then a
// table of 3-byte little-endian sizes for all but the last partition, then
// the partition payloads themselves. Initializes one bit reader per
// partition; the last partition simply gets whatever bytes remain.
static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
                                     const uint8_t* buf, size_t size) {
  VP8BitReader* const br = &dec->br_;
  const uint8_t* size_bytes = buf;            // cursor into the size table
  const uint8_t* const buf_end = buf + size;
  const uint8_t* start;
  size_t remaining = size;
  size_t last;
  size_t i;
  dec->num_parts_minus_one_ = (1 << VP8GetValue(br, 2, "global-header")) - 1;
  last = dec->num_parts_minus_one_;
  if (size < 3 * last) {
    // Not even enough bytes to hold the partition-size table: hard failure.
    return VP8_STATUS_NOT_ENOUGH_DATA;
  }
  start = buf + 3 * last;
  remaining -= 3 * last;
  for (i = 0; i < last; ++i) {
    size_t psize = size_bytes[0] | (size_bytes[1] << 8) | (size_bytes[2] << 16);
    if (psize > remaining) psize = remaining;  // clamp to available bytes
    VP8InitBitReader(dec->parts_ + i, start, psize);
    start += psize;
    remaining -= psize;
    size_bytes += 3;
  }
  VP8InitBitReader(dec->parts_ + last, start, remaining);
  // Suspended (not an error): init is fine but the last partition is empty.
  return (start < buf_end) ? VP8_STATUS_OK : VP8_STATUS_SUSPENDED;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r14
addq $0x10, %rdi
movl $0x2, %esi
callq 0x30593
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
movl %eax, %ecx
shll %cl, %edx
notl %edx
movl %edx, 0x1b0(%r14)
leaq (%rdx,%rdx,2), %rsi
movl $0x7, %ecx
movq %rbx, %r12
subq %rsi, %r12
jb 0xe41e
addq %r15, %rbx
movq %rbx, 0x18(%rsp)
movq %rsi, 0x8(%rsp)
leaq (%r15,%rsi), %r13
testl %eax, %eax
movq %r14, 0x10(%rsp)
je 0xe3ed
leaq 0x1b8(%r14), %rbp
cmpq $0x1, %rdx
adcq $0x0, %rdx
leaq (%rdx,%rdx,2), %rax
movq %rax, 0x20(%rsp)
xorl %r14d, %r14d
movzwl (%r15,%r14), %eax
movzbl 0x2(%r15,%r14), %ebx
shll $0x10, %ebx
orq %rax, %rbx
cmpq %r12, %rbx
cmovaeq %r12, %rbx
movq %rbp, %rdi
movq %r13, %rsi
movq %rbx, %rdx
callq 0x304d6
addq %rbx, %r13
subq %rbx, %r12
addq $0x3, %r14
addq $0x30, %rbp
cmpq %r14, 0x20(%rsp)
jne 0xe3b2
movq 0x8(%rsp), %rcx
shlq $0x4, %rcx
movq 0x10(%rsp), %rax
leaq (%rax,%rcx), %rdi
addq $0x1b8, %rdi # imm = 0x1B8
movq %r13, %rsi
movq %r12, %rdx
callq 0x304d6
xorl %eax, %eax
cmpq 0x18(%rsp), %r13
setae %al
leal (%rax,%rax,4), %ecx
movl %ecx, %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/vp8_dec.c
|
DecodeImageStream
|
// Decode one VP8L image stream (the main image at level 0, or a transform's
// sub-image when recursing). Reads transforms, the optional color cache and
// the Huffman codes; at level 0 it stops after the header, otherwise it
// decodes the pixel data into a freshly allocated buffer returned through
// 'decoded_data' (ownership passes to the caller). Returns 1 on success.
static int DecodeImageStream(int xsize, int ysize,
                             int is_level0,
                             VP8LDecoder* const dec,
                             uint32_t** const decoded_data) {
  int ok = 1;
  int transform_xsize = xsize;
  int transform_ysize = ysize;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  uint32_t* data = NULL;
  int color_cache_bits = 0;
  // Read the transforms (may recurse).
  if (is_level0) {
    // Each transform may shrink the effective image size read below.
    while (ok && VP8LReadBits(br, 1)) {
      ok = ReadTransform(&transform_xsize, &transform_ysize, dec);
    }
  }
  // Color cache
  if (ok && VP8LReadBits(br, 1)) {
    color_cache_bits = VP8LReadBits(br, 4);
    ok = (color_cache_bits >= 1 && color_cache_bits <= MAX_CACHE_BITS);
    if (!ok) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto End;
    }
  }
  // Read the Huffman codes (may recurse).
  ok = ok && ReadHuffmanCodes(dec, transform_xsize, transform_ysize,
                              color_cache_bits, is_level0);
  if (!ok) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    goto End;
  }
  // Finish setting up the color-cache
  if (color_cache_bits > 0) {
    hdr->color_cache_size_ = 1 << color_cache_bits;
    if (!VP8LColorCacheInit(&hdr->color_cache_, color_cache_bits)) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  } else {
    hdr->color_cache_size_ = 0;
  }
  UpdateDecoder(dec, transform_xsize, transform_ysize);
  if (is_level0) {  // level 0 complete
    // Header-only pass: pixel decoding happens later via DECODE_DATA_FUNC.
    dec->state_ = READ_HDR;
    goto End;
  }
  {
    // 64-bit product guards against 32-bit overflow for large sub-images.
    const uint64_t total_size = (uint64_t)transform_xsize * transform_ysize;
    data = (uint32_t*)WebPSafeMalloc(total_size, sizeof(*data));
    if (data == NULL) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  }
  // Use the Huffman trees to decode the LZ77 encoded data.
  ok = DecodeImageData(dec, data, transform_xsize, transform_ysize,
                       transform_ysize, NULL);
  // Premature end-of-stream during data decoding is also a failure.
  ok = ok && !br->eos_;
 End:
  if (!ok) {
    WebPSafeFree(data);
    ClearMetadata(hdr);
  } else {
    if (decoded_data != NULL) {
      *decoded_data = data;
    } else {
      // We allocate image data in this function only for transforms. At level 0
      // (that is: not the transforms), we shouldn't have allocated anything.
      assert(data == NULL);
      assert(is_level0);
    }
    dec->last_pixel_ = 0;  // Reset for future DECODE_DATA_FUNC() calls.
    if (!is_level0) ClearMetadata(hdr);  // Clean up temporary data behind.
  }
  return ok;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rcx, %r14
movq %rsi, 0x38(%rsp)
movq %rdi, 0x28(%rsp)
leaq 0x28(%rcx), %rbx
leaq 0x98(%rcx), %rax
movq %rax, 0x70(%rsp)
movl %edx, 0x1c(%rsp)
testl %edx, %edx
movq %r8, 0x90(%rsp)
je 0xff22
leaq 0xf8(%r14), %r13
movq %rbx, %rdi
movl $0x1, %esi
callq 0x307bd
testl %eax, %eax
je 0xff22
movslq 0xf0(%r14), %r15
movq %rbx, %rdi
movl $0x2, %esi
callq 0x307bd
movl $0x1, %edx
movl %eax, %ecx
shll %cl, %edx
movl 0x158(%r14), %ecx
movl $0x3, %ebp
btl %eax, %ecx
jb 0x1059c
leaq (%r15,%r15,2), %rsi
leaq (,%rsi,8), %r12
addq %r13, %r12
orl %edx, %ecx
movl %ecx, 0x158(%r14)
movl %eax, (%r12)
movq 0x28(%rsp), %rcx
movl %ecx, 0x8(%r12)
movq 0x38(%rsp), %rcx
movl %ecx, 0xc(%r12)
leaq 0x10(%r12), %r15
movq $0x0, 0x10(%r12)
incl 0xf0(%r14)
cmpl $0x2, %eax
jae 0xfe09
movq %rbx, %rdi
movl $0x3, %esi
callq 0x307bd
leal 0x2(%rax), %ecx
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
shll %cl, %esi
movl %ecx, 0x4(%r12)
notl %esi
movl 0x8(%r12), %edi
addl %esi, %edi
shrl %cl, %edi
addl 0xc(%r12), %esi
shrl %cl, %esi
xorl %edx, %edx
movq %r14, %rcx
movq %r15, %r8
callq 0xfce7
testl %eax, %eax
jne 0xfd30
jmp 0x1059c
cmpl $0x3, %eax
jne 0xfd30
movq %rbx, %rdi
movl $0x8, %esi
callq 0x307bd
incl %eax
xorl %ecx, %ecx
cmpl $0x10, %eax
jg 0xfe3d
movl $0x1, %ecx
cmpl $0x4, %eax
jg 0xfe3d
xorl %ecx, %ecx
cmpl $0x3, %eax
setge %cl
xorl $0x3, %ecx
movl $0x1, %esi
shll %cl, %esi
movl 0x8(%r12), %edx
addl %esi, %edx
decl %edx
shrl %cl, %edx
movq %rdx, 0x28(%rsp)
movl %ecx, 0x4(%r12)
movl %eax, 0x8(%rsp)
movl %eax, %edi
movl $0x1, %esi
xorl %edx, %edx
movq %r14, %rcx
movq %r15, %r8
callq 0xfce7
testl %eax, %eax
je 0x1059c
movb 0x4(%r12), %cl
movl $0x8, %eax
shrq %cl, %rax
movl $0x1, %edi
movq %rax, 0x48(%rsp)
movl %eax, %ecx
shlq %cl, %rdi
movl $0x4, %esi
callq 0x3145c
testq %rax, %rax
je 0x1059c
movq %rax, %r12
movq (%r15), %rcx
movl (%rcx), %eax
movl %eax, (%r12)
movl $0x4, %eax
movl 0x8(%rsp), %edx
cmpl $0x2, %edx
jl 0xfeec
shll $0x2, %edx
cmpl $0x6, %edx
movl %edx, %eax
movl $0x5, %edx
cmovll %edx, %eax
movl $0x4, %edx
movb -0x4(%r12,%rdx), %sil
addb (%rcx,%rdx), %sil
movb %sil, (%r12,%rdx)
incq %rdx
cmpq %rdx, %rax
jne 0xfed7
movl $0x4, %edx
movq 0x48(%rsp), %rcx
shll %cl, %edx
cmpl %edx, %eax
jae 0xff12
movl %eax, %ecx
movq %r12, %rdi
addq %rcx, %rdi
notl %eax
addl %eax, %edx
incq %rdx
xorl %esi, %esi
callq 0x40c0
movq (%r15), %rdi
callq 0x314a8
movq %r12, (%r15)
jmp 0xfd30
movl $0x1, %r15d
movq %rbx, %rdi
movl $0x1, %esi
callq 0x307bd
testl %eax, %eax
je 0xff5a
movq %rbx, %rdi
movl $0x4, %esi
callq 0x307bd
movl %eax, %r13d
decl %eax
movl $0x3, %ebp
cmpl $0xa, %eax
jbe 0xff5d
jmp 0x1059c
xorl %r13d, %r13d
movq $0x0, 0x40(%rsp)
movl %r13d, %eax
leaq 0x44880(%rip), %rcx # 0x547f0
movzwl (%rcx,%rax,2), %eax
movq %rax, 0x8(%rsp)
cmpl $0x0, 0x1c(%rsp)
je 0x1005b
movl $0x1, %r15d
movq %rbx, %rdi
movl $0x1, %esi
callq 0x307bd
testl %eax, %eax
je 0x1005b
movq %rbx, %rdi
movl $0x3, %esi
callq 0x307bd
movl %eax, %ebp
addl $0x2, %ebp
movl $0x1, %eax
movl %ebp, %ecx
shll %cl, %eax
movl $0x1, %r15d
movq 0x28(%rsp), %rcx
leal (%rcx,%rax), %ebx
decl %ebx
movl %ebp, %ecx
shrl %cl, %ebx
movq 0x38(%rsp), %rcx
leal (%rcx,%rax), %r12d
decl %r12d
movl %ebp, %ecx
shrl %cl, %r12d
leaq 0x40(%rsp), %r8
movl %ebx, %edi
movl %r12d, %esi
xorl %edx, %edx
movq %r14, %rcx
callq 0xfce7
testl %eax, %eax
je 0x1049a
imull %r12d, %ebx
movl %ebp, 0xc4(%r14)
testl %ebx, %ebx
jle 0x10033
movq 0x40(%rsp), %rax
movl %ebx, %ecx
movl $0x1, %r15d
xorl %edx, %edx
movzwl 0x1(%rax,%rdx,4), %esi
movl %esi, (%rax,%rdx,4)
leal 0x1(%rsi), %edi
cmpl %r15d, %esi
cmovgel %edi, %r15d
incq %rdx
cmpq %rdx, %rcx
jne 0x10019
cmpl $0x3e8, %r15d # imm = 0x3E8
jg 0x104b3
movq 0x28(%rsp), %rax
imull 0x38(%rsp), %eax
cmpl %eax, %r15d
jg 0x104b3
xorl %r12d, %r12d
movl %r15d, %r8d
jmp 0x10064
xorl %r12d, %r12d
movl $0x1, %r8d
movb $0x1, %bl
xorl %edx, %edx
cmpl $0x0, 0x4c(%r14)
je 0x10083
movl %r13d, %r15d
xorl %r13d, %r13d
movq $0x0, 0x10(%rsp)
jmp 0x10567
movq %r8, 0x60(%rsp)
movl $0x1, %edi
movl %r13d, %ecx
shll %cl, %edi
leaq 0x4476f(%rip), %r8 # 0x54808
xorl %eax, %eax
xorl %ecx, %ecx
movl %ecx, %r9d
testq %rax, %rax
movl $0x0, %ecx
cmovel %edi, %ecx
testl %r13d, %r13d
movzwl (%rax,%r8), %esi
cmovlel %edx, %ecx
addl %esi, %ecx
cmpl %ecx, %r9d
cmovgl %r9d, %ecx
addq $0x2, %rax
cmpq $0xa, %rax
jne 0x1009d
movl %edi, 0x48(%rsp)
movl %ecx, %edi
movl $0x4, %esi
callq 0x31484
movq %rax, %rbp
movslq 0x60(%rsp), %rbx
movq 0x8(%rsp), %rdi
imulq %rbx, %rdi
movl $0x4, %esi
callq 0x3145c
movq %rax, 0x20(%rsp)
movl %ebx, %edi
callq 0x3087c
movq %rbp, %rdx
movq %rax, 0x10(%rsp)
testq %rax, %rax
je 0x10484
testq %rdx, %rdx
je 0x10484
cmpq $0x0, 0x20(%rsp)
je 0x10484
movl %r15d, %eax
movq %rax, 0x88(%rsp)
movq 0x10(%rsp), %rax
addq $0x3c, %rax
movq %rax, 0x78(%rsp)
xorl %ebx, %ebx
movq 0x20(%rsp), %r8
movq %rdx, 0x30(%rsp)
movl %r13d, 0x4(%rsp)
movq %r12, 0x50(%rsp)
testq %r12, %r12
movq %rbx, 0x58(%rsp)
je 0x101c9
movl (%r12,%rbx,4), %eax
cmpl $-0x1, %eax
jne 0x101cb
movq %r8, 0x8(%rsp)
xorl %ebx, %ebx
movl 0x4(%rsp), %r13d
testq %rbx, %rbx
movl $0x0, %edi
cmovel 0x48(%rsp), %edi
testl %r13d, %r13d
leaq 0x4467d(%rip), %rax # 0x54808
movzwl (%rbx,%rax), %eax
movl $0x0, %ecx
cmovlel %ecx, %edi
addl %eax, %edi
movq %r14, %rsi
xorl %ecx, %ecx
callq 0x11ebb
testl %eax, %eax
je 0x10558
addq $0x2, %rbx
cmpq $0xa, %rbx
movq 0x30(%rsp), %rdx
jne 0x10174
movq 0x58(%rsp), %rbx
movq 0x8(%rsp), %r8
jmp 0x10439
movl %ebx, %eax
cltq
imulq $0x238, %rax, %rcx # imm = 0x238
movq 0x10(%rsp), %rax
movq %rcx, 0x80(%rsp)
addq %rcx, %rax
movq %rax, 0x68(%rsp)
movl $0x1, %ebp
xorl %ebx, %ebx
xorl %r15d, %r15d
xorl %r9d, %r9d
movl %r9d, 0x8(%rsp)
testq %rbx, %rbx
movl $0x0, %r12d
cmovel 0x48(%rsp), %r12d
cmpl $0x0, 0x4(%rsp)
leaq 0x445f2(%rip), %rax # 0x54808
movzwl (%rax,%rbx,2), %eax
movq 0x68(%rsp), %rcx
movq %r8, (%rcx,%rbx,8)
movl $0x0, %ecx
cmovlel %ecx, %r12d
addl %eax, %r12d
movl %r12d, %edi
movq %r14, %r13
movq %r14, %rsi
movq %r8, %r14
movq %r8, %rcx
callq 0x11ebb
testl %eax, %eax
je 0x10520
testl %ebp, %ebp
movq 0x30(%rsp), %rdx
movq %r14, %r8
je 0x10269
movb (%r14), %cl
testb $0x3, %bl
je 0x10270
xorl %ebp, %ebp
testb %cl, %cl
sete %bpl
jmp 0x10275
movb (%r14), %cl
xorl %ebp, %ebp
jmp 0x10275
movl $0x1, %ebp
movq %r13, %r14
movl 0x8(%rsp), %r9d
movzbl %cl, %ecx
addl %ecx, %r9d
cltq
leaq (%r8,%rax,4), %r8
cmpq $0x4, %rbx
je 0x102c1
movl (%rdx), %eax
cmpl $0x2, %r12d
jb 0x102b1
movl %r12d, %ecx
movl $0x1, %r10d
movl (%rdx,%r10,4), %esi
cmpl %eax, %esi
cmovgl %esi, %eax
incq %r10
cmpq %r10, %rcx
jne 0x102a0
addl %eax, %r15d
incq %rbx
cmpq $0x5, %rbx
jne 0x101f6
movq 0x68(%rsp), %rsi
movl %ebp, 0x28(%rsi)
movl $0x0, 0x30(%rsi)
testl %ebp, %ebp
movl $0xff, %ebp
je 0x10339
movq 0x8(%rsi), %rax
movq 0x10(%rsi), %rcx
movzwl 0x2(%rax), %eax
movzwl 0x2(%rcx), %ecx
movq 0x18(%rsi), %r10
movzwl 0x2(%r10), %r10d
shll $0x18, %r10d
shll $0x10, %eax
orl %ecx, %eax
orl %r10d, %eax
movl %eax, 0x2c(%rsi)
testl %r9d, %r9d
jne 0x10339
movq (%rsi), %rcx
movzwl 0x2(%rcx), %ecx
cmpl $0xff, %ecx
ja 0x10339
movq $0x1, 0x30(%rsi)
shll $0x8, %ecx
orl %ecx, %eax
movl %eax, 0x2c(%rsi)
movl 0x4(%rsp), %r13d
movq 0x50(%rsp), %r12
movq 0x58(%rsp), %rbx
jmp 0x10439
xorl %eax, %eax
cmpl $0x6, %r15d
setl %al
movl %eax, 0x34(%rsi)
movq 0x50(%rsp), %r12
movq 0x58(%rsp), %rbx
jge 0x10434
movq %rsi, %r15
movq %r8, 0x8(%rsp)
movq (%rsi), %rdx
movq 0x80(%rsp), %r11
addq 0x78(%rsp), %r11
xorl %esi, %esi
movl 0x4(%rsp), %r13d
movzbl (%rdx,%rsi,4), %ecx
movzwl 0x2(%rdx,%rsi,4), %r8d
cmpl $0x100, %r8d # imm = 0x100
jb 0x1039b
orl $0x100, %ecx # imm = 0x100
movl %ecx, -0x4(%r11,%rsi,8)
movl %r8d, (%r11,%rsi,8)
jmp 0x1041b
movl %ecx, -0x4(%r11,%rsi,8)
movl %esi, %edi
shrl %cl, %edi
shll $0x8, %r8d
movl %r8d, (%r11,%rsi,8)
movq 0x8(%r15), %rax
movl (%rax,%rdi,4), %eax
movl %eax, %r10d
movl %eax, %r9d
andl $0xffff0000, %r9d # imm = 0xFFFF0000
andl %ebp, %r10d
addl %ecx, %r10d
movl %r10d, -0x4(%r11,%rsi,8)
orl %r8d, %r9d
movl %r9d, (%r11,%rsi,8)
movl %eax, %ecx
shrl %cl, %edi
movq 0x10(%r15), %rax
movl (%rax,%rdi,4), %ecx
movl %ecx, %eax
movl %ecx, %r8d
shrl $0x10, %r8d
andl %ebp, %eax
addl %r10d, %eax
movl %eax, -0x4(%r11,%rsi,8)
shrl %cl, %edi
orl %r9d, %r8d
movl %r8d, (%r11,%rsi,8)
movq 0x18(%r15), %rcx
movl (%rcx,%rdi,4), %ecx
movl %ecx, %edi
andl %ebp, %edi
addl %eax, %edi
movl %edi, -0x4(%r11,%rsi,8)
shll $0x8, %ecx
andl $0xff000000, %ecx # imm = 0xFF000000
orl %r8d, %ecx
movl %ecx, (%r11,%rsi,8)
incq %rsi
cmpq $0x40, %rsi
jne 0x10374
movq 0x8(%rsp), %r8
movq 0x30(%rsp), %rdx
jmp 0x10439
movl 0x4(%rsp), %r13d
incq %rbx
cmpq 0x88(%rsp), %rbx
jne 0x10155
movl %r13d, %r15d
movq 0x40(%rsp), %rax
movq %rax, 0xd0(%r14)
movq 0x60(%rsp), %rax
movl %eax, 0xd8(%r14)
movq 0x10(%rsp), %rax
movq %rax, 0xe0(%r14)
movq 0x20(%rsp), %r13
movq %r13, 0xe8(%r14)
xorl %ebx, %ebx
jmp 0x10567
movl %r13d, %r15d
movl $0x1, (%r14)
movb $0x1, %bl
movq 0x20(%rsp), %r13
jmp 0x10567
movl %r13d, %r15d
xorl %edx, %edx
xorl %r13d, %r13d
movq $0x0, 0x10(%rsp)
xorl %r12d, %r12d
jmp 0x10554
movl %r15d, %r12d
movl $0x4, %esi
movq %r12, %rdi
callq 0x3145c
testq %rax, %rax
je 0x10539
shlq $0x2, %r12
movq %rax, %rbp
movq %rax, %rdi
movl $0xff, %esi
movq %r12, %rdx
callq 0x40c0
testl %ebx, %ebx
jle 0x106e6
movq 0x40(%rsp), %rax
movl %ebx, %ecx
xorl %edx, %edx
xorl %r8d, %r8d
movq %rbp, %r12
movl (%rax,%rdx,4), %edi
movl (%r12,%rdi,4), %esi
cmpl $-0x1, %esi
jne 0x10510
movl %r8d, (%r12,%rdi,4)
movl %r8d, %esi
leal 0x1(%r8), %edi
movl %edi, %r8d
movl %esi, (%rax,%rdx,4)
incq %rdx
cmpq %rdx, %rcx
jne 0x104f6
jmp 0x10064
movl 0x4(%rsp), %r15d
movq 0x50(%rsp), %r12
movb $0x1, %bl
movq 0x20(%rsp), %rax
movq %r13, %r14
movq %rax, %r13
jmp 0x10562
movl %r13d, %r15d
movl $0x1, (%r14)
xorl %r12d, %r12d
xorl %edx, %edx
xorl %r13d, %r13d
movq $0x0, 0x10(%rsp)
movb $0x1, %bl
jmp 0x10567
movl %r13d, %r15d
movb $0x1, %bl
movq 0x20(%rsp), %r13
movq 0x30(%rsp), %rdx
movq %rdx, %rdi
callq 0x314a8
movq %r12, %rdi
callq 0x314a8
testb %bl, %bl
je 0x105c9
movq 0x40(%rsp), %rdi
callq 0x314a8
movq %r13, %rdi
callq 0x314a8
movq 0x10(%rsp), %rdi
callq 0x30889
movl $0x3, %ebp
movl %ebp, (%r14)
xorl %ebx, %ebx
movq %rbx, %rdi
callq 0x314a8
xorl %ebx, %ebx
movq 0x70(%rsp), %rdi
callq 0xfb04
movl %ebx, %eax
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
testl %r15d, %r15d
jle 0x105f9
movl $0x1, %eax
movl %r15d, %ecx
shll %cl, %eax
movl $0x1, %ebp
movl %eax, 0x98(%r14)
leaq 0xa0(%r14), %rdi
movl %r15d, %esi
callq 0x30800
testl %eax, %eax
jne 0x10604
jmp 0x1059c
movq 0x70(%rsp), %rax
movl $0x0, (%rax)
movl 0xc4(%r14), %ecx
movl $0x1, %eax
shll %cl, %eax
movq 0x28(%rsp), %rdi
addl %edi, %eax
decl %eax
shrl %cl, %eax
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
shll %cl, %edx
movl $0x1, %ebp
notl %edx
xorl %esi, %esi
cmpl $0x1, %ecx
sbbl %esi, %esi
orl %edx, %esi
movl %edi, 0x84(%r14)
movq 0x38(%rsp), %rcx
movl %ecx, 0x88(%r14)
movl %eax, 0xc8(%r14)
movl %esi, 0xc0(%r14)
cmpl $0x0, 0x1c(%rsp)
je 0x10696
movl $0x1, 0x4(%r14)
xorl %ebx, %ebx
movq 0x90(%rsp), %rax
testq %rax, %rax
je 0x10676
movq %rbx, (%rax)
movl $0x0, 0x90(%r14)
movl $0x1, %ebx
cmpl $0x0, 0x1c(%rsp)
jne 0x105b5
jmp 0x105ab
movslq %edi, %rax
movslq %ecx, %rdi
imulq %rax, %rdi
movl $0x4, %esi
callq 0x3145c
testq %rax, %rax
je 0x1059c
movq %rax, %rbx
movq %r14, %rdi
movq %rax, %rsi
movq 0x28(%rsp), %rdx
movq 0x38(%rsp), %rcx
movl %ecx, %r8d
xorl %r9d, %r9d
callq 0x10e13
testl %eax, %eax
je 0x105a1
cmpl $0x0, 0x4c(%r14)
jne 0x105a1
jmp 0x10666
xorl %r8d, %r8d
movq %rbp, %r12
jmp 0x10064
|
/PKRoma[P]libwebp/src/dec/vp8l_dec.c
|
VP8LDecodeAlphaImageStream
|
// Decodes alpha-plane rows up to (but not including) 'last_row'.
// Returns 1 immediately if those rows were already produced; otherwise
// dispatches to the 8-bit fast path or the generic ARGB path.
int VP8LDecodeAlphaImageStream(ALPHDecoder* const alph_dec, int last_row) {
  VP8LDecoder* const dec = alph_dec->vp8l_dec_;
  assert(dec != NULL);
  assert(last_row <= dec->height_);
  if (dec->last_row_ >= last_row) {
    return 1;  // already done
  }
  // The generic path post-processes rows, which needs the alpha tables.
  if (!alph_dec->use_8b_decode_) WebPInitAlphaProcessing();
  // Decode (with special row processing).
  if (alph_dec->use_8b_decode_) {
    return DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_,
                           dec->height_, last_row);
  }
  return DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
                         last_row, ExtractAlphaRows);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq 0x18(%rdi), %rbx
movl $0x1, %eax
cmpl %esi, 0x8c(%rbx)
jge 0x10e00
movl %esi, %r13d
movq %rdi, %r14
cmpl $0x0, 0xc0(%rdi)
jne 0x10797
callq 0x135d3
cmpl $0x0, 0xc0(%r14)
je 0x10d53
movl 0x84(%rbx), %ebp
movl 0x90(%rbx), %ecx
movl %ecx, %eax
cltd
idivl %ebp
movl %eax, %r14d
movl 0x88(%rbx), %eax
imull %ebp, %eax
movl %eax, 0x24(%rsp)
movl %ebp, %eax
imull %r13d, %eax
movl %eax, 0x2c(%rsp)
cmpl %eax, %ecx
movl %ecx, 0x8(%rsp)
jge 0x107fa
movl %edx, %r12d
movl 0xc4(%rbx), %ecx
testl %ecx, %ecx
je 0x10808
movl %r14d, %eax
sarl %cl, %eax
imull 0xc8(%rbx), %eax
movl %r12d, %edx
sarl %cl, %edx
movq 0xd0(%rbx), %rcx
addl %eax, %edx
movslq %edx, %rax
movslq (%rcx,%rax,4), %rax
jmp 0x1080a
leaq 0x4c(%rbx), %rax
movq %rax, 0x10(%rsp)
jmp 0x10d93
xorl %eax, %eax
leaq 0x4c(%rbx), %rcx
movq %rcx, 0x10(%rsp)
cmpl $0x0, 0x4c(%rbx)
jne 0x10d93
movq 0x18(%rbx), %rcx
leaq 0x28(%rbx), %rdx
movq %rdx, 0x18(%rsp)
movl 0xc0(%rbx), %edx
movl %edx, 0x28(%rsp)
imulq $0x238, %rax, %rax # imm = 0x238
addq 0xe0(%rbx), %rax
movq %rax, 0x30(%rsp)
movq %rcx, 0x38(%rsp)
leaq 0x1(%rcx), %rax
movq %rax, 0x40(%rsp)
movl %r13d, 0xc(%rsp)
testl %r12d, 0x28(%rsp)
jne 0x108a3
movl 0xc4(%rbx), %ecx
testl %ecx, %ecx
je 0x1088e
movl %r14d, %eax
sarl %cl, %eax
imull 0xc8(%rbx), %eax
movl %r12d, %edx
sarl %cl, %edx
movq 0xd0(%rbx), %rcx
addl %eax, %edx
movslq %edx, %rax
movslq (%rcx,%rax,4), %rax
jmp 0x10890
xorl %eax, %eax
imulq $0x238, %rax, %rax # imm = 0x238
addq 0xe0(%rbx), %rax
movq %rax, 0x30(%rsp)
movl 0x48(%rbx), %edx
cmpl $0x20, %edx
jl 0x108b8
movq 0x18(%rsp), %rdi
callq 0x3070f
movl 0x48(%rbx), %edx
movq 0x30(%rsp), %rax
movq (%rax), %rax
movq 0x18(%rsp), %rcx
movq (%rcx), %rsi
movq %rsi, %rdi
movl %edx, %ecx
shrq %cl, %rdi
movzbl %dil, %ecx
leaq (%rax,%rcx,4), %rdi
movzbl (%rdi), %eax
cmpl $0x9, %eax
jb 0x1090c
addl $0x8, %edx
movq %rsi, %r8
movl %edx, %ecx
shrq %cl, %r8
movzwl 0x2(%rdi), %ecx
leaq (%rdi,%rcx,4), %rdi
addb $-0x8, %al
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
movl %eax, %ecx
shll %cl, %r9d
notl %r9d
andl %r8d, %r9d
leaq (%rdi,%r9,4), %rdi
movb (%rdi), %al
movl 0x8(%rsp), %r15d
movzbl %al, %eax
addl %edx, %eax
movl %eax, 0x48(%rbx)
movzwl 0x2(%rdi), %r13d
cmpl $0xff, %r13d
ja 0x10973
movslq %r15d, %rax
movq 0x38(%rsp), %rcx
movb %r13b, (%rcx,%rax)
incl %r15d
incl %r12d
cmpl %ebp, %r12d
jl 0x10969
leal 0x1(%r14), %r13d
cmpl 0xc(%rsp), %r14d
setl %al
testb $0xf, %r13b
sete %cl
andb %al, %cl
xorl %r12d, %r12d
cmpb $0x1, %cl
jne 0x10966
movq %rbx, %rdi
movl %r13d, %esi
callq 0x1215f
movl %r13d, %r14d
movl 0xc(%rsp), %r13d
jmp 0x10d17
movb $0x1, %dl
cmpl $0x117, %r13d # imm = 0x117
ja 0x10da7
leal -0x100(%r13), %r15d
cmpl $0x4, %r15d
jb 0x109bc
leal -0x102(%r13), %ecx
shrl %ecx
andl $0x1, %r13d
orl $0x2, %r13d
shll %cl, %r13d
movq 0x18(%rsp), %rdi
movl %ecx, %esi
callq 0x307bd
movl %eax, %r15d
addl %r13d, %r15d
movq 0x28(%rbx), %rsi
movl 0x48(%rbx), %eax
movq 0x30(%rsp), %rcx
movq 0x20(%rcx), %rdx
movq %rsi, %rdi
movl %eax, %ecx
shrq %cl, %rdi
movzbl %dil, %ecx
leaq (%rdx,%rcx,4), %rdi
movzbl (%rdi), %edx
cmpl $0x9, %edx
jb 0x10a07
addl $0x8, %eax
movl %eax, %ecx
shrq %cl, %rsi
movzwl 0x2(%rdi), %ecx
leaq (%rdi,%rcx,4), %rdi
addb $-0x8, %dl
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
movl %edx, %ecx
shll %cl, %r8d
notl %r8d
andl %esi, %r8d
leaq (%rdi,%r8,4), %rdi
movb (%rdi), %dl
movzbl %dl, %ecx
addl %eax, %ecx
movl %ecx, 0x48(%rbx)
movzwl 0x2(%rdi), %r13d
cmpl $0x20, %ecx
jl 0x10a23
movq 0x18(%rsp), %rdi
callq 0x3070f
cmpw $0x4, %r13w
jb 0x10a4d
leal -0x2(%r13), %ecx
shrl %ecx
andl $0x1, %r13d
orl $0x2, %r13d
shll %cl, %r13d
movq 0x18(%rsp), %rdi
movl %ecx, %esi
callq 0x307bd
addl %r13d, %eax
movl %eax, %r13d
movl 0x8(%rsp), %ecx
leal 0x1(%r13), %eax
cmpl $0x79, %eax
jl 0x10a60
addl $-0x77, %r13d
jmp 0x10a90
movslq %r13d, %rax
leaq 0x43dd6(%rip), %rdx # 0x54840
movzbl (%rax,%rdx), %eax
movl %eax, %r13d
shrl $0x4, %r13d
andl $0xf, %eax
imull %ebp, %r13d
subl %eax, %r13d
addl $0x8, %r13d
cmpl $0x2, %r13d
movl $0x1, %eax
cmovll %eax, %r13d
cmpl %r13d, %ecx
jl 0x10e0f
incl %r15d
movl 0x24(%rsp), %eax
subl %ecx, %eax
cmpl %r15d, %eax
jl 0x10e0f
movl %ecx, %r8d
movq 0x38(%rsp), %rax
leaq (%rax,%r8), %rdi
movl %r13d, %edx
movq %rdi, %rsi
subq %rdx, %rsi
negq %rdx
cmpl $0x8, %r15d
jl 0x10ae6
cmpl $0x4, %r13d
je 0x10b23
cmpl $0x2, %r13d
je 0x10b17
cmpl $0x1, %r13d
jne 0x10ae6
movzbl (%rsi), %eax
imull $0x1010101, %eax, %eax # imm = 0x1010101
jmp 0x10b25
cmpl %r15d, %r13d
jge 0x10b05
movl %r15d, %eax
movl 0xc(%rsp), %r13d
movb (%rdi,%rdx), %cl
movb %cl, (%rdi)
incq %rdi
decq %rax
jne 0x10af3
jmp 0x10c90
movslq %r15d, %rdx
callq 0x45d0
movl 0xc(%rsp), %r13d
jmp 0x10c90
movzwl (%rsi), %ecx
movl %ecx, %eax
shll $0x10, %eax
orl %ecx, %eax
jmp 0x10b25
movl (%rsi), %eax
testb $0x3, %dil
movl 0xc(%rsp), %r13d
je 0x10b5e
addq 0x40(%rsp), %r8
movl %r15d, %ecx
movq %r8, %rsi
movb (%rdi,%rdx), %r9b
movb %r9b, (%rdi)
incq %rdi
roll $0x18, %eax
decl %ecx
incq %rsi
testb $0x3, %r8b
movq %rsi, %r8
jne 0x10b3b
addq %rdi, %rdx
movq %rdx, %rsi
jmp 0x10b61
movl %r15d, %ecx
movl %ecx, %edx
sarl $0x2, %edx
testl %edx, %edx
jle 0x10c6d
movl %edx, %r9d
leal 0x3(%rdx), %r8d
decq %r9
movq %r9, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
shrl $0x2, %r8d
shlq $0x4, %r8
pxor 0x43c1e(%rip), %xmm0 # 0x547b0
xorl %r9d, %r9d
movdqa 0x43a23(%rip), %xmm1 # 0x545c0
movdqa 0x43bfb(%rip), %xmm2 # 0x547a0
movdqa %xmm1, %xmm3
pxor 0x43bff(%rip), %xmm3 # 0x547b0
movdqa %xmm3, %xmm4
pcmpgtd %xmm0, %xmm4
pcmpeqd %xmm0, %xmm3
pshufd $0xf5, %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
pand %xmm4, %xmm5
pshufd $0xf5, %xmm4, %xmm3 # xmm3 = xmm4[1,1,3,3]
por %xmm5, %xmm3
movd %xmm3, %r10d
notl %r10d
testb $0x1, %r10b
je 0x10be1
movl %eax, (%rdi,%r9)
pxor 0x43be7(%rip), %xmm3 # 0x547d0
pextrw $0x4, %xmm3, %r10d
testb $0x1, %r10b
je 0x10bfa
movl %eax, 0x4(%rdi,%r9)
movdqa %xmm2, %xmm3
pxor 0x43baa(%rip), %xmm3 # 0x547b0
movdqa %xmm3, %xmm4
pcmpgtd %xmm0, %xmm4
pcmpeqd %xmm0, %xmm3
pshufd $0xf5, %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
pand %xmm4, %xmm5
pshufd $0xf5, %xmm4, %xmm3 # xmm3 = xmm4[1,1,3,3]
por %xmm5, %xmm3
pxor 0x43ba4(%rip), %xmm3 # 0x547d0
pextrw $0x0, %xmm3, %r10d
testb $0x1, %r10b
je 0x10c3d
movl %eax, 0x8(%rdi,%r9)
pextrw $0x4, %xmm3, %r10d
testb $0x1, %r10b
je 0x10c4e
movl %eax, 0xc(%rdi,%r9)
movdqa 0x43b6a(%rip), %xmm3 # 0x547c0
paddq %xmm3, %xmm1
paddq %xmm3, %xmm2
addq $0x10, %r9
cmpq %r9, %r8
jne 0x10ba5
jmp 0x10c6f
xorl %edx, %edx
leal (,%rdx,4), %eax
cmpl %ecx, %eax
jge 0x10c90
movl %edx, %eax
shlq $0x2, %rax
movl %ecx, %ecx
movb (%rsi,%rax), %dl
movb %dl, (%rdi,%rax)
incq %rax
cmpq %rax, %rcx
jne 0x10c82
addl %r15d, %r12d
cmpl %ebp, %r12d
jl 0x10cc0
cmpl %r13d, %r14d
leal 0x1(%r14), %r14d
setl %al
testb $0xf, %r14b
sete %cl
andb %al, %cl
cmpb $0x1, %cl
jne 0x10cbb
movq %rbx, %rdi
movl %r14d, %esi
callq 0x1215f
subl %ebp, %r12d
jmp 0x10c93
addl 0x8(%rsp), %r15d
cmpl 0x2c(%rsp), %r15d
jge 0x10d17
movl %r12d, %eax
andl 0x28(%rsp), %eax
je 0x10d17
movl 0xc4(%rbx), %ecx
testl %ecx, %ecx
je 0x10d02
movl %r14d, %eax
sarl %cl, %eax
imull 0xc8(%rbx), %eax
movl %r12d, %edx
sarl %cl, %edx
movq 0xd0(%rbx), %rcx
addl %eax, %edx
movslq %edx, %rax
movslq (%rcx,%rax,4), %rax
jmp 0x10d04
xorl %eax, %eax
imulq $0x238, %rax, %rax # imm = 0x238
addq 0xe0(%rbx), %rax
movq %rax, 0x30(%rsp)
movq 0x10(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x10d83
movq 0x40(%rbx), %rcx
xorl %eax, %eax
cmpq 0x38(%rbx), %rcx
jne 0x10d36
xorl %eax, %eax
cmpl $0x41, 0x48(%rbx)
setge %al
movq 0x10(%rsp), %rcx
movl %eax, (%rcx)
testl %eax, %eax
jne 0x10d8e
movl %r15d, 0x8(%rsp)
cmpl 0x2c(%rsp), %r15d
jl 0x1085a
jmp 0x10d8e
movq 0x18(%rbx), %rsi
movl 0x84(%rbx), %edx
movl 0x88(%rbx), %ecx
leaq 0x997(%rip), %r9 # 0x11701
movq %rbx, %rdi
movl %r13d, %r8d
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x10e13
movq 0x10(%rsp), %rax
movl $0x1, (%rax)
movl %r15d, 0x8(%rsp)
cmpl %r13d, %r14d
cmovll %r14d, %r13d
movq %rbx, %rdi
movl %r13d, %esi
callq 0x1215f
xorl %edx, %edx
movl $0x1, %eax
movq 0x10(%rsp), %rcx
cmpl $0x0, (%rcx)
jne 0x10dcb
movq 0x40(%rbx), %rcx
xorl %eax, %eax
cmpq 0x38(%rbx), %rcx
jne 0x10dcb
xorl %eax, %eax
cmpl $0x41, 0x48(%rbx)
setge %al
movq 0x10(%rsp), %rcx
movl %eax, (%rcx)
testb %dl, %dl
jne 0x10de4
testl %eax, %eax
je 0x10df1
movl 0x8(%rsp), %ecx
cmpl 0x24(%rsp), %ecx
jge 0x10df1
leal 0x3(,%rax,2), %eax
movl %eax, (%rbx)
xorl %eax, %eax
jmp 0x10e00
movl 0x8(%rsp), %eax
movl %eax, 0x90(%rbx)
movl $0x1, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movb $0x1, %dl
jmp 0x10da7
|
/PKRoma[P]libwebp/src/dec/vp8l_dec.c
|
VP8LDecodeHeader
|
// Parses the VP8L header and the level-0 image stream header out of io->data.
// On success io->width/io->height are filled in and 1 is returned.
// On any failure the decoder is cleared, dec->status_ reflects the error,
// and 0 is returned.
int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io) {
  int width, height, has_alpha;
  if (dec == NULL) return 0;
  if (io == NULL) {
    dec->status_ = VP8_STATUS_INVALID_PARAM;
    return 0;
  }
  dec->io_ = io;
  dec->status_ = VP8_STATUS_OK;
  VP8LInitBitReader(&dec->br_, io->data, io->data_size);
  if (ReadImageInfo(&dec->br_, &width, &height, &has_alpha)) {
    dec->state_ = READ_DIM;
    io->width = width;
    io->height = height;
    if (DecodeImageStream(width, height, 1, dec, NULL)) {
      return 1;  // header fully parsed
    }
    // DecodeImageStream already set dec->status_; fall through to cleanup.
  } else {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
  }
  VP8LClear(dec);
  assert(dec->status_ != VP8_STATUS_OK);
  return 0;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x18, %rsp
xorl %ebp, %ebp
testq %rdi, %rdi
je 0x118df
movq %rsi, %r14
movq %rdi, %rbx
testq %rsi, %rsi
je 0x118c7
movq %r14, 0x8(%rbx)
movl $0x0, (%rbx)
leaq 0x28(%rbx), %r15
movq 0x60(%r14), %rdx
movq 0x68(%r14), %rsi
movq %r15, %rdi
callq 0x3068d
leaq 0x10(%rsp), %rsi
leaq 0xc(%rsp), %rdx
leaq 0x14(%rsp), %rcx
movq %r15, %rdi
callq 0xf9c1
testl %eax, %eax
je 0x118cf
movl $0x2, 0x4(%rbx)
movl 0x10(%rsp), %edi
movl %edi, (%r14)
movl 0xc(%rsp), %esi
movl %esi, 0x4(%r14)
movl $0x1, %ebp
movl $0x1, %edx
movq %rbx, %rcx
xorl %r8d, %r8d
callq 0xfce7
testl %eax, %eax
jne 0x118df
jmp 0x118d5
movl $0x2, (%rbx)
jmp 0x118df
movl $0x3, (%rbx)
movq %rbx, %rdi
callq 0xfa73
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x18, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/vp8l_dec.c
|
ExtractPalettedAlphaRows
|
// Emits decoded paletted alpha rows [start_row, last_row) through the
// color-indexing inverse transform, then applies the alpha filter.
static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int last_row) {
  // For vertical and gradient filtering, we need to decode the part above the
  // crop_top row, in order to have the correct spatial predictors.
  ALPHDecoder* const alph_dec = (ALPHDecoder*)dec->io_->opaque;
  int top_row;
  int start_row;
  if (alph_dec->filter_ == WEBP_FILTER_NONE ||
      alph_dec->filter_ == WEBP_FILTER_HORIZONTAL) {
    top_row = dec->io_->crop_top;
  } else {
    top_row = dec->last_row_;
  }
  start_row = (dec->last_row_ < top_row) ? top_row : dec->last_row_;
  assert(last_row <= dec->io_->crop_bottom);
  if (last_row > start_row) {
    // Special method for paletted alpha data: only the cropped area is
    // processed.
    const int width = dec->io_->width;
    uint8_t* const out = alph_dec->output_ + width * start_row;
    const uint8_t* const in =
        (uint8_t*)dec->pixels_ + dec->width_ * start_row;
    VP8LTransform* const transform = &dec->transforms_[0];
    assert(dec->next_transform_ == 1);
    assert(transform->type_ == COLOR_INDEXING_TRANSFORM);
    VP8LColorIndexInverseTransformAlpha(transform, start_row, last_row,
                                        in, out);
    AlphaApplyFilter(alph_dec, start_row, last_row, out, width);
  }
  dec->last_out_row_ = last_row;
  dec->last_row_ = last_row;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movq %rdi, %rbx
movq 0x8(%rdi), %rax
movq 0x38(%rax), %r13
leaq 0x80(%rax), %rcx
leaq 0x8c(%rdi), %rdx
cmpl $0x2, 0xc(%r13)
cmovbq %rcx, %rdx
movl %esi, %r12d
movl (%rdx), %esi
movl 0x8c(%rdi), %ecx
cmpl %esi, %ecx
cmovgl %ecx, %esi
movl %r12d, %ebp
subl %esi, %ebp
jle 0x1222f
movslq (%rax), %r14
movslq %esi, %rax
movq %r14, %r15
imulq %rax, %r15
addq 0xc8(%r13), %r15
movslq 0x84(%rbx), %rcx
imulq %rax, %rcx
addq 0x18(%rbx), %rcx
leaq 0xf8(%rbx), %rdi
movl %r12d, %edx
movq %r15, %r8
callq 0x15f61
cmpl $0x0, 0xc(%r13)
je 0x1222f
movl %r12d, 0xc(%rsp)
movq %rbx, 0x10(%rsp)
movq 0xd0(%r13), %rdi
leaq 0x57472(%rip), %rbx # 0x69670
movq %r15, %r12
movl 0xc(%r13), %eax
movq %r15, %rsi
movq %r15, %rdx
movl %r14d, %ecx
callq *(%rbx,%rax,8)
addq %r14, %r15
movq %r12, %rdi
decl %ebp
jne 0x121fe
subq %r14, %r15
movq %r15, 0xd0(%r13)
movq 0x10(%rsp), %rbx
movl 0xc(%rsp), %r12d
movl %r12d, 0x94(%rbx)
movl %r12d, 0x8c(%rbx)
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/vp8l_dec.c
|
DecodeIntoRGBABuffer
|
// Decodes 'data' straight into a caller-owned RGBA-style buffer 'rgba'
// (interpretation fixed by 'colorspace'). Returns 'rgba' on success,
// NULL on a NULL buffer or any decode failure. The buffer is flagged as
// external memory so the decoder never frees it.
static uint8_t* DecodeIntoRGBABuffer(WEBP_CSP_MODE colorspace,
                                     const uint8_t* const data,
                                     size_t data_size,
                                     uint8_t* const rgba,
                                     int stride, size_t size) {
  WebPDecParams params;
  WebPDecBuffer buf;
  if (rgba == NULL) return NULL;
  WebPInitDecBuffer(&buf);
  WebPResetDecParams(&params);
  params.output = &buf;
  buf.colorspace = colorspace;
  buf.u.RGBA.rgba = rgba;
  buf.u.RGBA.stride = stride;
  buf.u.RGBA.size = size;
  buf.is_external_memory = 1;  // decoder must not take ownership
  return (DecodeInto(data, data_size, &params) == VP8_STATUS_OK) ? rgba
                                                                 : NULL;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
testq %rcx, %rcx
je 0x129a1
movl %r8d, %ebp
movq %rcx, %rbx
movq %rsi, %r12
movl %edi, %r14d
leaq 0x80(%rsp), %r15
movq %r15, %rdi
movl $0x209, %esi # imm = 0x209
movq %rdx, 0x8(%rsp)
movq %r9, (%rsp)
callq 0xa832
xorps %xmm0, %xmm0
leaq 0x10(%rsp), %rdx
movups %xmm0, 0x8(%rdx)
movups %xmm0, 0x18(%rdx)
movups %xmm0, 0x28(%rdx)
movups %xmm0, 0x38(%rdx)
movups %xmm0, 0x48(%rdx)
movups %xmm0, 0x58(%rdx)
xorl %r13d, %r13d
movq %r13, 0x68(%rdx)
movq %r15, (%rdx)
movl %r14d, (%r15)
movq %rbx, 0x10(%r15)
movl %ebp, 0x18(%r15)
movq (%rsp), %rax
movq %rax, 0x20(%r15)
movl $0x1, 0xc(%r15)
movq %r12, %rdi
movq 0x8(%rsp), %rsi
callq 0x12b06
testl %eax, %eax
cmoveq %rbx, %r13
jmp 0x129a4
xorl %r13d, %r13d
movq %r13, %rax
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/webp_dec.c
|
TransformOne_C
|
// Inverse transform of one 4x4 coefficient block: a vertical butterfly pass
// into 'work', then a horizontal pass whose results are added into 'dst'
// via the STORE macro (one row of output per BPS-strided destination row).
static void TransformOne_C(const int16_t* in, uint8_t* dst) {
  int work[4 * 4];
  int i;
  for (i = 0; i < 4; ++i) {  // vertical pass (one input column per round)
    int* const row = work + 4 * i;
    const int even0 = in[i + 0] + in[i + 8];        // [-4096, 4094]
    const int even1 = in[i + 0] - in[i + 8];        // [-4095, 4095]
    const int odd0 = MUL2(in[i + 4]) - MUL1(in[i + 12]);  // [-3783, 3783]
    const int odd1 = MUL1(in[i + 4]) + MUL2(in[i + 12]);  // [-3785, 3781]
    row[0] = even0 + odd1;  // [-7881, 7875]
    row[1] = even1 + odd0;  // [-7878, 7878]
    row[2] = even1 - odd0;  // [-7878, 7878]
    row[3] = even0 - odd1;  // [-7877, 7879]
  }
  // Each pass is expanding the dynamic range by ~3.85 (upper bound).
  // The exact value is (2. + (20091 + 35468) / 65536).
  // After the second pass, maximum interval is [-3794, 3794], assuming
  // an input in [-2048, 2047] interval. We then need to add a dst value
  // in the [0, 255] range.
  // In the worst case scenario, the input to clip_8b() can be as large as
  // [-60713, 60968].
  for (i = 0; i < 4; ++i) {  // horizontal pass (one work column per round)
    const int* const tmp = work + i;
    const int dc = tmp[0] + 4;  // +4 pre-biases the final >>3 rounding
    const int a = dc + tmp[8];
    const int b = dc - tmp[8];
    const int c = MUL2(tmp[4]) - MUL1(tmp[12]);
    const int d = MUL1(tmp[4]) + MUL2(tmp[12]);
    STORE(0, 0, a + d);
    STORE(1, 0, b + c);
    STORE(2, 0, b - c);
    STORE(3, 0, a - d);
    dst += BPS;
  }
}
|
pushq %rbx
movq (%rdi), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
movq 0x10(%rdi), %xmm1
punpcklwd %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
psrad $0x10, %xmm2
movdqa %xmm2, %xmm1
paddd %xmm0, %xmm1
psubd %xmm2, %xmm0
movq 0x8(%rdi), %xmm5
punpcklwd %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
psrad $0x10, %xmm3
movdqa 0x3f899(%rip), %xmm2 # 0x54920
pshufd $0xf5, %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
movdqa 0x3f89c(%rip), %xmm6 # 0x54930
pmulhw %xmm6, %xmm5
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm5
paddd %xmm3, %xmm5
pmuludq %xmm2, %xmm3
pshufd $0xe8, %xmm3, %xmm3 # xmm3 = xmm3[0,2,2,3]
pmuludq %xmm2, %xmm4
pshufd $0xe8, %xmm4, %xmm4 # xmm4 = xmm4[0,2,2,3]
punpckldq %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
psrad $0x10, %xmm3
movq 0x18(%rdi), %xmm4
punpcklwd %xmm4, %xmm7 # xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
psrad $0x10, %xmm7
pmulhw %xmm6, %xmm4
punpcklwd %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm4
paddd %xmm7, %xmm4
psubd %xmm4, %xmm3
pshufd $0xf5, %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
pmuludq %xmm2, %xmm7
pshufd $0xe8, %xmm7, %xmm6 # xmm6 = xmm7[0,2,2,3]
pmuludq %xmm2, %xmm4
pshufd $0xe8, %xmm4, %xmm2 # xmm2 = xmm4[0,2,2,3]
punpckldq %xmm2, %xmm6 # xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
psrad $0x10, %xmm6
paddd %xmm5, %xmm6
movdqa %xmm6, %xmm2
paddd %xmm1, %xmm2
movdqa %xmm3, %xmm4
paddd %xmm0, %xmm4
psubd %xmm3, %xmm0
psubd %xmm6, %xmm1
movdqa %xmm0, %xmm3
punpckldq %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
movdqa %xmm2, %xmm5
punpckldq %xmm4, %xmm5 # xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
movdqa %xmm5, %xmm6
punpcklqdq %xmm3, %xmm6 # xmm6 = xmm6[0],xmm3[0]
punpckhqdq %xmm3, %xmm5 # xmm5 = xmm5[1],xmm3[1]
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
punpckhdq %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
movdqa %xmm2, %xmm1
punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
punpckhqdq %xmm0, %xmm2 # xmm2 = xmm2[1],xmm0[1]
movdqa %xmm2, -0x10(%rsp)
movdqa %xmm1, -0x20(%rsp)
movdqa %xmm5, -0x30(%rsp)
movdqa %xmm6, -0x40(%rsp)
xorl %eax, %eax
movl -0x40(%rsp,%rax), %edx
movl -0x30(%rsp,%rax), %r10d
movl -0x20(%rsp,%rax), %r11d
leal (%rdx,%r11), %ecx
addl $0x4, %ecx
movl -0x10(%rsp,%rax), %r9d
imull $0x4e7b, %r10d, %r8d # imm = 0x4E7B
sarl $0x10, %r8d
addl %r10d, %r8d
imull $0x8a8c, %r9d, %edi # imm = 0x8A8C
sarl $0x10, %edi
addl %r8d, %edi
movzbl (%rsi,%rax,8), %r8d
leal (%rdi,%rcx), %ebx
sarl $0x3, %ebx
addl %r8d, %ebx
testl %ebx, %ebx
jg 0x151b2
xorl %ebx, %ebx
movl $0xff, %r8d
cmpl %r8d, %ebx
jl 0x151c2
movl $0xff, %ebx
addl $0x4, %edx
subl %r11d, %edx
imull $0x8a8c, %r10d, %r10d # imm = 0x8A8C
sarl $0x10, %r10d
imull $0x4e7b, %r9d, %r11d # imm = 0x4E7B
sarl $0x10, %r11d
addl %r9d, %r11d
subl %r11d, %r10d
movb %bl, (%rsi,%rax,8)
movzbl 0x1(%rsi,%rax,8), %r11d
leal (%r10,%rdx), %r9d
sarl $0x3, %r9d
addl %r11d, %r9d
testl %r9d, %r9d
jg 0x15200
xorl %r9d, %r9d
cmpl %r8d, %r9d
jl 0x1520b
movl $0xff, %r9d
movb %r9b, 0x1(%rsi,%rax,8)
movzbl 0x2(%rsi,%rax,8), %r9d
subl %r10d, %edx
sarl $0x3, %edx
addl %r9d, %edx
testl %edx, %edx
jg 0x15225
xorl %edx, %edx
cmpl %r8d, %edx
jl 0x1522f
movl $0xff, %edx
movb %dl, 0x2(%rsi,%rax,8)
movzbl 0x3(%rsi,%rax,8), %edx
subl %edi, %ecx
sarl $0x3, %ecx
addl %edx, %ecx
testl %ecx, %ecx
jg 0x15245
xorl %ecx, %ecx
cmpl %r8d, %ecx
jl 0x1524f
movl $0xff, %ecx
movb %cl, 0x3(%rsi,%rax,8)
addq $0x4, %rax
cmpl $0x10, %eax
jne 0x15169
popq %rbx
retq
|
/PKRoma[P]libwebp/src/dsp/dec.c
|
WebPCopyPixels
|
void WebPCopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
assert(src != NULL && dst != NULL);
assert(src->width == dst->width && src->height == dst->height);
assert(src->use_argb && dst->use_argb);
WebPCopyPlane((uint8_t*)src->argb, 4 * src->argb_stride, (uint8_t*)dst->argb,
4 * dst->argb_stride, 4 * src->width, src->height);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movl 0xc(%rdi), %ebp
testl %ebp, %ebp
jle 0x31565
movslq 0x8(%rdi), %rbx
shlq $0x2, %rbx
movslq 0x50(%rsi), %r12
shlq $0x2, %r12
movq 0x48(%rsi), %r14
movslq 0x50(%rdi), %r13
shlq $0x2, %r13
movq 0x48(%rdi), %r15
incl %ebp
movq %r14, %rdi
movq %r15, %rsi
movq %rbx, %rdx
callq 0x45d0
addq %r13, %r15
addq %r12, %r14
decl %ebp
cmpl $0x1, %ebp
ja 0x3154a
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/utils/utils.c
|
VP8InitDithering
|
void VP8InitDithering(const WebPDecoderOptions* const options,
VP8Decoder* const dec) {
assert(dec != NULL);
if (options != NULL) {
const int d = options->dithering_strength;
const int max_amp = (1 << VP8_RANDOM_DITHER_FIX) - 1;
const int f = (d < 0) ? 0 : (d > 100) ? max_amp : (d * max_amp / 100);
if (f > 0) {
int s;
int all_amp = 0;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8QuantMatrix* const dqm = &dec->dqm_[s];
if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) {
const int idx = (dqm->uv_quant_ < 0) ? 0 : dqm->uv_quant_;
dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3;
}
all_amp |= dqm->dither_;
}
if (all_amp != 0) {
VP8InitRandom(&dec->dithering_rg_, 1.0f);
dec->dither_ = 1;
}
}
// potentially allow alpha dithering
dec->alpha_dithering_ = options->alpha_dithering_strength;
if (dec->alpha_dithering_ > 100) {
dec->alpha_dithering_ = 100;
} else if (dec->alpha_dithering_ < 0) {
dec->alpha_dithering_ = 0;
}
}
}
|
testq %rdi, %rdi
je 0x31bac
pushq %r14
pushq %rbx
pushq %rax
movq %rsi, %rbx
movq %rdi, %r14
movl 0x2c(%rdi), %ecx
testl %ecx, %ecx
js 0x31b85
movl $0xff, %eax
cmpl $0x64, %ecx
ja 0x31b12
testl %ecx, %ecx
je 0x31b85
movl %ecx, %eax
shll $0x8, %eax
subl %ecx, %eax
movzwl %ax, %eax
shrl $0x2, %eax
imull $0x147b, %eax, %eax # imm = 0x147B
shrl $0x11, %eax
xorl %ecx, %ecx
leaq 0x25435(%rip), %rdx # 0x56f50
xorl %edi, %edi
xorl %esi, %esi
movl 0x43c(%rbx,%rdi), %r8d
cmpl $0xc, %r8d
jge 0x31b4b
testl %r8d, %r8d
cmovlel %ecx, %r8d
movzbl (%r8,%rdx), %r8d
imull %eax, %r8d
shrl $0x3, %r8d
movl %r8d, 0x440(%rbx,%rdi)
jmp 0x31b53
movl 0x440(%rbx,%rdi), %r8d
orl %r8d, %esi
addq $0x20, %rdi
cmpq $0x80, %rdi
jne 0x31b1f
testl %esi, %esi
je 0x31b85
leaq 0x33c(%rbx), %rdi
movss 0x250fe(%rip), %xmm0 # 0x56c74
callq 0x31100
movl $0x1, 0x338(%rbx)
movl 0x34(%r14), %ecx
movl %ecx, 0xbc8(%rbx)
movl $0x64, %eax
cmpl $0x64, %ecx
jg 0x31b9f
testl %ecx, %ecx
jns 0x31ba5
xorl %eax, %eax
movl %eax, 0xbc8(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/PKRoma[P]libwebp/src/dec/frame_dec.c
|
ReconstructRow
|
static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) {
int j;
int mb_x;
const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
// Initialize left-most block.
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
for (j = 0; j < 8; ++j) {
u_dst[j * BPS - 1] = 129;
v_dst[j * BPS - 1] = 129;
}
// Init top-left sample on left column too.
if (mb_y > 0) {
y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129;
} else {
// we only need to do this init once at block (0,0).
// Afterward, it remains valid for the whole topmost row.
memset(y_dst - BPS - 1, 127, 16 + 4 + 1);
memset(u_dst - BPS - 1, 127, 8 + 1);
memset(v_dst - BPS - 1, 127, 8 + 1);
}
// Reconstruct one row.
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter.
if (mb_x > 0) {
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
for (j = -1; j < 8; ++j) {
Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]);
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
}
{
// bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y_;
int n;
if (mb_y > 0) {
memcpy(y_dst - BPS, top_yuv[0].y, 16);
memcpy(u_dst - BPS, top_yuv[0].u, 8);
memcpy(v_dst - BPS, top_yuv[0].v, 8);
}
// predict and add residuals
if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) {
if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
}
}
// replicate the top-right pixels below
top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0];
// predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst);
}
} else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y, block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) {
DoTransform(bits, coeffs + n * 16, y_dst + kScan[n]);
}
}
}
{
// Chroma
const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
DoUVTransform(bits_uv >> 8, coeffs + 20 * 16, v_dst);
}
// stash away top samples for next block
if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
}
}
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
}
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x98, %rsp
movq %rdi, %r12
movl 0x4(%rsi), %eax
movl %eax, 0xc(%rsp)
movq %rsi, 0x80(%rsp)
movslq (%rsi), %rsi
movq 0xb20(%rdi), %rbp
leaq 0x248(%rbp), %rax
movq %rax, 0x38(%rsp)
xorl %eax, %eax
movb $-0x7f, 0x27(%rbp,%rax)
addq $0x20, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x31d34
leaq 0x258(%rbp), %r13
xorl %eax, %eax
movb $-0x7f, %cl
movb %cl, 0x247(%rbp,%rax)
movb %cl, 0x257(%rbp,%rax)
addq $0x20, %rax
cmpq $0x100, %rax # imm = 0x100
jne 0x31d50
cmpl $0x0, 0xc(%rsp)
jle 0x31d84
movb $-0x7f, %al
movb %al, 0x237(%rbp)
movb %al, 0x227(%rbp)
movb %al, 0x7(%rbp)
jmp 0x31db9
leaq 0x227(%rbp), %rax
movabsq $0x7f7f7f7f7f7f7f7f, %rcx # imm = 0x7F7F7F7F7F7F7F7F
movq %rcx, 0x14(%rbp)
movaps 0x23ad0(%rip), %xmm0 # 0x55870
movups %xmm0, 0x7(%rbp)
movb $0x7f, %dl
movb %dl, 0x8(%rax)
movq %rcx, (%rax)
movb %dl, 0x23f(%rbp)
movq %rcx, 0x237(%rbp)
cmpl $0x0, 0x198(%r12)
jle 0x3225b
leaq 0x28(%rbp), %rax
movq %rax, 0x18(%rsp)
leaq 0x18(%rbp), %rax
movq %rax, 0x20(%rsp)
leaq 0x8(%rbp), %rax
movq %rax, 0x58(%rsp)
xorl %eax, %eax
cmpl $0x0, 0xc(%rsp)
sete %al
leal (,%rax,4), %ecx
movq %rcx, 0x28(%rsp)
addq $0x5, %rax
movq %rax, 0x30(%rsp)
leaq 0x208(%rbp), %rax
movq %rax, 0x50(%rsp)
movq %rsi, %rax
shlq $0x4, %rax
movq %rax, 0x68(%rsp)
shlq $0x3, %rsi
movq %rsi, 0x78(%rsp)
movl $0x301, %esi # imm = 0x301
xorl %r15d, %r15d
xorl %r14d, %r14d
movq %r12, 0x60(%rsp)
movq %r13, 0x70(%rsp)
movq 0x80(%rsp), %rax
movq 0x18(%rax), %rbx
testq %r14, %r14
je 0x31e8c
movl $0x14, %eax
movl (%rbp,%rax), %ecx
movl %ecx, -0x10(%rbp,%rax)
addq $0x20, %rax
cmpq $0x234, %rax # imm = 0x234
jne 0x31e4e
xorl %eax, %eax
movl 0x22c(%rbp,%rax), %ecx
movl 0x23c(%rbp,%rax), %edx
movl %ecx, 0x224(%rbp,%rax)
movl %edx, 0x234(%rbp,%rax)
addq $0x20, %rax
cmpq $0x120, %rax # imm = 0x120
jne 0x31e64
imulq $0x320, %r14, %rax # imm = 0x320
addq %rbx, %rax
movq %r14, %rcx
shlq $0x5, %rcx
addq 0xb08(%r12), %rcx
movl 0x314(%rax), %r13d
cmpl $0x0, 0xc(%rsp)
movq %rax, 0x10(%rsp)
movq %rsi, 0x90(%rsp)
movq %r15, 0x40(%rsp)
movq %r14, 0x48(%rsp)
movq %rcx, 0x88(%rsp)
jle 0x31f24
movups (%rcx), %xmm0
movq 0x58(%rsp), %rax
movups %xmm0, (%rax)
movq 0x10(%rcx), %rax
movq %rax, 0x228(%rbp)
movq 0x18(%rcx), %rax
movq %rax, 0x238(%rbp)
movq 0x10(%rsp), %rax
cmpb $0x0, 0x300(%rax)
je 0x31f39
movslq 0x198(%r12), %rax
decq %rax
cmpq %rax, %r14
jge 0x31fdc
movl 0x20(%rcx), %eax
movq 0x20(%rsp), %rcx
movl %eax, (%rcx)
jmp 0x31ffb
cmpb $0x0, 0x300(%rax)
je 0x31f39
movq 0x20(%rsp), %rax
movl (%rax), %eax
jmp 0x31ffb
testq %r14, %r14
movzbl 0x301(%rax), %eax
movq 0x28(%rsp), %r15
cmoveq 0x30(%rsp), %r15
testl %eax, %eax
movl %r15d, %ecx
cmovnel %eax, %ecx
movq 0x18(%rsp), %rdi
leaq 0x3755e(%rip), %rax # 0x694c0
callq *(%rax,%rcx,8)
testl %r13d, %r13d
je 0x320a9
addq 0x40(%rsp), %rbx
xorl %r14d, %r14d
leaq 0x25063(%rip), %rax # 0x56fe0
movzwl (%r14,%rax), %esi
addq 0x18(%rsp), %rsi
movl %r13d, %eax
shrl $0x1e, %eax
leaq 0x24fac(%rip), %rcx # 0x56f40
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq %rbx, %rdi
leaq 0x375f9(%rip), %rax # 0x695a0
jmp 0x31fb3
movq %rbx, %rdi
leaq 0x375dd(%rip), %rax # 0x69590
callq *(%rax)
jmp 0x31fc5
movq %rbx, %rdi
xorl %edx, %edx
leaq 0x375c5(%rip), %rax # 0x69588
callq *(%rax)
shll $0x2, %r13d
addq $0x20, %rbx
addq $0x2, %r14
cmpq $0x20, %r14
jne 0x31f76
jmp 0x320a9
movzbl 0xf(%rcx), %eax
imull $0x1010101, %eax, %ecx # imm = 0x1010101
movq 0x20(%rsp), %rdx
movl %ecx, (%rdx)
movl %eax, %ecx
shll $0x8, %ecx
orl %eax, %ecx
movl %ecx, %eax
shll $0x10, %eax
orl %ecx, %eax
movl %eax, 0x198(%rbp)
movl %eax, 0x118(%rbp)
movl %eax, 0x98(%rbp)
addq %rbx, %r15
addq %rsi, %rbx
xorl %r14d, %r14d
leaq 0x24fc3(%rip), %rax # 0x56fe0
movzwl (%r14,%rax), %r12d
addq 0x18(%rsp), %r12
movzbl (%rbx), %eax
movq %r12, %rdi
leaq 0x374cc(%rip), %rcx # 0x69500
callq *(%rcx,%rax,8)
movl %r13d, %eax
shrl $0x1e, %eax
leaq 0x24eec(%rip), %rcx # 0x56f30
movslq (%rcx,%rax,4), %rax
addq %rcx, %rax
jmpq *%rax
movq %r15, %rdi
movq %r12, %rsi
leaq 0x37546(%rip), %rax # 0x695a0
jmp 0x32069
movq %r15, %rdi
movq %r12, %rsi
leaq 0x37527(%rip), %rax # 0x69590
callq *(%rax)
jmp 0x3207e
movq %r15, %rdi
movq %r12, %rsi
xorl %edx, %edx
leaq 0x3750c(%rip), %rax # 0x69588
callq *(%rax)
shll $0x2, %r13d
addq $0x20, %r15
incq %rbx
addq $0x2, %r14
cmpq $0x20, %r14
jne 0x32016
cmpq $0x0, 0x48(%rsp)
movq 0x28(%rsp), %r15
cmoveq 0x30(%rsp), %r15
movq 0x60(%rsp), %r12
movq 0x10(%rsp), %rax
movl 0x318(%rax), %ebx
movzbl 0x311(%rax), %eax
testl %eax, %eax
cmovnel %eax, %r15d
movq 0x38(%rsp), %rdi
leaq 0x37483(%rip), %r14 # 0x69550
callq *(%r14,%r15,8)
movq 0x70(%rsp), %r13
movq %r13, %rdi
callq *(%r14,%r15,8)
testb %bl, %bl
movq 0x40(%rsp), %r15
je 0x3210c
leaq 0x374ab(%rip), %rax # 0x69598
testb $-0x56, %bl
jne 0x320f9
leaq 0x374af(%rip), %rax # 0x695a8
movq 0x10(%rsp), %rcx
leaq 0x200(%rcx), %rdi
movq 0x38(%rsp), %rsi
callq *(%rax)
testl $0xff00, %ebx # imm = 0xFF00
movq 0x48(%rsp), %r14
je 0x32140
leaq 0x37478(%rip), %rax # 0x69598
testl $0xaa00, %ebx # imm = 0xAA00
jne 0x3212f
leaq 0x37479(%rip), %rax # 0x695a8
movq 0x10(%rsp), %rdi
addq $0x280, %rdi # imm = 0x280
movq %r13, %rsi
callq *(%rax)
movl 0x19c(%r12), %eax
decl %eax
cmpl %eax, 0xc(%rsp)
jge 0x32179
movq 0x50(%rsp), %rax
movups (%rax), %xmm0
movq 0x88(%rsp), %rcx
movups %xmm0, (%rcx)
movq 0x328(%rbp), %rax
movq %rax, 0x10(%rcx)
movq 0x338(%rbp), %rax
movq %rax, 0x18(%rcx)
movslq 0xb40(%r12), %rax
imulq 0x68(%rsp), %rax
movslq 0xb44(%r12), %rdx
movq %r14, %rsi
shlq $0x4, %rsi
addq 0xb28(%r12), %rsi
imulq 0x78(%rsp), %rdx
addq %rax, %rsi
movq 0xb30(%r12), %rax
leaq (,%r14,8), %rdi
movq 0xb38(%r12), %rcx
addq %rdi, %rcx
movq 0x18(%rsp), %r8
xorl %r9d, %r9d
movslq 0xb40(%r12), %r10
movslq %r9d, %r11
imulq %r10, %r11
movups (%r8), %xmm0
movups %xmm0, (%rsi,%r11)
incq %r9
addq $0x20, %r8
cmpq $0x10, %r9
jne 0x321ca
addq %rdi, %rax
addq %rdx, %rax
addq %rdx, %rcx
movq %r13, %rdx
xorl %esi, %esi
movslq 0xb44(%r12), %rdi
movslq %esi, %r8
imulq %r8, %rdi
movq -0x10(%rdx), %r9
movq %r9, (%rax,%rdi)
movslq 0xb44(%r12), %rdi
imulq %r8, %rdi
movq (%rdx), %r8
movq %r8, (%rcx,%rdi)
incq %rsi
addq $0x20, %rdx
cmpq $0x8, %rsi
jne 0x321fd
incq %r14
movslq 0x198(%r12), %rax
movl $0x320, %ecx # imm = 0x320
addq %rcx, %r15
movq 0x90(%rsp), %rsi
addq %rcx, %rsi
cmpq %rax, %r14
jl 0x31e38
addq $0x98, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/frame_dec.c
|
VP8EnterCritical
|
VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// Call setup() first. This may trigger additional decoding features on 'io'.
// Note: Afterward, we must call teardown() no matter what.
if (io->setup != NULL && !io->setup(io)) {
VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed");
return dec->status_;
}
// Disable filtering per user request
if (io->bypass_filtering) {
dec->filter_type_ = 0;
}
// Define the area where we can skip in-loop filtering, in case of cropping.
//
// 'Simple' filter reads two luma samples outside of the macroblock
// and filters one. It doesn't filter the chroma samples. Hence, we can
// avoid doing the in-loop filtering before crop_top/crop_left position.
// For the 'Complex' filter, 3 samples are read and up to 3 are filtered.
// Means: there's a dependency chain that goes all the way up to the
// top-left corner of the picture (MB #0). We must filter all the previous
// macroblocks.
{
const int extra_pixels = kFilterExtraRows[dec->filter_type_];
if (dec->filter_type_ == 2) {
// For complex filter, we need to preserve the dependency chain.
dec->tl_mb_x_ = 0;
dec->tl_mb_y_ = 0;
} else {
// For simple filter, we can filter only the cropped region.
// We include 'extra_pixels' on the other side of the boundary, since
// vertical or horizontal filtering of the previous macroblock can
// modify some abutting pixels.
dec->tl_mb_x_ = (io->crop_left - extra_pixels) >> 4;
dec->tl_mb_y_ = (io->crop_top - extra_pixels) >> 4;
if (dec->tl_mb_x_ < 0) dec->tl_mb_x_ = 0;
if (dec->tl_mb_y_ < 0) dec->tl_mb_y_ = 0;
}
// We need some 'extra' pixels on the right/bottom.
dec->br_mb_y_ = (io->crop_bottom + 15 + extra_pixels) >> 4;
dec->br_mb_x_ = (io->crop_right + 15 + extra_pixels) >> 4;
if (dec->br_mb_x_ > dec->mb_w_) {
dec->br_mb_x_ = dec->mb_w_;
}
if (dec->br_mb_y_ > dec->mb_h_) {
dec->br_mb_y_ = dec->mb_h_;
}
}
PrecomputeFilterStrengths(dec);
return VP8_STATUS_OK;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rsi, %r14
movq %rdi, %rbx
movq 0x48(%rsi), %rax
testq %rax, %rax
je 0x329db
movq %r14, %rdi
callq *%rax
testl %eax, %eax
je 0x32bab
cmpl $0x0, 0x70(%r14)
je 0x329f2
movl $0x0, 0xb68(%rbx)
xorl %ecx, %ecx
xorl %eax, %eax
jmp 0x32a22
movslq 0xb68(%rbx), %rax
leaq 0x2455c(%rip), %rcx # 0x56f5c
movzbl (%rax,%rcx), %ecx
cmpq $0x2, %rax
jne 0x32a22
movl $0x0, 0x1a0(%rbx)
leaq 0x1a4(%rbx), %rdx
movl $0x2, %eax
jmp 0x32a5c
movl 0x78(%r14), %esi
subl %ecx, %esi
sarl $0x4, %esi
movl %esi, 0x1a0(%rbx)
movl 0x80(%r14), %edx
subl %ecx, %edx
sarl $0x4, %edx
movl %edx, 0x1a4(%rbx)
testl %esi, %esi
jns 0x32a51
movl $0x0, 0x1a0(%rbx)
testl %edx, %edx
jns 0x32a62
leaq 0x1a4(%rbx), %rdx
movl $0x0, (%rdx)
addl $0xf, %ecx
movl 0x84(%r14), %esi
addl %ecx, %esi
sarl $0x4, %esi
movl %esi, 0x1ac(%rbx)
addl 0x7c(%r14), %ecx
sarl $0x4, %ecx
movl 0x198(%rbx), %edi
movl 0x19c(%rbx), %edx
cmpl %edi, %ecx
cmovgel %edi, %ecx
movl %ecx, 0x1a8(%rbx)
cmpl %edx, %esi
jle 0x32a9f
movl %edx, 0x1ac(%rbx)
testl %eax, %eax
jle 0x32ba7
movl 0x5c(%rbx), %edx
movl 0x80(%rbx), %eax
movl %eax, 0x4(%rsp)
leaq 0xb6c(%rbx), %rdi
xorl %r8d, %r8d
movl $0x2, %r9d
cmpl $0x0, 0x4(%rsp)
je 0x32ae3
movsbl 0x90(%rbx,%r8), %r10d
cmpl $0x0, 0x88(%rbx)
jne 0x32ae7
addl 0x54(%rbx), %r10d
jmp 0x32ae7
movl 0x54(%rbx), %r10d
movb $0x1, %r11b
leaq (%rdi,%r8,8), %r14
xorl %r15d, %r15d
movl %r10d, %eax
testl %edx, %edx
je 0x32b07
movl 0x60(%rbx), %eax
addl %r10d, %eax
testb $0x1, %r11b
jne 0x32b07
addl 0x70(%rbx), %eax
cmpl $0x3f, %eax
movl $0x3f, %r13d
cmovll %eax, %r13d
testl %r13d, %r13d
jg 0x32b1c
xorl %r13d, %r13d
leaq (%r14,%r15,4), %r12
testl %eax, %eax
jle 0x32b76
movl 0x58(%rbx), %esi
movl %r13d, %ebp
testl %esi, %esi
jle 0x32b48
cmpl $0x5, %esi
movb $0x1, %cl
sbbb $-0x1, %cl
movl %r13d, %ebp
shrl %cl, %ebp
movl $0x9, %ecx
subl %esi, %ecx
cmpl %ecx, %ebp
jl 0x32b48
movl %ecx, %ebp
cmpl $0x2, %ebp
jge 0x32b52
movl $0x1, %ebp
movb %bpl, 0x1(%r12)
leal (%rbp,%r13,2), %ecx
movb %cl, (%r12)
xorl %ecx, %ecx
cmpl $0xf, %eax
setae %cl
cmpl $0x28, %eax
cmovael %r9d, %ecx
movb %cl, 0x3(%r12)
jmp 0x32b7b
movb $0x0, (%r12)
movb %r15b, 0x2(%r12)
movl $0x1, %r15d
xorl %eax, %eax
testb $0x1, %r11b
movl $0x0, %r11d
jne 0x32af1
incq %r8
cmpq $0x4, %r8
jne 0x32ac4
jmp 0x32bc1
xorl %eax, %eax
jmp 0x32bc1
leaq 0x243ad(%rip), %rdx # 0x56f5f
movq %rbx, %rdi
movl $0x6, %esi
callq 0xdd5d
movl (%rbx), %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dec/frame_dec.c
|
VP8GetThreadMethod
|
int VP8GetThreadMethod(const WebPDecoderOptions* const options,
const WebPHeaderStructure* const headers,
int width, int height) {
if (options == NULL || options->use_threads == 0) {
return 0;
}
(void)headers;
(void)width;
(void)height;
assert(headers == NULL || !headers->is_lossless);
#if defined(WEBP_USE_THREAD)
if (width >= MIN_WIDTH_FOR_THREADS) return 2;
#endif
return 0;
}
|
xorl %eax, %eax
testq %rdi, %rdi
je 0x32c2d
cmpl $0x0, 0x28(%rdi)
je 0x32c2d
xorl %eax, %eax
cmpl $0x200, %edx # imm = 0x200
setge %al
addl %eax, %eax
retq
|
/PKRoma[P]libwebp/src/dec/frame_dec.c
|
Dither8x8
|
static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) {
uint8_t dither[64];
int i;
for (i = 0; i < 8 * 8; ++i) {
dither[i] = VP8RandomBits2(rg, VP8_DITHER_AMP_BITS + 1, amp);
}
VP8DitherCombine8x8(dither, dst, bps);
}
|
subq $0x48, %rsp
movq (%rdi), %xmm2
xorl %eax, %eax
pcmpeqd %xmm0, %xmm0
movdqa 0x23ef8(%rip), %xmm1 # 0x56f20
movd %xmm2, %r8d
movslq %r8d, %r8
movl 0x8(%rdi,%r8,4), %r9d
pshufd $0x55, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
movd %xmm2, %r10d
movslq %r10d, %r10
subl 0x8(%rdi,%r10,4), %r9d
movl %r9d, %r10d
andl $0x7fffffff, %r10d # imm = 0x7FFFFFFF
movl %r10d, 0x8(%rdi,%r8,4)
movq (%rdi), %xmm3
psubd %xmm0, %xmm3
movdqa %xmm3, %xmm2
pcmpeqd %xmm1, %xmm2
pandn %xmm3, %xmm2
movq %xmm2, (%rdi)
shrl $0x17, %r9d
movsbl %r9b, %r8d
imull %ecx, %r8d
shrl $0x8, %r8d
addb $-0x80, %r8b
movb %r8b, (%rsp,%rax)
incq %rax
cmpq $0x40, %rax
jne 0x33028
leaq 0x3657a(%rip), %rax # 0x69610
movq %rsp, %rdi
callq *(%rax)
addq $0x48, %rsp
retq
|
/PKRoma[P]libwebp/src/dec/frame_dec.c
|
SetResidualCoeffs_C
|
static void SetResidualCoeffs_C(const int16_t* const coeffs,
VP8Residual* const res) {
int n;
res->last = -1;
assert(res->first == 0 || coeffs[0] == 0);
for (n = 15; n >= 0; --n) {
if (coeffs[n]) {
res->last = n;
break;
}
}
res->coeffs = coeffs;
}
|
movl $0xffffffff, 0x4(%rsi) # imm = 0xFFFFFFFF
movl $0xf, %eax
movl %eax, %ecx
cmpw $0x0, (%rdi,%rcx,2)
jne 0x334e6
addl $-0x1, %eax
jb 0x334d6
jmp 0x334e9
movl %eax, 0x4(%rsi)
movq %rdi, 0x8(%rsi)
retq
nop
|
/PKRoma[P]libwebp/src/dsp/cost.c
|
Intra16Preds_C
|
static WEBP_INLINE void DCMode(uint8_t* dst, const uint8_t* left,
const uint8_t* top,
int size, int round, int shift) {
int DC = 0;
int j;
if (top != NULL) {
for (j = 0; j < size; ++j) DC += top[j];
if (left != NULL) { // top and left present
for (j = 0; j < size; ++j) DC += left[j];
} else { // top, but no left
DC += DC;
}
DC = (DC + round) >> shift;
} else if (left != NULL) { // left but no top
for (j = 0; j < size; ++j) DC += left[j];
DC += DC;
DC = (DC + round) >> shift;
} else { // no top, no left, nothing.
DC = 0x80;
}
Fill(dst, DC, size);
}
|
testq %rdx, %rdx
je 0x34b90
pxor %xmm0, %xmm0
xorl %eax, %eax
pxor %xmm1, %xmm1
movd (%rdx,%rax), %xmm2
punpcklbw %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
paddd %xmm2, %xmm1
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x34b19
pshufd $0xee, %xmm1, %xmm0 # xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
testq %rsi, %rsi
je 0x34be0
movd %eax, %xmm0
xorl %eax, %eax
pxor %xmm1, %xmm1
movd (%rsi,%rax), %xmm2
punpcklbw %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
punpcklwd %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
paddd %xmm2, %xmm0
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x34b5d
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, %eax
jmp 0x34be2
testq %rsi, %rsi
je 0x34d54
pxor %xmm0, %xmm0
xorl %eax, %eax
pxor %xmm1, %xmm1
movd (%rsi,%rax), %xmm2
punpcklbw %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
paddd %xmm2, %xmm1
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x34ba3
pshufd $0xee, %xmm1, %xmm0 # xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
leal 0x10(,%rax,2), %eax
shrl $0x5, %eax
jmp 0x34be8
addl %eax, %eax
addl $0x10, %eax
sarl $0x5, %eax
xorl %ecx, %ecx
movzbl %al, %eax
movd %eax, %xmm0
punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqu %xmm0, (%rdi,%rcx)
addq $0x20, %rcx
cmpq $0x200, %rcx # imm = 0x200
jne 0x34bff
leaq 0x200(%rdi), %rax
xorl %ecx, %ecx
testq %rdx, %rdx
je 0x34c37
movdqu (%rdx), %xmm0
movdqu %xmm0, (%rax,%rcx)
addq $0x20, %rcx
cmpq $0x200, %rcx # imm = 0x200
jne 0x34c1f
jmp 0x34c51
movdqa 0x20c31(%rip), %xmm0 # 0x55870
movdqu %xmm0, (%rax,%rcx)
addq $0x20, %rcx
cmpq $0x200, %rcx # imm = 0x200
jne 0x34c3f
leaq 0x210(%rdi), %rax
xorl %ecx, %ecx
testq %rsi, %rsi
je 0x34cdb
movzbl (%rsi,%rcx), %r8d
movd %r8d, %xmm0
punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqu %xmm0, (%rax)
incq %rcx
addq $0x20, %rax
cmpq $0x10, %rcx
jne 0x34c5f
addq $0x10, %rdi
testq %rdx, %rdx
je 0x34d2a
movzbl -0x1(%rsi), %ecx
leaq 0x362e0(%rip), %rax # 0x6af80
subq %rcx, %rax
addq $0xff, %rax
xorl %ecx, %ecx
movzbl (%rsi,%rcx), %r8d
addq %rax, %r8
xorl %r9d, %r9d
movzbl (%rdx,%r9), %r10d
movb (%r8,%r10), %r10b
movb %r10b, (%rdi,%r9)
incq %r9
cmpq $0x10, %r9
jne 0x34cb6
addq $0x20, %rdi
incq %rcx
cmpq $0x10, %rcx
jne 0x34cab
jmp 0x34d53
movdqa 0x237ed(%rip), %xmm0 # 0x584d0
movdqu %xmm0, (%rax,%rcx)
addq $0x20, %rcx
cmpq $0x200, %rcx # imm = 0x200
jne 0x34ce3
addq $0x10, %rdi
xorl %eax, %eax
testq %rdx, %rdx
je 0x34d17
movdqu (%rdx), %xmm0
movdqu %xmm0, (%rdi,%rax)
addq $0x20, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x34d00
jmp 0x34d53
movdqu %xmm0, (%rdi,%rax)
addq $0x20, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x34d17
jmp 0x34d53
xorl %eax, %eax
movzbl (%rsi,%rax), %ecx
movd %ecx, %xmm0
punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqu %xmm0, (%rdi)
incq %rax
addq $0x20, %rdi
cmpq $0x10, %rax
jne 0x34d2c
retq
movl $0x80, %eax
jmp 0x34be8
|
/PKRoma[P]libwebp/src/dsp/enc.c
|
ITransformOne
|
static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in,
uint8_t* dst) {
int C[4 * 4], *tmp;
int i;
tmp = C;
for (i = 0; i < 4; ++i) { // vertical pass
const int a = in[0] + in[8];
const int b = in[0] - in[8];
const int c = MUL(in[4], kC2) - MUL(in[12], kC1);
const int d = MUL(in[4], kC1) + MUL(in[12], kC2);
tmp[0] = a + d;
tmp[1] = b + c;
tmp[2] = b - c;
tmp[3] = a - d;
tmp += 4;
in++;
}
tmp = C;
for (i = 0; i < 4; ++i) { // horizontal pass
const int dc = tmp[0] + 4;
const int a = dc + tmp[8];
const int b = dc - tmp[8];
const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1);
const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2);
STORE(0, i, a + d);
STORE(1, i, b + c);
STORE(2, i, b - c);
STORE(3, i, a - d);
tmp++;
}
}
|
pushq %rbp
pushq %rbx
xorl %eax, %eax
movswl (%rsi,%rax), %ecx
movswl 0x10(%rsi,%rax), %r9d
leal (%r9,%rcx), %r8d
subl %r9d, %ecx
movswl 0x8(%rsi,%rax), %r9d
imull $0x8a8c, %r9d, %r10d # imm = 0x8A8C
sarl $0x10, %r10d
movswl 0x18(%rsi,%rax), %r11d
imull $0x14e7b, %r11d, %ebx # imm = 0x14E7B
sarl $0x10, %ebx
subl %ebx, %r10d
imull $0x14e7b, %r9d, %r9d # imm = 0x14E7B
sarl $0x10, %r9d
imull $0x8a8c, %r11d, %r11d # imm = 0x8A8C
sarl $0x10, %r11d
addl %r9d, %r11d
leal (%r11,%r8), %r9d
movl %r9d, -0x48(%rsp,%rax,8)
leal (%r10,%rcx), %r9d
movl %r9d, -0x44(%rsp,%rax,8)
subl %r10d, %ecx
movl %ecx, -0x40(%rsp,%rax,8)
subl %r11d, %r8d
movl %r8d, -0x3c(%rsp,%rax,8)
addq $0x2, %rax
cmpl $0x8, %eax
jne 0x352b7
xorl %eax, %eax
movl -0x48(%rsp,%rax), %esi
movl -0x38(%rsp,%rax), %r10d
movl -0x28(%rsp,%rax), %r11d
leal (%rsi,%r11), %ecx
addl $0x4, %ecx
movl -0x18(%rsp,%rax), %ebx
imull $0x14e7b, %r10d, %r9d # imm = 0x14E7B
sarl $0x10, %r9d
imull $0x8a8c, %ebx, %r8d # imm = 0x8A8C
sarl $0x10, %r8d
addl %r9d, %r8d
movzbl (%rdi,%rax,8), %r9d
leal (%r8,%rcx), %ebp
sarl $0x3, %ebp
addl %r9d, %ebp
testl %ebp, %ebp
jg 0x35378
xorl %ebp, %ebp
movl $0xff, %r9d
cmpl %r9d, %ebp
jl 0x35388
movl $0xff, %ebp
addl $0x4, %esi
subl %r11d, %esi
imull $0x8a8c, %r10d, %r10d # imm = 0x8A8C
sarl $0x10, %r10d
imull $0x14e7b, %ebx, %r11d # imm = 0x14E7B
sarl $0x10, %r11d
subl %r11d, %r10d
movb %bpl, (%rdx,%rax,8)
movzbl 0x1(%rdi,%rax,8), %ebx
leal (%r10,%rsi), %r11d
sarl $0x3, %r11d
addl %ebx, %r11d
testl %r11d, %r11d
jg 0x353c3
xorl %r11d, %r11d
cmpl %r9d, %r11d
jl 0x353ce
movl $0xff, %r11d
movb %r11b, 0x1(%rdx,%rax,8)
movzbl 0x2(%rdi,%rax,8), %r11d
subl %r10d, %esi
sarl $0x3, %esi
addl %r11d, %esi
testl %esi, %esi
jg 0x353e8
xorl %esi, %esi
cmpl %r9d, %esi
jl 0x353f2
movl $0xff, %esi
movb %sil, 0x2(%rdx,%rax,8)
movzbl 0x3(%rdi,%rax,8), %esi
subl %r8d, %ecx
sarl $0x3, %ecx
addl %esi, %ecx
testl %ecx, %ecx
jg 0x3540a
xorl %ecx, %ecx
cmpl %r9d, %ecx
jl 0x35414
movl $0xff, %ecx
movb %cl, 0x3(%rdx,%rax,8)
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x35331
popq %rbx
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dsp/enc.c
|
VP8SSIMFromStats
|
static WEBP_INLINE double SSIMCalculation(
const VP8DistoStats* const stats, uint32_t N /*num samples*/) {
const uint32_t w2 = N * N;
const uint32_t C1 = 20 * w2;
const uint32_t C2 = 60 * w2;
const uint32_t C3 = 8 * 8 * w2; // 'dark' limit ~= 6
const uint64_t xmxm = (uint64_t)stats->xm * stats->xm;
const uint64_t ymym = (uint64_t)stats->ym * stats->ym;
if (xmxm + ymym >= C3) {
const int64_t xmym = (int64_t)stats->xm * stats->ym;
const int64_t sxy = (int64_t)stats->xym * N - xmym; // can be negative
const uint64_t sxx = (uint64_t)stats->xxm * N - xmxm;
const uint64_t syy = (uint64_t)stats->yym * N - ymym;
// we descale by 8 to prevent overflow during the fnum/fden multiply.
const uint64_t num_S = (2 * (uint64_t)(sxy < 0 ? 0 : sxy) + C2) >> 8;
const uint64_t den_S = (sxx + syy + C2) >> 8;
const uint64_t fnum = (2 * xmym + C1) * num_S;
const uint64_t fden = (xmxm + ymym + C1) * den_S;
const double r = (double)fnum / fden;
assert(r >= 0. && r <= 1.0);
return r;
}
return 1.; // area is too dark to contribute meaningfully
}
|
movl 0x4(%rdi), %edx
movl 0x8(%rdi), %ecx
movq %rdx, %rsi
imulq %rdx, %rsi
movq %rcx, %rax
imulq %rcx, %rax
addq %rsi, %rax
cmpq $0x400000, %rax # imm = 0x400000
jae 0x35618
movsd 0x21641(%rip), %xmm0 # 0x56c58
retq
imulq %rdx, %rcx
movl 0xc(%rdi), %esi
movl 0x10(%rdi), %r8d
shlq $0x8, %r8
subq %rcx, %r8
movl 0x14(%rdi), %edx
xorl %edi, %edi
testq %r8, %r8
cmovgq %r8, %rdi
addq %rdi, %rdi
movl $0x3c0000, %r8d # imm = 0x3C0000
addq %r8, %rdi
shrq $0x8, %rdi
addq %rsi, %rdx
shlq $0x8, %rdx
subq %rax, %rdx
addq %r8, %rdx
shrq $0x8, %rdx
addq %rcx, %rcx
movl $0x140000, %esi # imm = 0x140000
addq %rsi, %rcx
imulq %rdi, %rcx
addq %rsi, %rax
imulq %rdx, %rax
movq %rcx, %xmm1
movq 0x21626(%rip), %xmm2 # 0x56ca0
punpckldq %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
movapd 0x2162a(%rip), %xmm3 # 0x56cb0
subpd %xmm3, %xmm1
movapd %xmm1, %xmm0
unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
addsd %xmm1, %xmm0
movq %rax, %xmm1
punpckldq %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
subpd %xmm3, %xmm1
movapd %xmm1, %xmm2
unpckhpd %xmm1, %xmm2 # xmm2 = xmm2[1],xmm1[1]
addsd %xmm1, %xmm2
divsd %xmm2, %xmm0
retq
|
/PKRoma[P]libwebp/src/dsp/ssim.c
|
Quantize2Blocks_SSE2
|
static WEBP_INLINE int DoQuantizeBlock_SSE2(int16_t in[16], int16_t out[16],
const uint16_t* const sharpen,
const VP8Matrix* const mtx) {
const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
const __m128i zero = _mm_setzero_si128();
__m128i coeff0, coeff8;
__m128i out0, out8;
__m128i packed_out;
// Load all inputs.
__m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
__m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);
// extract sign(in) (0x0000 if positive, 0xffff if negative)
const __m128i sign0 = _mm_cmpgt_epi16(zero, in0);
const __m128i sign8 = _mm_cmpgt_epi16(zero, in8);
// coeff = abs(in) = (in ^ sign) - sign
coeff0 = _mm_xor_si128(in0, sign0);
coeff8 = _mm_xor_si128(in8, sign8);
coeff0 = _mm_sub_epi16(coeff0, sign0);
coeff8 = _mm_sub_epi16(coeff8, sign8);
// coeff = abs(in) + sharpen
if (sharpen != NULL) {
const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
coeff0 = _mm_add_epi16(coeff0, sharpen0);
coeff8 = _mm_add_epi16(coeff8, sharpen8);
}
// out = (coeff * iQ + B) >> QFIX
{
// doing calculations with 32b precision (QFIX=17)
// out = (coeff * iQ)
const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
__m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
__m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
__m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
__m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
// out = (coeff * iQ + B)
const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
out_00 = _mm_add_epi32(out_00, bias_00);
out_04 = _mm_add_epi32(out_04, bias_04);
out_08 = _mm_add_epi32(out_08, bias_08);
out_12 = _mm_add_epi32(out_12, bias_12);
// out = QUANTDIV(coeff, iQ, B, QFIX)
out_00 = _mm_srai_epi32(out_00, QFIX);
out_04 = _mm_srai_epi32(out_04, QFIX);
out_08 = _mm_srai_epi32(out_08, QFIX);
out_12 = _mm_srai_epi32(out_12, QFIX);
// pack result as 16b
out0 = _mm_packs_epi32(out_00, out_04);
out8 = _mm_packs_epi32(out_08, out_12);
// if (coeff > 2047) coeff = 2047
out0 = _mm_min_epi16(out0, max_coeff_2047);
out8 = _mm_min_epi16(out8, max_coeff_2047);
}
// get sign back (if (sign[j]) out_n = -out_n)
out0 = _mm_xor_si128(out0, sign0);
out8 = _mm_xor_si128(out8, sign8);
out0 = _mm_sub_epi16(out0, sign0);
out8 = _mm_sub_epi16(out8, sign8);
// in = out * Q
in0 = _mm_mullo_epi16(out0, q0);
in8 = _mm_mullo_epi16(out8, q8);
_mm_storeu_si128((__m128i*)&in[0], in0);
_mm_storeu_si128((__m128i*)&in[8], in8);
// zigzag the output before storing it.
//
// The zigzag pattern can almost be reproduced with a small sequence of
// shuffles. After it, we only need to swap the 7th (ending up in third
// position instead of twelfth) and 8th values.
{
__m128i outZ0, outZ8;
outZ0 = _mm_shufflehi_epi16(out0, _MM_SHUFFLE(2, 1, 3, 0));
outZ0 = _mm_shuffle_epi32 (outZ0, _MM_SHUFFLE(3, 1, 2, 0));
outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));
outZ8 = _mm_shufflelo_epi16(out8, _MM_SHUFFLE(3, 0, 2, 1));
outZ8 = _mm_shuffle_epi32 (outZ8, _MM_SHUFFLE(3, 1, 2, 0));
outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));
_mm_storeu_si128((__m128i*)&out[0], outZ0);
_mm_storeu_si128((__m128i*)&out[8], outZ8);
packed_out = _mm_packs_epi16(outZ0, outZ8);
}
{
const int16_t outZ_12 = out[12];
const int16_t outZ_3 = out[3];
out[3] = outZ_12;
out[12] = outZ_3;
}
// detect if all 'out' values are zeroes or not
return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}
|
movdqu (%rdi), %xmm4
movdqu 0x10(%rdi), %xmm5
movdqu (%rdx), %xmm2
movdqu 0x10(%rdx), %xmm3
movdqu 0x20(%rdx), %xmm1
movdqu 0x30(%rdx), %xmm6
pxor %xmm7, %xmm7
psubw %xmm4, %xmm7
pmaxsw %xmm4, %xmm7
psraw $0xf, %xmm4
pxor %xmm8, %xmm8
psubw %xmm5, %xmm8
pmaxsw %xmm5, %xmm8
psraw $0xf, %xmm5
pxor %xmm0, %xmm0
movdqu 0xc0(%rdx), %xmm9
paddw %xmm7, %xmm9
movdqu 0xd0(%rdx), %xmm10
paddw %xmm8, %xmm10
movdqa %xmm9, %xmm7
pmulhuw %xmm1, %xmm7
pmullw %xmm1, %xmm9
movdqa %xmm10, %xmm1
pmulhuw %xmm6, %xmm1
pmullw %xmm6, %xmm10
movdqa %xmm9, %xmm8
punpcklwd %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
punpckhwd %xmm7, %xmm9 # xmm9 = xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
movdqa %xmm10, %xmm11
punpcklwd %xmm1, %xmm11 # xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
punpckhwd %xmm1, %xmm10 # xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
movdqu 0x40(%rdx), %xmm6
paddd %xmm8, %xmm6
movdqu 0x50(%rdx), %xmm1
paddd %xmm9, %xmm1
movdqu 0x60(%rdx), %xmm7
paddd %xmm11, %xmm7
movdqu 0x70(%rdx), %xmm8
paddd %xmm10, %xmm8
psrad $0x11, %xmm6
psrad $0x11, %xmm1
packssdw %xmm1, %xmm6
psrad $0x11, %xmm7
psrad $0x11, %xmm8
packssdw %xmm8, %xmm7
movdqa 0x21841(%rip), %xmm1 # 0x58590
pminsw %xmm1, %xmm6
pminsw %xmm1, %xmm7
pxor %xmm4, %xmm6
pxor %xmm5, %xmm7
psubw %xmm4, %xmm6
psubw %xmm5, %xmm7
pmullw %xmm6, %xmm2
pmullw %xmm7, %xmm3
movdqu %xmm2, (%rdi)
movdqu %xmm3, 0x10(%rdi)
pshufhw $0xc6, %xmm6, %xmm2 # xmm2 = xmm6[0,1,2,3,6,5,4,7]
pshufd $0x6c, %xmm2, %xmm2 # xmm2 = xmm2[0,3,2,1]
pshufhw $0x39, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2,3,5,6,7,4]
pshuflw $0xc6, %xmm7, %xmm3 # xmm3 = xmm7[2,1,0,3,4,5,6,7]
pshufd $0xd8, %xmm3, %xmm3 # xmm3 = xmm3[0,2,1,3]
pshuflw $0x39, %xmm3, %xmm3 # xmm3 = xmm3[1,2,3,0,4,5,6,7]
movdqu %xmm2, (%rsi)
movdqu %xmm3, 0x10(%rsi)
packsswb %xmm3, %xmm2
movd %xmm7, %eax
pextrw $0x7, %xmm6, %ecx
movw %ax, 0x6(%rsi)
movw %cx, 0x18(%rsi)
pcmpeqb %xmm0, %xmm2
pmovmskb %xmm2, %r8d
movl $0xffff, %ecx # imm = 0xFFFF
xorl %eax, %eax
xorl %ecx, %r8d
setne %al
movdqu 0x20(%rdi), %xmm4
movdqu 0x30(%rdi), %xmm5
movdqu (%rdx), %xmm2
movdqu 0x10(%rdx), %xmm3
movdqu 0x20(%rdx), %xmm6
movdqu 0x30(%rdx), %xmm7
pxor %xmm8, %xmm8
psubw %xmm4, %xmm8
pmaxsw %xmm4, %xmm8
psraw $0xf, %xmm4
pxor %xmm9, %xmm9
psubw %xmm5, %xmm9
pmaxsw %xmm5, %xmm9
psraw $0xf, %xmm5
movdqu 0xc0(%rdx), %xmm10
paddw %xmm8, %xmm10
movdqu 0xd0(%rdx), %xmm8
paddw %xmm9, %xmm8
movdqa %xmm10, %xmm9
pmulhuw %xmm6, %xmm9
pmullw %xmm6, %xmm10
movdqa %xmm8, %xmm6
pmulhuw %xmm7, %xmm6
pmullw %xmm7, %xmm8
movdqa %xmm10, %xmm7
punpcklwd %xmm9, %xmm7 # xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
punpckhwd %xmm9, %xmm10 # xmm10 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
movdqa %xmm8, %xmm9
punpcklwd %xmm6, %xmm9 # xmm9 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
punpckhwd %xmm6, %xmm8 # xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
movdqu 0x40(%rdx), %xmm6
paddd %xmm7, %xmm6
movdqu 0x50(%rdx), %xmm11
paddd %xmm10, %xmm11
movdqu 0x60(%rdx), %xmm7
paddd %xmm9, %xmm7
movdqu 0x70(%rdx), %xmm9
paddd %xmm8, %xmm9
psrad $0x11, %xmm6
psrad $0x11, %xmm11
packssdw %xmm11, %xmm6
psrad $0x11, %xmm7
psrad $0x11, %xmm9
packssdw %xmm9, %xmm7
pminsw %xmm1, %xmm6
pminsw %xmm1, %xmm7
pxor %xmm4, %xmm6
pxor %xmm5, %xmm7
psubw %xmm4, %xmm6
psubw %xmm5, %xmm7
pmullw %xmm6, %xmm2
pmullw %xmm7, %xmm3
movdqu %xmm2, 0x20(%rdi)
movdqu %xmm3, 0x30(%rdi)
pshufhw $0xc6, %xmm6, %xmm1 # xmm1 = xmm6[0,1,2,3,6,5,4,7]
pshufd $0x6c, %xmm1, %xmm1 # xmm1 = xmm1[0,3,2,1]
pshufhw $0x39, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2,3,5,6,7,4]
pshuflw $0xc6, %xmm7, %xmm2 # xmm2 = xmm7[2,1,0,3,4,5,6,7]
pshufd $0xd8, %xmm2, %xmm2 # xmm2 = xmm2[0,2,1,3]
pshuflw $0x39, %xmm2, %xmm2 # xmm2 = xmm2[1,2,3,0,4,5,6,7]
movdqu %xmm1, 0x20(%rsi)
movdqu %xmm2, 0x30(%rsi)
packsswb %xmm2, %xmm1
movd %xmm7, %edx
pextrw $0x7, %xmm6, %edi
movw %dx, 0x26(%rsi)
movw %di, 0x38(%rsi)
pcmpeqb %xmm0, %xmm1
pmovmskb %xmm1, %edx
xorl %esi, %esi
xorl %ecx, %edx
setne %sil
leal (%rax,%rsi,2), %eax
retq
|
/PKRoma[P]libwebp/src/dsp/enc_sse2.c
|
ITransform_SSE2
|
// Inverse transform (SSE2): reconstructs one (do_two == 0) or two
// (do_two != 0) adjacent 4x4 blocks as dst = clamp_to_8bit(ref + idct(in)).
// 'in' holds 16 (or 32) int16 coefficients; 'ref' and 'dst' are addressed
// with a row stride of BPS bytes.
static void ITransform_SSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
                            int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85267  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant become the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;
  // Load and concatenate the transform coefficients (we'll do two inverse
  // transforms in parallel). In the case of only one inverse transform, the
  // second half of the vectors will just contain random value we'll never
  // use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((const __m128i*)&in[0]);
    in1 = _mm_loadl_epi64((const __m128i*)&in[4]);
    in2 = _mm_loadl_epi64((const __m128i*)&in[8]);
    in3 = _mm_loadl_epi64((const __m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      // Pack the second block's columns into the upper 64 bits of each vector
      // so both blocks go through the same arithmetic below.
      const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }
  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);
    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3);
  }
  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);  // rounding bias for the >>3 below
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);
    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);
    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1,
                           &T2, &T3);
  }
  // Add inverse transform to 'ref' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i ref0, ref1, ref2, ref3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]);
      ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]);
      ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]);
      ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      ref0 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[0 * BPS]));
      ref1 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[1 * BPS]));
      ref2 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[2 * BPS]));
      ref3 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[3 * BPS]));
    }
    // Convert to 16b.
    ref0 = _mm_unpacklo_epi8(ref0, zero);
    ref1 = _mm_unpacklo_epi8(ref1, zero);
    ref2 = _mm_unpacklo_epi8(ref2, zero);
    ref3 = _mm_unpacklo_epi8(ref3, zero);
    // Add the inverse transform(s).
    ref0 = _mm_add_epi16(ref0, T0);
    ref1 = _mm_add_epi16(ref1, T1);
    ref2 = _mm_add_epi16(ref2, T2);
    ref3 = _mm_add_epi16(ref3, T3);
    // Unsigned saturate to 8b.
    ref0 = _mm_packus_epi16(ref0, ref0);
    ref1 = _mm_packus_epi16(ref1, ref1);
    ref2 = _mm_packus_epi16(ref2, ref2);
    ref3 = _mm_packus_epi16(ref3, ref3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3);
    } else {
      // Store four bytes/pixels per line.
      WebPUint32ToMem(&dst[0 * BPS], _mm_cvtsi128_si32(ref0));
      WebPUint32ToMem(&dst[1 * BPS], _mm_cvtsi128_si32(ref1));
      WebPUint32ToMem(&dst[2 * BPS], _mm_cvtsi128_si32(ref2));
      WebPUint32ToMem(&dst[3 * BPS], _mm_cvtsi128_si32(ref3));
    }
  }
}
|
movq (%rsi), %xmm0
movq 0x8(%rsi), %xmm3
movq 0x10(%rsi), %xmm1
movq 0x18(%rsi), %xmm4
testl %ecx, %ecx
je 0x370ac
movq 0x20(%rsi), %xmm2
punpcklqdq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0]
movq 0x28(%rsi), %xmm2
punpcklqdq %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0]
movq 0x30(%rsi), %xmm2
punpcklqdq %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0]
movq 0x38(%rsi), %xmm2
punpcklqdq %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0]
movdqa %xmm0, %xmm5
paddw %xmm1, %xmm5
psubw %xmm1, %xmm0
movdqa 0x1e780(%rip), %xmm1 # 0x55840
movdqa %xmm3, %xmm6
pmulhw %xmm1, %xmm6
movdqa 0x1e780(%rip), %xmm2 # 0x55850
movdqa %xmm4, %xmm7
pmulhw %xmm2, %xmm7
psubw %xmm7, %xmm6
movdqa %xmm3, %xmm7
psubw %xmm4, %xmm7
paddw %xmm6, %xmm7
movdqa %xmm3, %xmm6
pmulhw %xmm2, %xmm6
paddw %xmm4, %xmm3
pmulhw %xmm1, %xmm4
paddw %xmm6, %xmm3
paddw %xmm4, %xmm3
movdqa %xmm3, %xmm8
paddw %xmm5, %xmm8
movdqa %xmm7, %xmm6
paddw %xmm0, %xmm6
psubw %xmm7, %xmm0
psubw %xmm3, %xmm5
movdqa %xmm8, %xmm4
punpcklwd %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
movdqa %xmm0, %xmm3
punpcklwd %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
punpckhwd %xmm6, %xmm8 # xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
punpckhwd %xmm5, %xmm0 # xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
movdqa %xmm4, %xmm6
punpckldq %xmm3, %xmm6 # xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
movdqa %xmm8, %xmm5
punpckldq %xmm0, %xmm5 # xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
punpckhdq %xmm3, %xmm4 # xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
punpckhdq %xmm0, %xmm8 # xmm8 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
movdqa %xmm6, %xmm3
punpcklqdq %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0]
punpckhqdq %xmm5, %xmm6 # xmm6 = xmm6[1],xmm5[1]
movdqa %xmm4, %xmm0
punpcklqdq %xmm8, %xmm0 # xmm0 = xmm0[0],xmm8[0]
punpckhqdq %xmm8, %xmm4 # xmm4 = xmm4[1],xmm8[1]
paddw 0x1e6f0(%rip), %xmm3 # 0x55860
movdqa %xmm3, %xmm5
paddw %xmm0, %xmm5
psubw %xmm0, %xmm3
movdqa %xmm6, %xmm0
pmulhw %xmm1, %xmm0
movdqa %xmm4, %xmm7
pmulhw %xmm2, %xmm7
psubw %xmm7, %xmm0
movdqa %xmm6, %xmm7
psubw %xmm4, %xmm7
paddw %xmm0, %xmm7
pmulhw %xmm6, %xmm2
pmulhw %xmm4, %xmm1
paddw %xmm2, %xmm1
paddw %xmm6, %xmm4
paddw %xmm1, %xmm4
movdqa %xmm5, %xmm1
paddw %xmm4, %xmm1
movdqa %xmm3, %xmm2
paddw %xmm7, %xmm2
psubw %xmm7, %xmm3
psubw %xmm4, %xmm5
psraw $0x3, %xmm1
psraw $0x3, %xmm2
psraw $0x3, %xmm3
psraw $0x3, %xmm5
movdqa %xmm1, %xmm0
punpcklwd %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
movdqa %xmm3, %xmm4
punpcklwd %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
punpckhwd %xmm2, %xmm1 # xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
punpckhwd %xmm5, %xmm3 # xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
movdqa %xmm0, %xmm5
punpckldq %xmm4, %xmm5 # xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
movdqa %xmm1, %xmm2
punpckldq %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
punpckhdq %xmm4, %xmm0 # xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
punpckhdq %xmm3, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
movdqa %xmm5, %xmm7
punpcklqdq %xmm2, %xmm7 # xmm7 = xmm7[0],xmm2[0]
punpckhqdq %xmm2, %xmm5 # xmm5 = xmm5[1],xmm2[1]
movdqa %xmm0, %xmm6
punpcklqdq %xmm1, %xmm6 # xmm6 = xmm6[0],xmm1[0]
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
testl %ecx, %ecx
je 0x3723d
movq (%rdi), %xmm1
movq 0x20(%rdi), %xmm2
movq 0x40(%rdi), %xmm3
movq 0x60(%rdi), %xmm4
jmp 0x37250
movd (%rdi), %xmm1
movd 0x20(%rdi), %xmm2
movd 0x40(%rdi), %xmm3
movd 0x60(%rdi), %xmm4
pxor %xmm8, %xmm8
punpcklbw %xmm8, %xmm1 # xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
paddw %xmm7, %xmm1
punpcklbw %xmm8, %xmm2 # xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
paddw %xmm5, %xmm2
punpcklbw %xmm8, %xmm3 # xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
paddw %xmm6, %xmm3
punpcklbw %xmm8, %xmm4 # xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
paddw %xmm0, %xmm4
packuswb %xmm1, %xmm1
packuswb %xmm2, %xmm2
packuswb %xmm3, %xmm3
packuswb %xmm4, %xmm4
testl %ecx, %ecx
je 0x372a1
movq %xmm1, (%rdx)
movq %xmm2, 0x20(%rdx)
movq %xmm3, 0x40(%rdx)
movq %xmm4, 0x60(%rdx)
retq
movd %xmm1, (%rdx)
movd %xmm2, 0x20(%rdx)
movd %xmm3, 0x40(%rdx)
movd %xmm4, 0x60(%rdx)
retq
|
/PKRoma[P]libwebp/src/dsp/enc_sse2.c
|
AccumulateSSE_SSE2
|
// Sum of squared differences between src1[] and src2[] over 'len' bytes.
// Processes 16 bytes at a time with SSE2, software-pipelined two groups per
// loop iteration, then finishes any remainder with a scalar tail.
static uint32_t AccumulateSSE_SSE2(const uint8_t* src1,
                                   const uint8_t* src2, int len) {
  uint32_t total = 0;
  int pos = 0;
  if (len >= 16) {
    const int last_start = len - 32;
    int32_t lanes[4];
    __m128i sq;
    __m128i acc = _mm_setzero_si128();
    __m128i va = _mm_loadu_si128((const __m128i*)&src1[pos]);
    __m128i vb = _mm_loadu_si128((const __m128i*)&src2[pos]);
    pos += 16;
    // Two 16-byte groups per iteration: the loads for the next group are
    // issued before the squaring of the current one.
    while (pos <= last_start) {
      const __m128i va_next = _mm_loadu_si128((const __m128i*)&src1[pos]);
      const __m128i vb_next = _mm_loadu_si128((const __m128i*)&src2[pos]);
      __m128i sq_next;
      pos += 16;
      SubtractAndSquare_SSE2(va, vb, &sq);
      acc = _mm_add_epi32(acc, sq);
      va = _mm_loadu_si128((const __m128i*)&src1[pos]);
      vb = _mm_loadu_si128((const __m128i*)&src2[pos]);
      pos += 16;
      SubtractAndSquare_SSE2(va_next, vb_next, &sq_next);
      acc = _mm_add_epi32(acc, sq_next);
    }
    // Flush the last pre-loaded group, then fold the four 32-bit lanes.
    SubtractAndSquare_SSE2(va, vb, &sq);
    acc = _mm_add_epi32(acc, sq);
    _mm_storeu_si128((__m128i*)lanes, acc);
    total += (lanes[3] + lanes[2] + lanes[1] + lanes[0]);
  }
  // Scalar tail for the remaining (< 16) bytes.
  for (; pos < len; ++pos) {
    const int32_t d = src1[pos] - src2[pos];
    total += d * d;
  }
  return total;
}
|
xorl %eax, %eax
movl $0x0, %ecx
cmpl $0x10, %edx
jl 0x37e5b
movdqu (%rsi), %xmm1
movdqu (%rdi), %xmm4
movl $0x10, %ecx
pxor %xmm0, %xmm0
cmpl $0x30, %edx
jae 0x37d92
pxor %xmm2, %xmm2
jmp 0x37e19
leal -0x20(%rdx), %eax
pxor %xmm3, %xmm3
movl $0x10, %ecx
pxor %xmm2, %xmm2
movdqa %xmm4, %xmm5
psubusb %xmm1, %xmm5
psubusb %xmm4, %xmm1
por %xmm5, %xmm1
movdqa %xmm1, %xmm4
punpcklbw %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
punpckhbw %xmm3, %xmm1 # xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
pmaddwd %xmm4, %xmm4
paddd %xmm2, %xmm4
pmaddwd %xmm1, %xmm1
paddd %xmm4, %xmm1
movdqu (%rsi,%rcx), %xmm2
movdqu (%rdi,%rcx), %xmm4
movdqa %xmm4, %xmm5
psubusb %xmm2, %xmm5
psubusb %xmm4, %xmm2
por %xmm5, %xmm2
movdqa %xmm2, %xmm5
punpcklbw %xmm3, %xmm5 # xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
pmaddwd %xmm5, %xmm5
paddd %xmm1, %xmm5
movdqu 0x10(%rsi,%rcx), %xmm1
movdqu 0x10(%rdi,%rcx), %xmm4
addq $0x20, %rcx
punpckhbw %xmm3, %xmm2 # xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
pmaddwd %xmm2, %xmm2
paddd %xmm5, %xmm2
cmpq %rax, %rcx
jbe 0x37da2
movdqa %xmm4, %xmm3
psubusb %xmm1, %xmm3
psubusb %xmm4, %xmm1
por %xmm3, %xmm1
movdqa %xmm1, %xmm3
punpcklbw %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
punpckhbw %xmm0, %xmm1 # xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
pmaddwd %xmm3, %xmm3
paddd %xmm2, %xmm3
pmaddwd %xmm1, %xmm1
paddd %xmm3, %xmm1
pshufd $0xee, %xmm1, %xmm0 # xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %eax
cmpl %edx, %ecx
jge 0x37e7f
movl %ecx, %ecx
movl %edx, %edx
movzbl (%rdi,%rcx), %r8d
movzbl (%rsi,%rcx), %r9d
subl %r9d, %r8d
imull %r8d, %r8d
addl %r8d, %eax
incq %rcx
cmpq %rcx, %rdx
jne 0x37e63
retq
|
/PKRoma[P]libwebp/src/dsp/ssim_sse2.c
|
VP8EncDspInitSSE41
|
// Installs the SSE4.1 implementations of the encoder's speed-critical
// function pointers (called once during dsp initialization).
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  // Quantization entry points.
  VP8EncQuantizeBlock = QuantizeBlock_SSE41;
  VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;
  // Texture-distortion metrics.
  VP8TDisto4x4 = Disto4x4_SSE41;
  VP8TDisto16x16 = Disto16x16_SSE41;
  // Histogram collection.
  VP8CollectHistogram = CollectHistogram_SSE41;
}
|
retq
nopl (%rax)
|
/PKRoma[P]libwebp/src/dsp/enc_sse41.c
|
CompressAlphaJob
|
// Worker-thread hook: compresses the encoder's alpha plane and, on success,
// hands ownership of the compressed bytes to the encoder.
// Returns 1 on success, 0 on failure (nothing stored in that case).
static int CompressAlphaJob(void* arg1, void* unused) {
  VP8Encoder* const enc = (VP8Encoder*)arg1;
  const WebPConfig* config = enc->config_;
  uint8_t* alpha_data = NULL;
  size_t alpha_size = 0;
  const int effort_level = config->method;  // maps to [0..6]
  WEBP_FILTER_TYPE filter;
  switch (config->alpha_filtering) {
    case 0:  filter = WEBP_FILTER_NONE; break;
    case 1:  filter = WEBP_FILTER_FAST; break;
    default: filter = WEBP_FILTER_BEST; break;
  }
  if (!EncodeAlpha(enc, config->alpha_quality, config->alpha_compression,
                   filter, effort_level, &alpha_data, &alpha_size)) {
    return 0;
  }
  // Soundness check: the stored size field is only 32 bits wide.
  if (alpha_size != (uint32_t)alpha_size) {
    WebPSafeFree(alpha_data);
    return 0;
  }
  enc->alpha_data_ = alpha_data;
  enc->alpha_data_size_ = (uint32_t)alpha_size;
  (void)unused;
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x248, %rsp # imm = 0x248
movq (%rdi), %rcx
movq 0x8(%rdi), %rdx
movl 0x8(%rcx), %eax
movq %rax, 0x28(%rsp)
movl 0x30(%rcx), %r8d
movl 0x34(%rcx), %eax
movl 0x38(%rcx), %r15d
movslq 0x8(%rdx), %rbx
movq %rdx, 0x20(%rsp)
movl 0xc(%rdx), %r12d
movq $0x0, 0x38(%rsp)
movl $0x0, 0xc(%rsp)
cmpl $0x64, %r15d
seta %cl
setl %bpl
cmpl $0x2, %r8d
setae %dl
orb %cl, %dl
jne 0x38590
movq %rdi, %r13
movl %r12d, %ecx
imull %ebx, %ecx
xorl %r14d, %r14d
cmpl $0x1, %eax
sete %r14b
addl $0x5, %r14d
testl %eax, %eax
cmovel %eax, %r14d
movslq %ecx, %rsi
testl %r8d, %r8d
cmovel %r8d, %r14d
movl $0x1, %edi
movl %r8d, 0x1c(%rsp)
movq %rsi, 0x48(%rsp)
callq 0x3145c
testq %rax, %rax
je 0x38590
movq %r13, 0x30(%rsp)
movl %ebx, %r13d
movq 0x20(%rsp), %rcx
movq 0x30(%rcx), %rdi
movl 0x38(%rcx), %esi
movq %rax, (%rsp)
movq %rax, %rdx
movl %ebx, %ecx
movl %r13d, %r8d
movl %r12d, %r9d
callq 0x314c8
cmpl $0x63, %r15d
jg 0x382ed
cmpl $0x46, %r15d
ja 0x382c9
movzbl %r15b, %eax
imull $0xcd, %eax, %eax
shrl $0xa, %eax
addb $0x2, %al
movzbl %al, %ecx
jmp 0x382d1
leal -0x220(,%r15,8), %ecx
movq (%rsp), %rdi
leaq 0x38(%rsp), %r8
movl %r13d, %esi
movl %r12d, %edx
callq 0x44fbc
testl %eax, %eax
je 0x38587
movl 0xc(%rsp), %eax
movb %bpl, %al
movl %eax, 0xc(%rsp)
callq 0x157fc
movq 0x20(%rsp), %rax
movq 0x80(%rax), %r15
cmpl $0x6, %r14d
je 0x38337
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
leaq 0x58(%rsp), %rdi
testl %r14d, %r14d
jne 0x38452
movq %rax, -0x8(%rdi)
movq %rdi, 0x10(%rsp)
xorl %esi, %esi
callq 0x443f9
jmp 0x3841b
leaq 0x50(%rsp), %rdi
movl $0x100, %edx # imm = 0x100
xorl %esi, %esi
callq 0x40c0
testl %r12d, %r12d
jle 0x38375
xorl %eax, %eax
movq (%rsp), %rcx
testl %ebx, %ebx
jle 0x3836a
xorl %edx, %edx
movzbl (%rcx,%rdx), %esi
movb $0x1, 0x50(%rsp,%rsi)
incq %rdx
cmpq %rdx, %r13
jne 0x38359
incq %rax
addq %rbx, %rcx
cmpq %r12, %rax
jne 0x38353
pxor %xmm0, %xmm0
xorl %eax, %eax
pcmpeqd %xmm2, %xmm2
movdqa 0x20309(%rip), %xmm3 # 0x58690
pxor %xmm1, %xmm1
movd 0x50(%rsp,%rax), %xmm4
pcmpeqb %xmm0, %xmm4
pxor %xmm2, %xmm4
punpcklbw %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1,2,2,3,3]
pand %xmm3, %xmm4
paddd %xmm4, %xmm1
addq $0x4, %rax
cmpq $0x100, %rax # imm = 0x100
jne 0x3838b
pshufd $0xee, %xmm1, %xmm0 # xmm0 = xmm1[2,3,2,3]
paddd %xmm1, %xmm0
pshufd $0x55, %xmm0, %xmm1 # xmm1 = xmm0[1,1,1,1]
paddd %xmm0, %xmm1
movd %xmm1, %ebp
xorl %eax, %eax
cmpl $0x11, %ebp
jb 0x383e3
movq (%rsp), %rdi
movl %r13d, %esi
movl %r12d, %edx
movl %ebx, %ecx
callq 0x43a14
cmpl $0x4, 0x28(%rsp)
setge %cl
cmpl $0xc1, %ebp
setae %dl
orb %cl, %dl
movzbl %dl, %ebp
btsl %eax, %ebp
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
leaq 0x58(%rsp), %rdi
movq %rax, -0x8(%rdi)
movq %rdi, 0x10(%rsp)
xorl %esi, %esi
callq 0x443f9
cmpl $0x1, %ebp
jne 0x38467
subq $0x8, %rsp
leaq 0x58(%rsp), %rax
movq 0x8(%rsp), %rdi
movl %ebx, %esi
movl %r12d, %edx
movl 0x24(%rsp), %ecx
xorl %r8d, %r8d
movl 0x14(%rsp), %r9d
pushq %rax
pushq $0x0
pushq 0x40(%rsp)
callq 0x38717
addq $0x20, %rsp
movl %eax, %r14d
jmp 0x38530
movq %rax, -0x8(%rdi)
movq %rdi, 0x10(%rsp)
xorl %esi, %esi
callq 0x443f9
movl $0xf, %ebp
movq %r15, 0x40(%rsp)
movl $0x1, %edi
movq 0x48(%rsp), %rsi
callq 0x3145c
xorl %ebx, %ebx
testq %rax, %rax
je 0x385a6
movq %rax, %r15
movq (%rsp), %rdi
movl $0x1, %r14d
testb $0x1, %bpl
je 0x38511
subq $0x8, %rsp
movl %r13d, %esi
movl %r12d, %edx
movl 0x24(%rsp), %ecx
movl %ebx, %r8d
movl 0x14(%rsp), %r9d
leaq 0x158(%rsp), %rax
pushq %rax
pushq %r15
pushq 0x40(%rsp)
callq 0x38717
addq $0x20, %rsp
movl %eax, %r14d
testl %eax, %eax
je 0x38500
movq 0x150(%rsp), %rax
cmpq 0x50(%rsp), %rax
jae 0x38500
movq 0x10(%rsp), %rdi
callq 0x44535
movl $0xf8, %edx
leaq 0x50(%rsp), %rdi
leaq 0x150(%rsp), %rsi
callq 0x45d0
jmp 0x3850d
leaq 0x158(%rsp), %rdi
callq 0x44535
movq (%rsp), %rdi
cmpl $0x2, %ebp
jb 0x38523
incl %ebx
shrl %ebp
testl %r14d, %r14d
jne 0x3848d
movq %r15, %rdi
callq 0x314a8
movq 0x40(%rsp), %r15
testl %r14d, %r14d
movq 0x30(%rsp), %r12
je 0x38573
testq %r15, %r15
je 0x38561
movups 0x11c(%rsp), %xmm0
movups %xmm0, 0x94(%r15)
movdqu 0x12c(%rsp), %xmm0
movdqu %xmm0, 0xa4(%r15)
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rbx
movq 0x18(%rax), %r14
xorl %r15d, %r15d
jmp 0x385b1
movq 0x10(%rsp), %rdi
callq 0x44535
movb $0x1, %r15b
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp 0x385b1
movq (%rsp), %rdi
callq 0x314a8
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x248, %rsp # imm = 0x248
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movb $0x1, %r15b
xorl %r14d, %r14d
movq 0x30(%rsp), %r12
movq (%rsp), %rdi
movq 0x20(%rsp), %rax
movq 0x80(%rax), %rax
testq %rax, %rax
je 0x385d6
addl %r14d, (%rax)
movq 0x38(%rsp), %rax
movq %rax, 0x5bf0(%r12)
callq 0x314a8
xorl %ebp, %ebp
testb %r15b, %r15b
jne 0x38592
movq %r14, %rax
shrq $0x20, %rax
jne 0x38602
movl %r14d, 0x228(%r12)
movq %rbx, 0x220(%r12)
movl $0x1, %ebp
jmp 0x38592
movq %rbx, %rdi
callq 0x314a8
jmp 0x38592
|
/PKRoma[P]libwebp/src/enc/alpha_enc.c
|
VP8EncDeleteAlpha
|
// Tears down the alpha-compression state: joins and ends the async alpha
// worker (when the encoder runs with thread_level_ > 0) and releases the
// compressed alpha buffer.  Returns the worker's Sync() status, or 1 when
// no worker was involved.
int VP8EncDeleteAlpha(VP8Encoder* const enc) {
  int ok = 1;
  if (enc->thread_level_ > 0) {
    WebPWorker* const w = &enc->alpha_worker_;
    ok = WebPGetWorkerInterface()->Sync(w);  // finish anything left in flight
    WebPGetWorkerInterface()->End(w);        // must End() even when !ok
  }
  WebPSafeFree(enc->alpha_data_);
  enc->has_alpha_ = 0;
  enc->alpha_data_size_ = 0;
  enc->alpha_data_ = NULL;
  return ok;
}
|
pushq %rbp
pushq %r14
pushq %rbx
movq %rdi, %rbx
cmpl $0x0, 0x5c50(%rdi)
jle 0x386f5
leaq 0x230(%rbx), %r14
callq 0x311c6
movq %r14, %rdi
callq *0x10(%rax)
movl %eax, %ebp
callq 0x311c6
movq %r14, %rdi
callq *0x28(%rax)
jmp 0x386fa
movl $0x1, %ebp
movq 0x220(%rbx), %rdi
callq 0x314a8
xorps %xmm0, %xmm0
movups %xmm0, 0x21c(%rbx)
movl %ebp, %eax
popq %rbx
popq %r14
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/alpha_enc.c
|
VP8EncAnalyze
|
// Analysis pass over all macroblocks: fills per-MB data (alpha/complexity)
// and assigns segments.  The row range may be split between the main thread
// and one side worker.  Returns 1 on success, 0 on worker failure.
int VP8EncAnalyze(VP8Encoder* const enc) {
  int ok = 1;
  const int do_segments =
      enc->config_->emulate_jpeg_size ||   // We need the complexity evaluation.
      (enc->segment_hdr_.num_segments_ > 1) ||
      (enc->method_ <= 1);  // for method 0 - 1, we need preds_[] to be filled.
  if (do_segments) {
    const int last_row = enc->mb_h_;
    // We give a little more than a half work to the main thread.
    const int split_row = (9 * last_row + 15) >> 4;
    const int total_mb = last_row * enc->mb_w_;
#ifdef WEBP_USE_THREAD
    const int kMinSplitRow = 2;  // minimal rows needed for mt to be worth it
    const int do_mt = (enc->thread_level_ > 0) && (split_row >= kMinSplitRow);
#else
    const int do_mt = 0;
#endif
    const WebPWorkerInterface* const worker_interface =
        WebPGetWorkerInterface();
    SegmentJob main_job;
    if (do_mt) {
      SegmentJob side_job;
      // Note the use of '&' instead of '&&' because we must call the functions
      // no matter what.
      // main_job covers rows [0, split_row), side_job [split_row, last_row).
      InitSegmentJob(enc, &main_job, 0, split_row);
      InitSegmentJob(enc, &side_job, split_row, last_row);
      // we don't need to call Reset() on main_job.worker, since we're calling
      // WebPWorkerExecute() on it
      ok &= worker_interface->Reset(&side_job.worker);
      // launch the two jobs in parallel
      if (ok) {
        worker_interface->Launch(&side_job.worker);
        worker_interface->Execute(&main_job.worker);
        ok &= worker_interface->Sync(&side_job.worker);
        ok &= worker_interface->Sync(&main_job.worker);
      }
      worker_interface->End(&side_job.worker);
      if (ok) MergeJobs(&side_job, &main_job);  // merge results together
    } else {
      // Even for single-thread case, we use the generic Worker tools.
      InitSegmentJob(enc, &main_job, 0, last_row);
      worker_interface->Execute(&main_job.worker);
      ok &= worker_interface->Sync(&main_job.worker);
    }
    worker_interface->End(&main_job.worker);
    if (ok) {
      // Average the accumulated per-MB statistics over the whole picture.
      enc->alpha_ = main_job.alpha / total_mb;
      enc->uv_alpha_ = main_job.uv_alpha / total_mb;
      AssignSegments(enc, main_job.alphas);
    }
  } else {   // Use only one default segment.
    ResetAllMBInfo(enc);
  }
  return ok;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2718, %rsp # imm = 0x2718
movq %rdi, %r13
movq (%rdi), %rax
cmpl $0x0, 0x50(%rax)
jne 0x389f6
cmpl $0x1, 0x20(%r13)
jg 0x389f6
cmpl $0x1, 0x5c40(%r13)
jg 0x39155
movl 0x34(%r13), %r15d
movl 0x30(%r13), %ebx
imull %r15d, %ebx
movl 0x5c50(%r13), %ebp
callq 0x311c6
movq %rax, %r14
testl %ebp, %ebp
movq %r13, 0x40(%rsp)
jle 0x38a7d
leal (%r15,%r15,8), %r12d
addl $0xf, %r12d
sarl $0x4, %r12d
cmpl $0x2, %r12d
jl 0x38a7d
leaq 0x88(%rsp), %rsi
xorl %ebp, %ebp
movq %r13, %rdi
xorl %edx, %edx
movl %r12d, %ecx
callq 0x391d5
movq %r13, %rdi
leaq 0x13d0(%rsp), %r13
movq %r13, %rsi
movl %r12d, %edx
movl %r15d, %ecx
callq 0x391d5
movq %r13, %rdi
callq *0x8(%r14)
testb $0x1, %al
jne 0x38aad
leaq 0x13d0(%rsp), %rdi
callq *0x28(%r14)
movq 0x40(%rsp), %r13
jmp 0x38b43
leaq 0x88(%rsp), %r12
movq %r13, %rdi
movq %r12, %rsi
xorl %edx, %edx
movl %r15d, %ecx
callq 0x391d5
movq %r12, %rdi
callq *0x20(%r14)
movq %r12, %rdi
callq *0x10(%r14)
movl %eax, %ebp
andl $0x1, %ebp
jmp 0x38b43
leaq 0x13d0(%rsp), %r12
movq %r12, %rdi
callq *0x18(%r14)
leaq 0x88(%rsp), %r15
movq %r15, %rdi
callq *0x20(%r14)
movq %r12, %rdi
callq *0x10(%r14)
movl %eax, %r13d
movq %r15, %rdi
callq *0x10(%r14)
movl %eax, %r15d
andl %r13d, %r15d
movq %r12, %rdi
callq *0x28(%r14)
andl $0x1, %r15d
movq 0x40(%rsp), %r13
je 0x38b43
xorl %eax, %eax
movdqu 0x1400(%rsp,%rax,4), %xmm0
movdqu 0xb8(%rsp,%rax,4), %xmm1
paddd %xmm0, %xmm1
movdqu %xmm1, 0xb8(%rsp,%rax,4)
addq $0x4, %rax
cmpq $0x100, %rax # imm = 0x100
jne 0x38af6
movq 0x1800(%rsp), %xmm0
movq 0x4b8(%rsp), %xmm1
paddd %xmm0, %xmm1
movq %xmm1, 0x4b8(%rsp)
movl %r15d, %ebp
leaq 0x88(%rsp), %rdi
callq *0x28(%r14)
testl %ebp, %ebp
je 0x391c1
movl 0x4b8(%rsp), %eax
cltd
idivl %ebx
movl %eax, 0xe04(%r13)
movl 0x4bc(%rsp), %eax
cltd
idivl %ebx
movl %eax, 0xe08(%r13)
movl 0x20(%r13), %eax
cmpl $0x4, %eax
movl $0x4, %r12d
movl %eax, 0x10(%rsp)
cmovll %eax, %r12d
xorl %r11d, %r11d
cmpl $0x0, 0xb8(%rsp,%r11,4)
jne 0x38bba
incq %r11
cmpq $0x100, %r11 # imm = 0x100
jne 0x38b91
movl $0xff, %eax
movq %rax, 0x8(%rsp)
movl $0x100, %r11d # imm = 0x100
jmp 0x38bfd
cmpq $0xff, %r11
jae 0x38bf3
movl $0xff, %eax
movq %rax, 0x8(%rsp)
movq 0x8(%rsp), %rax
cmpl $0x0, 0xb8(%rsp,%rax,4)
jne 0x38bfd
movq 0x8(%rsp), %rcx
decq %rcx
movq %rcx, 0x8(%rsp)
cmpq %r11, %rcx
ja 0x38bcd
movl %r11d, %eax
jmp 0x38bf8
movl $0xff, %eax
movq %rax, 0x8(%rsp)
movl %ebp, 0x1c(%rsp)
cmpl $0x0, 0x10(%rsp)
jle 0x38ce2
movq 0x8(%rsp), %rdx
movl %edx, %ecx
subl %r11d, %ecx
leal (%r12,%r12), %edi
movl %r12d, %ebp
leaq -0x1(%rbp), %rax
movq %rax, %xmm0
pshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
leal 0x4(,%r12,4), %r8d
andl $0x38, %r8d
leal (%rdx,%rdx,2), %esi
leal (%r11,%r11,2), %eax
subl %eax, %esi
leal (,%rdx,4), %r9d
leal (,%r11,4), %eax
subl %eax, %r9d
movdqa 0x1b965(%rip), %xmm1 # 0x545c0
xorl %r10d, %r10d
movdqa 0x1bb4a(%rip), %xmm2 # 0x547b0
pxor %xmm2, %xmm0
pcmpeqd %xmm3, %xmm3
movdqa 0x1bb6a(%rip), %xmm4 # 0x547e0
movdqa %xmm1, %xmm5
pxor %xmm2, %xmm5
movdqa %xmm5, %xmm6
pcmpgtd %xmm0, %xmm6
pcmpeqd %xmm0, %xmm5
pshufd $0xf5, %xmm5, %xmm7 # xmm7 = xmm5[1,1,3,3]
pand %xmm6, %xmm7
pshufd $0xf5, %xmm6, %xmm5 # xmm5 = xmm6[1,1,3,3]
por %xmm7, %xmm5
movd %xmm5, %eax
notl %eax
testb $0x1, %al
je 0x38cb3
movl %ecx, %eax
cltd
idivl %edi
addl %r11d, %eax
movl %eax, 0x20(%rsp,%r10)
pxor %xmm3, %xmm5
pextrw $0x4, %xmm5, %eax
testb $0x1, %al
je 0x38ccd
movl %esi, %eax
cltd
idivl %edi
addl %r11d, %eax
movl %eax, 0x24(%rsp,%r10)
paddq %xmm4, %xmm1
addq $0x8, %r10
addl %r9d, %esi
addl %r9d, %ecx
cmpq %r10, %r8
jne 0x38c76
jmp 0x38ce5
movl %r12d, %ebp
leal -0x1(%r12), %eax
leaq 0x4(,%rax,4), %rax
movq %rax, 0x50(%rsp)
movslq %r12d, %r15
movl %r11d, %eax
movq %rax, 0x48(%rsp)
movq 0x8(%rsp), %rax
cmpl %r11d, %eax
movl %r11d, %r13d
cmovgl %eax, %r13d
incl %r13d
xorl %r14d, %r14d
movq %r11, 0x58(%rsp)
movq 0x50(%rsp), %rbx
cmpl $0x0, 0x10(%rsp)
jle 0x38d4b
leaq 0x70(%rsp), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq 0x40c0
leaq 0x60(%rsp), %rdi
xorl %esi, %esi
movq %rbx, %rdx
callq 0x40c0
movq 0x58(%rsp), %r11
cmpl %r11d, 0x8(%rsp)
jl 0x38dcf
xorl %ecx, %ecx
movq 0x48(%rsp), %rax
movl 0xb8(%rsp,%rax,4), %edx
testl %edx, %edx
je 0x38dc7
movslq %ecx, %rsi
leal 0x1(%rsi), %edi
cmpl %edi, %r12d
cmovgl %r12d, %edi
decl %edi
leal -0x1(%rsi), %ecx
incq %rsi
cmpq %r15, %rsi
jge 0x38dae
movl %eax, %r8d
subl 0x20(%rsp,%rsi,4), %r8d
movl %r8d, %r9d
negl %r9d
cmovsl %r8d, %r9d
movl %eax, %r8d
subl 0x1c(%rsp,%rsi,4), %r8d
movl %r8d, %r10d
negl %r10d
cmovsl %r8d, %r10d
incl %ecx
incq %rsi
cmpl %r10d, %r9d
jb 0x38d79
jmp 0x38db0
movl %edi, %ecx
movl %edx, %esi
imull %eax, %esi
movslq %ecx, %rdi
addl %esi, 0x60(%rsp,%rdi,4)
movl %ecx, 0x13d0(%rsp,%rax,4)
addl %edx, 0x70(%rsp,%rdi,4)
incq %rax
cmpq %r13, %rax
jne 0x38d59
cmpl $0x0, 0x10(%rsp)
jle 0x38e4b
xorl %edi, %edi
xorl %ecx, %ecx
xorl %r8d, %r8d
xorl %esi, %esi
movl 0x70(%rsp,%rdi,4), %r9d
testl %r9d, %r9d
je 0x38e1c
movl %r9d, %eax
shrl $0x1f, %eax
addl %r9d, %eax
sarl %eax
addl 0x60(%rsp,%rdi,4), %eax
cltd
idivl %r9d
movl 0x20(%rsp,%rdi,4), %edx
subl %eax, %edx
movl %edx, %r10d
negl %r10d
cmovsl %edx, %r10d
addl %r10d, %r8d
movl %eax, 0x20(%rsp,%rdi,4)
imull %r9d, %eax
addl %eax, %ecx
addl %r9d, %esi
incq %rdi
cmpq %rdi, %rbp
jne 0x38ddf
cmpl $0x5, %r8d
jl 0x38e37
incl %r14d
cmpl $0x6, %r14d
jne 0x38d21
movl %esi, %eax
shrl $0x1f, %eax
addl %esi, %eax
sarl %eax
addl %eax, %ecx
movl %ecx, %eax
cltd
idivl %esi
movl %eax, 0x18(%rsp)
movq 0x40(%rsp), %r13
movl 0x30(%r13), %r14d
movl 0x34(%r13), %r15d
movl %r15d, %ecx
imull %r14d, %ecx
testl %ecx, %ecx
jle 0x38eb4
xorl %eax, %eax
movq 0x5c60(%r13), %rcx
movzbl 0x1(%rcx,%rax,4), %edx
movslq 0x13d0(%rsp,%rdx,4), %rdx
movb (%rcx,%rax,4), %sil
movl %edx, %edi
shlb $0x5, %dil
andb $0x60, %dil
andb $-0x61, %sil
orb %dil, %sil
movb %sil, (%rcx,%rax,4)
movb 0x20(%rsp,%rdx,4), %dl
movb %dl, 0x1(%rcx,%rax,4)
incq %rax
movl 0x30(%r13), %r14d
movl 0x34(%r13), %r15d
movl %r15d, %ecx
imull %r14d, %ecx
movslq %ecx, %rdx
cmpq %rdx, %rax
jl 0x38e65
cmpl $0x2, 0x10(%rsp)
movl 0x1c(%rsp), %ebp
jl 0x39097
movq (%r13), %rax
testb $0x1, 0x44(%rax)
je 0x39097
movslq %ecx, %rdi
movl $0x1, %esi
callq 0x3145c
testq %rax, %rax
je 0x39097
cmpl $0x3, %r15d
jl 0x3908f
decl %r15d
leal -0x1(%r14), %ecx
movl %r14d, %edx
negl %edx
movl %r14d, %esi
notl %esi
movslq %esi, %rsi
movslq %edx, %rdi
movl $0x1, %edx
subl %r14d, %edx
movslq %edx, %r8
movslq %ecx, %r9
movslq %r14d, %r10
movl %r10d, %edx
movq %rdx, 0x8(%rsp)
movl $0x1, %edx
movq %rdx, 0x10(%rsp)
pxor %xmm0, %xmm0
cmpl $0x3, %r14d
jl 0x39023
movq 0x10(%rsp), %rbx
imulq 0x8(%rsp), %rbx
movl $0x1, %r12d
movdqa %xmm0, 0x30(%rsp)
movq 0x5c60(%r13), %rdx
leaq (%r12,%rbx), %r13
leaq (%rdx,%r13,4), %rdx
movb (%rdx), %bpl
movzbl (%rdx,%rsi,4), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
movzbl (%rdx,%rdi,4), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
movzbl (%rdx,%r8,4), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
movzbl -0x4(%rdx), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
movzbl 0x4(%rdx), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
movzbl (%rdx,%r9,4), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
shrb $0x5, %bpl
movzbl (%rdx,%r10,4), %r11d
shrl $0x3, %r11d
andl $0xc, %r11d
incl 0x30(%rsp,%r11)
andb $0x3, %bpl
movzbl 0x4(%rdx,%r10,4), %edx
shrl $0x3, %edx
andl $0xc, %edx
incl 0x30(%rsp,%rdx)
xorl %edx, %edx
cmpl $0x5, 0x30(%rsp,%rdx,4)
jge 0x3900c
incq %rdx
cmpq $0x4, %rdx
jne 0x38ffa
jmp 0x3900e
movl %edx, %ebp
movb %bpl, (%rax,%r13)
incq %r12
cmpq %rcx, %r12
movq 0x40(%rsp), %r13
jne 0x38f4a
movq 0x10(%rsp), %rdx
incq %rdx
movq %rdx, 0x10(%rsp)
cmpq %r15, %rdx
jne 0x38f2f
decq %rcx
movq 0x8(%rsp), %rbx
leaq 0x1(%rbx), %rdx
movl $0x1, %esi
movl 0x1c(%rsp), %ebp
cmpl $0x3, %r14d
jl 0x39084
movq %rdx, %rdi
movq %rcx, %r8
movq 0x5c60(%r13), %r9
movb (%rax,%rdi), %r10b
movb (%r9,%rdi,4), %r11b
shlb $0x5, %r10b
andb $0x60, %r10b
andb $-0x61, %r11b
orb %r10b, %r11b
movb %r11b, (%r9,%rdi,4)
incq %rdi
decq %r8
jne 0x3905a
incq %rsi
addq %rbx, %rdx
cmpq %r15, %rsi
jne 0x3904e
movq %rax, %rdi
callq 0x314a8
movslq 0x20(%r13), %rsi
movl 0x20(%rsp), %edi
cmpq $0x2, %rsi
jl 0x390c6
xorl %ecx, %ecx
movl %edi, %eax
movl 0x18(%rsp), %r11d
movl 0x20(%rsp,%rcx,4), %edx
cmpl %edx, %edi
cmovgel %edx, %edi
cmpl %edx, %eax
cmovlel %edx, %eax
incq %rcx
cmpq %rcx, %rsi
jne 0x390ae
jmp 0x390cd
movl %edi, %eax
movl 0x18(%rsp), %r11d
testl %esi, %esi
jle 0x391c1
leal 0x1(%rdi), %r8d
cmpl %edi, %eax
cmovnel %eax, %r8d
subl %edi, %r8d
addq $0x504, %r13 # imm = 0x504
xorl %r9d, %r9d
movl 0x20(%rsp,%r9,4), %r10d
movl %r10d, %ecx
subl %r11d, %ecx
movl %ecx, %eax
shll $0x8, %eax
subl %ecx, %eax
cltd
idivl %r8d
movl %eax, %ecx
subl %edi, %r10d
movl %r10d, %eax
shll $0x8, %eax
subl %r10d, %eax
cltd
idivl %r8d
cmpl $0x7f, %ecx
jl 0x3911e
movl $0x7f, %ecx
cmpl $-0x7e, %ecx
jge 0x39128
movl $0xffffff81, %ecx # imm = 0xFFFFFF81
movl %ecx, -0x4(%r13)
movl $0xff, %ecx
cmpl %ecx, %eax
jl 0x3913a
movl $0xff, %eax
testl %eax, %eax
jg 0x39140
xorl %eax, %eax
movl %eax, (%r13)
incq %r9
addq $0x2e8, %r13 # imm = 0x2E8
cmpq %r9, %rsi
jne 0x390ec
jmp 0x391c1
movl 0x34(%r13), %eax
imull 0x30(%r13), %eax
testl %eax, %eax
jle 0x3918f
xorl %eax, %eax
movq 0x5c60(%r13), %rcx
movb (%rcx,%rax,4), %dl
andb $-0x80, %dl
incb %dl
movb %dl, (%rcx,%rax,4)
movb $0x0, 0x1(%rcx,%rax,4)
incq %rax
movslq 0x30(%r13), %rcx
movslq 0x34(%r13), %rdx
imulq %rcx, %rdx
cmpq %rdx, %rax
jl 0x39164
xorl %eax, %eax
movq %rax, 0x500(%r13)
movq %rax, 0xe04(%r13)
movq 0x8(%r13), %rdi
movl 0x218(%r13), %esi
addq $0x218, %r13 # imm = 0x218
addl $0x14, %esi
movq %r13, %rdx
callq 0x2fb53
movl $0x1, %ebp
movl %ebp, %eax
addq $0x2718, %rsp # imm = 0x2718
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/analysis_enc.c
|
DoSegmentsJob
|
// Worker hook: analyzes the macroblock rows covered by the iterator in arg2,
// accumulating susceptibility stats into the SegmentJob in arg1. The
// (void*, void*) signature matches the generic WebPWorker callback type.
// Returns 1 on success, 0 if the progress callback requested an abort.
static int DoSegmentsJob(void* arg1, void* arg2) {
  SegmentJob* const seg = (SegmentJob*)arg1;
  VP8EncIterator* const iter = (VP8EncIterator*)arg2;
  int status = 1;
  if (!VP8IteratorIsDone(iter)) {
    // Over-allocate so the import buffer can be aligned to WEBP_ALIGN_CST.
    uint8_t tmp[32 + WEBP_ALIGN_CST];
    uint8_t* const aligned = (uint8_t*)WEBP_ALIGN(tmp);
    for (;;) {
      // Let's pretend we have perfect lossless reconstruction.
      VP8IteratorImport(iter, aligned);
      MBAnalyze(iter, seg->alphas, &seg->alpha, &seg->uv_alpha);
      status = VP8IteratorProgress(iter, seg->delta_progress);
      if (!status || !VP8IteratorNext(iter)) break;
    }
  }
  return status;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %rsi, %rbx
movq %rdi, %r14
movq %rsi, %rdi
callq 0x3b533
movl $0x1, %ebp
testl %eax, %eax
jne 0x395b7
leaq 0x8f(%rsp), %rax
andq $-0x20, %rax
movq %rax, 0x10(%rsp)
leaq 0x31ca5(%rip), %r12 # 0x6af48
leaq 0x1f516(%rip), %r13 # 0x587c0
movq %r14, 0x18(%rsp)
movq %rbx, %rdi
movq 0x10(%rsp), %rsi
callq 0x3b628
movq 0x28(%rbx), %r14
movq %rbx, %rdi
xorl %esi, %esi
callq 0x3bf71
movq %rbx, %rdi
xorl %esi, %esi
callq 0x3bfe5
movq %rbx, %rdi
xorl %esi, %esi
callq 0x3bffc
cmpl $0x1, 0x5c40(%r14)
jg 0x393cd
movq 0x28(%rbx), %rax
movq (%rax), %rax
cvttss2si 0x4(%rax), %r13d
movq $-0x4, %r14
xorl %r15d, %r15d
leaq 0x20(%rsp), %rbp
movq 0x8(%rbx), %rdi
addq %r15, %rdi
movq %rbp, %rsi
callq *(%r12)
addq $0x4, %r14
addq $0x10, %rbp
subq $-0x80, %r15
cmpq $0xc, %r14
jb 0x39308
pxor %xmm1, %xmm1
xorl %eax, %eax
pxor %xmm0, %xmm0
movdqa 0x20(%rsp,%rax,4), %xmm2
paddd %xmm2, %xmm0
pshufd $0xf5, %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
pmuludq %xmm2, %xmm2
pshufd $0xe8, %xmm2, %xmm2 # xmm2 = xmm2[0,2,2,3]
pmuludq %xmm3, %xmm3
pshufd $0xe8, %xmm3, %xmm3 # xmm3 = xmm3[0,2,2,3]
punpckldq %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
paddd %xmm2, %xmm1
addq $0x4, %rax
cmpq $0x10, %rax
jne 0x39332
pshufd $0xee, %xmm1, %xmm2 # xmm2 = xmm1[2,3,2,3]
paddd %xmm1, %xmm2
pshufd $0x55, %xmm2, %xmm1 # xmm1 = xmm2[1,1,1,1]
paddd %xmm2, %xmm1
movd %xmm1, %eax
pshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
paddd %xmm0, %xmm1
pshufd $0x55, %xmm1, %xmm0 # xmm0 = xmm1[1,1,1,1]
paddd %xmm1, %xmm0
movd %xmm0, %ecx
leal (%r13,%r13,8), %edx
movslq %edx, %rdx
imulq $0x51eb851f, %rdx, %rdx # imm = 0x51EB851F
movq %rdx, %rsi
sarq $0x25, %rsi
shrq $0x3f, %rdx
addl %esi, %edx
addl $0x8, %edx
imull %eax, %edx
imull %ecx, %ecx
cmpl %ecx, %edx
jae 0x39466
movq %rbx, %rdi
xorl %esi, %esi
callq 0x3bf71
jmp 0x3947d
movq %rbx, %rdi
callq 0x3c867
movl $0xffffffff, %r15d # imm = 0xFFFFFFFF
xorl %r14d, %r14d
movb $0x1, %r12b
xorl %ebp, %ebp
movabsq $0x100000000, %rax # imm = 0x100000000
movq %rax, 0x20(%rsp)
movq 0x8(%rbx), %rdi
movzwl (%r13,%r14,2), %esi
addq 0x20(%rbx), %rsi
xorl %edx, %edx
movl $0x10, %ecx
leaq 0x20(%rsp), %r8
leaq 0x31ac5(%rip), %rax # 0x6aed8
callq *(%rax)
movl 0x20(%rsp), %ecx
movl $0x0, %eax
cmpl $0x2, %ecx
jl 0x39432
movl 0x24(%rsp), %eax
leal (%rax,%rax), %edx
shll $0x9, %eax
subl %edx, %eax
cltd
idivl %ecx
cmpl %r15d, %eax
cmovgl %eax, %r15d
cmovgl %r14d, %ebp
movl $0x1, %r14d
testb $0x1, %r12b
movl $0x0, %r12d
jne 0x393e3
movq %rbx, %rdi
movl %ebp, %esi
callq 0x3bf71
leal (%r15,%r15,2), %eax
addl $0x2, %eax
movl %eax, 0xc(%rsp)
jmp 0x39485
pxor %xmm0, %xmm0
movdqa %xmm0, 0x60(%rsp)
movq %rbx, %rdi
leaq 0x60(%rsp), %rsi
callq 0x3bfa3
movl $0x2, 0xc(%rsp)
movq %rbx, %rdi
callq 0x3c898
xorl %r15d, %r15d
movl $0xffffffff, %r13d # imm = 0xFFFFFFFF
movb $0x1, %bpl
xorl %r14d, %r14d
xorl %r12d, %r12d
movabsq $0x100000000, %rax # imm = 0x100000000
movq %rax, 0x20(%rsp)
movq 0x8(%rbx), %rdi
addq $0x10, %rdi
leaq 0x1f30b(%rip), %rax # 0x587c8
movzwl (%rax,%r15,2), %esi
addq 0x20(%rbx), %rsi
movl $0x10, %edx
movl $0x18, %ecx
leaq 0x20(%rsp), %r8
leaq 0x319fc(%rip), %rax # 0x6aed8
callq *(%rax)
movl 0x20(%rsp), %ecx
movl $0x0, %eax
cmpl $0x2, %ecx
jl 0x394fb
movl 0x24(%rsp), %eax
leal (%rax,%rax), %edx
shll $0x9, %eax
subl %edx, %eax
cltd
idivl %ecx
cmpl %r13d, %eax
cmovgl %eax, %r13d
cmpl %r14d, %eax
setl %cl
orb %bpl, %cl
movl %r15d, %esi
testb $0x1, %cl
jne 0x39519
movl %r12d, %esi
movl %r14d, %eax
movl $0x1, %r15d
movl %eax, %r14d
movl %esi, %r12d
testb $0x1, %bpl
movl $0x0, %ebp
jne 0x3949f
movq %rbx, %rdi
callq 0x3bfce
movl 0xc(%rsp), %edx
addl %r13d, %edx
sarl $0x2, %edx
movl $0xff, %eax
subl %edx, %eax
movl $0xff, %ecx
cmpl %ecx, %eax
cmovael %ecx, %eax
cmpl $0x100, %edx # imm = 0x100
movl $0x0, %ecx
cmovgel %ecx, %eax
movq 0x18(%rsp), %r14
incl 0x30(%r14,%rax,4)
movq 0x30(%rbx), %rcx
movb %al, 0x1(%rcx)
addl %eax, 0x430(%r14)
addl %r13d, 0x434(%r14)
movl 0x1340(%r14), %esi
movq %rbx, %rdi
callq 0x3b5cb
movl %eax, %ebp
testl %eax, %eax
leaq 0x319aa(%rip), %r12 # 0x6af48
leaq 0x1f21b(%rip), %r13 # 0x587c0
je 0x395b7
movq %rbx, %rdi
callq 0x3bf04
testl %eax, %eax
jne 0x392af
movl %ebp, %eax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/PKRoma[P]libwebp/src/enc/analysis_enc.c
|
VP8EncLoop
|
// Single-pass per-macroblock encoding loop: imports the source samples,
// runs VP8Decimate() to evaluate the block, then either codes the residuals
// or records a skip, accumulating side info and filter statistics along the
// way. Returns the result of PostLoopFinalize() (1 on success, 0 on error).
int VP8EncLoop(VP8Encoder* const enc) {
  VP8EncIterator it;
  int ok = PreLoopInitialize(enc);
  if (!ok) return 0;

  StatLoop(enc);  // stats-collection loop

  VP8IteratorInit(enc, &it);
  VP8InitFilter(&it);
  do {
    VP8ModeScore info;
    const int dont_use_skip = !enc->proba_.use_skip_proba_;
    const VP8RDLevel rd_opt = enc->rd_opt_level_;

    VP8IteratorImport(&it, NULL);
    // Warning! order is important: first call VP8Decimate() and
    // *then* decide how to code the skip decision if there's one.
    if (!VP8Decimate(&it, &info, rd_opt) || dont_use_skip) {
      CodeResiduals(it.bw_, &it, &info);
    } else {   // reset predictors after a skip
      ResetAfterSkip(&it);
    }
    StoreSideInfo(&it);
    VP8StoreFilterStats(&it);
    VP8IteratorExport(&it);
    ok = VP8IteratorProgress(&it, 20);
    VP8IteratorSaveBoundary(&it);
  } while (ok && VP8IteratorNext(&it));

  return PostLoopFinalize(&it, ok);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1398, %rsp # imm = 0x1398
movq %rdi, %r12
callq 0x39ff3
testl %eax, %eax
je 0x39fdf
movl 0x5c40(%r12), %r14d
movl 0x5c54(%r12), %ecx
testl %r14d, %r14d
setne %al
cmpl $0x3, %r14d
setne %r15b
setge %bl
andb %al, %r15b
movl %ecx, 0x3c(%rsp)
testl %ecx, %ecx
movq (%r12), %rdi
movl 0x3c(%rdi), %ecx
setne %bpl
movl %ecx, %eax
shrl $0x1f, %eax
addl %ecx, %eax
sarl %eax
addl $0x14, %eax
cltd
movq %rcx, %r13
idivl %ecx
movl %eax, 0x58(%rsp)
movl 0x218(%r12), %eax
movl %eax, 0x8c(%rsp)
movl 0x34(%r12), %eax
imull 0x30(%r12), %eax
movl %eax, 0x10(%rsp)
leaq 0xa8(%rsp), %rsi
callq 0x3a98c
leaq 0x1244(%r12), %rdi
movl $0x1080, %edx # imm = 0x1080
xorl %esi, %esi
callq 0x40c0
orb %bpl, %r15b
jne 0x396b2
cmpl $0x3, %r14d
je 0x39698
movl 0x10(%rsp), %ecx
movl %ecx, %eax
sarl $0x2, %eax
cmpl $0xc9, %ecx
movl $0x32, %ecx
jmp 0x396ab
movl 0x10(%rsp), %ecx
movl %ecx, %eax
sarl %eax
cmpl $0xc9, %ecx
movl $0x64, %ecx
cmovgel %eax, %ecx
movl %ecx, 0x10(%rsp)
testl %r13d, %r13d
movq %r12, 0x60(%rsp)
jle 0x39b80
orb %bpl, %bl
movzbl %bl, %eax
movl %eax, 0x2c(%rsp)
movl 0x10(%rsp), %eax
shll $0x7, %eax
leal (%rax,%rax,2), %eax
cltq
movq %rax, %xmm0
punpckldq 0x1d5bd(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
leaq 0xe20(%r12), %rax
movq %rax, 0xa0(%rsp)
subpd 0x1d5b5(%rip), %xmm0 # 0x56cb0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 0x1d559(%rip), %xmm1 # 0x56c68
movapd %xmm1, 0xe0(%rsp)
leaq 0xa8(%rsp), %rax
movss 0x4(%rax), %xmm2
leaq 0x490(%rsp), %r14
leal -0x1(%r13), %eax
movaps %xmm2, %xmm0
andps 0x1ef65(%rip), %xmm0 # 0x586a0
cvtss2sd %xmm0, %xmm0
movsd 0x1ef79(%rip), %xmm1 # 0x586c0
ucomisd %xmm0, %xmm1
movb $0x1, %dl
movq %r13, 0x50(%rsp)
movaps %xmm2, 0x90(%rsp)
movl %eax, 0x48(%rsp)
jae 0x39770
testl %eax, %eax
je 0x39770
cmpl $0x0, 0x5c48(%r12)
sete %dl
movb %dl, 0xf(%rsp)
movq %r12, %rdi
movq %r14, %rsi
callq 0x3b540
movss 0xb0(%rsp), %xmm0
movq %r12, %rdi
callq 0x3aa30
movl 0x10(%rsp), %eax
movq %rax, 0x40(%rsp)
xorl %r13d, %r13d
xorl %r15d, %r15d
xorl %ebp, %ebp
movq %r14, %rdi
xorl %esi, %esi
callq 0x3b628
movq %r14, %rdi
leaq 0x120(%rsp), %rsi
movl 0x2c(%rsp), %edx
callq 0x3c8e0
testl %eax, %eax
je 0x397cb
incl 0x5bd0(%r12)
movq 0x4b8(%rsp), %rbx
movq %r14, %rdi
callq 0x3bc74
movq 0x4c0(%rsp), %rax
movb (%rax), %al
andb $0x3, %al
cmpb $0x1, %al
movq %r15, 0x68(%rsp)
movq %r13, 0x78(%rsp)
movq %rbp, 0x70(%rsp)
movq %rbx, 0x18(%rsp)
jne 0x3985d
xorl %edi, %edi
movl $0x1, %esi
movq %rbx, %rdx
leaq 0xf0(%rsp), %r12
movq %r12, %rcx
callq 0x49c15
leaq 0x148(%rsp), %rdi
movq %r12, %rsi
leaq 0x3167d(%rip), %rax # 0x6aea8
callq *(%rax)
movl 0x558(%rsp), %edi
addl 0x534(%rsp), %edi
movq %r12, %rsi
callq 0x49f9c
movl %eax, 0x558(%rsp)
movl %eax, 0x534(%rsp)
movl $0x1, %edi
xorl %esi, %esi
movq %rbx, %rdx
jmp 0x3986f
xorl %edi, %edi
movl $0x3, %esi
movq %rbx, %rdx
leaq 0xf0(%rsp), %r12
movq %r12, %rcx
callq 0x49c15
leaq 0x168(%rsp), %r15
xorl %r13d, %r13d
movl $0x21, %ebp
movq %r15, 0x20(%rsp)
leaq 0x31615(%rip), %r14 # 0x6aea8
movl 0x538(%rsp,%r13,4), %ebx
addl 0x490(%rsp,%rbp,4), %ebx
movq %r15, %rdi
movq %r12, %rsi
callq *(%r14)
movl %ebx, %edi
movq %r12, %rsi
callq 0x49f9c
movl %eax, 0x538(%rsp,%r13,4)
movl %eax, 0x490(%rsp,%rbp,4)
incq %rbp
addq $0x20, %r15
cmpq $0x25, %rbp
jne 0x39893
incq %r13
movq 0x20(%rsp), %r15
subq $-0x80, %r15
cmpq $0x4, %r13
jne 0x39882
xorl %edi, %edi
movl $0x2, %esi
movq 0x18(%rsp), %rdx
movq %r12, %rcx
callq 0x49c15
movb $0x1, %al
xorl %ecx, %ecx
movl %eax, 0x30(%rsp)
movq %rcx, %r13
orq $0x4, %r13
movb $0x1, %al
xorl %esi, %esi
movq %rcx, 0x80(%rsp)
movl %eax, 0x18(%rsp)
movq %rsi, %rax
movq %rsi, %r14
orq %r13, %r14
orq %rcx, %rax
addq %rax, %rax
movq %rax, 0x20(%rsp)
movl 0x538(%rsp,%r14,4), %ebp
movb $0x1, %bl
xorl %edi, %edi
leaq 0xf0(%rsp), %r15
movq %rdi, %r12
orq %r13, %r12
addl 0x514(%rsp,%r12,4), %ebp
orq 0x20(%rsp), %rdi
shlq $0x5, %rdi
leaq 0x368(%rsp), %rax
addq %rax, %rdi
movq %r15, %rsi
leaq 0x3153e(%rip), %rax # 0x6aea8
callq *(%rax)
movl %ebp, %edi
movq %r15, %rsi
callq 0x49f9c
movl %eax, %ebp
movl %eax, 0x538(%rsp,%r14,4)
movl %eax, 0x514(%rsp,%r12,4)
movl $0x1, %edi
testb $0x1, %bl
movl $0x0, %ebx
jne 0x3993e
movl $0x1, %esi
testb $0x1, 0x18(%rsp)
movl $0x0, %eax
movq 0x80(%rsp), %rcx
jne 0x39912
movl $0x2, %ecx
testb $0x1, 0x30(%rsp)
jne 0x398fb
leaq 0x490(%rsp), %r14
movq %r14, %rdi
callq 0x3bd68
movq 0x78(%rsp), %r13
addq 0x138(%rsp), %r13
movq 0x130(%rsp), %rbx
movq 0x70(%rsp), %rbp
addq 0x120(%rsp), %rbp
movl 0x58(%rsp), %esi
testl %esi, %esi
je 0x39a0e
movq %r14, %rdi
callq 0x3b5cb
testl %eax, %eax
je 0x39bd6
addq %rbx, %r13
movq 0x68(%rsp), %r15
addq %rbx, %r15
movq %r14, %rdi
callq 0x3be2b
movq %r14, %rdi
callq 0x3bf04
testl %eax, %eax
movq 0x60(%rsp), %r12
je 0x39a48
movq 0x40(%rsp), %rcx
leal -0x1(%rcx), %eax
cmpl $0x1, %ecx
movq %rax, 0x40(%rsp)
jg 0x397a1
movslq 0x28(%r12), %rax
addq %rax, %r15
cmpl $0x0, 0xd8(%rsp)
je 0x39a92
movq %r12, %rdi
callq 0x3af3a
movslq %eax, %rbx
movq 0xa0(%rsp), %rdi
callq 0x3ac52
cltq
addq %r15, %r13
addq %rbx, %r13
addq %r13, %rax
addq $0x400, %rax # imm = 0x400
shrq $0xb, %rax
addq $0x1e, %rax
cvtsi2sd %rax, %xmm0
jmp 0x39ae7
cmpl $0x0, 0x10(%rsp)
sete %al
testq %rbp, %rbp
sete %cl
orb %al, %cl
movsd 0x1d19e(%rip), %xmm0 # 0x56c48
jne 0x39ae7
movq %rbp, %xmm0
punpckldq 0x1d1e7(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd 0x1d1ef(%rip), %xmm0 # 0x56cb0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
movapd 0xe0(%rsp), %xmm0
divsd %xmm1, %xmm0
callq 0x4030
mulsd 0x1d1d9(%rip), %xmm0 # 0x56cc0
movsd %xmm0, 0xc0(%rsp)
testq %r15, %r15
movaps 0x90(%rsp), %xmm2
je 0x39bdb
movl 0x5c48(%r12), %eax
testl %eax, %eax
setle %cl
cmpq $0x3fc00001, %r15 # imm = 0x3FC00001
setb %dl
orb %cl, %dl
jne 0x39b2d
shrl %eax
movl %eax, 0x5c48(%r12)
movq 0x50(%rsp), %rax
jmp 0x39b75
cmpb $0x0, 0xf(%rsp)
movl 0x48(%rsp), %eax
jne 0x39b80
cmpl $0x0, 0x3c(%rsp)
je 0x39b75
leaq 0xa8(%rsp), %rdi
movl %eax, %ebx
callq 0x3ae79
movl %ebx, %eax
movss 0xac(%rsp), %xmm2
movaps %xmm2, %xmm0
andps 0x1eb3d(%rip), %xmm0 # 0x586a0
cvtss2sd %xmm0, %xmm0
movsd 0x1eb51(%rip), %xmm1 # 0x586c0
ucomisd %xmm0, %xmm1
jae 0x39b80
movl %eax, %r13d
testl %eax, %eax
jg 0x3972d
leaq 0x218(%r12), %r14
movl 0x8c(%rsp), %ebx
addl $0x14, %ebx
cmpl $0x0, 0x3c(%rsp)
je 0x39ba3
cmpl $0x0, 0xd8(%rsp)
jne 0x39bb8
movq %r12, %rdi
callq 0x3af3a
leaq 0xe20(%r12), %rdi
callq 0x3ac52
leaq 0xe20(%r12), %rdi
callq 0x49974
movq 0x8(%r12), %rdi
movl %ebx, %esi
movq %r14, %rdx
callq 0x2fb53
jmp 0x39bdb
movq 0x60(%rsp), %r12
leaq 0x490(%rsp), %r14
movq %r12, %rdi
movq %r14, %rsi
callq 0x3b540
movq %r14, %rdi
callq 0x4a1f7
leaq 0x120(%rsp), %r15
leaq 0x312a3(%rip), %r13 # 0x6aea8
movl 0x5bcc(%r12), %ebp
movl 0x5c44(%r12), %ebx
movq %r14, %rdi
xorl %esi, %esi
callq 0x3b628
movq %r14, %rdi
movq %r15, %rsi
movl %ebx, %edx
callq 0x3c8e0
testl %eax, %eax
je 0x39c62
testl %ebp, %ebp
je 0x39c62
movq 0x4c0(%rsp), %rcx
movq 0x4d8(%rsp), %rax
movb (%rcx), %cl
andb $0x3, %cl
cmpb $0x1, %cl
jne 0x39f78
xorl %ecx, %ecx
movl %ecx, (%rax)
movl %ecx, 0x558(%rsp)
jmp 0x39f7e
movq 0x4c8(%rsp), %r15
movq 0x4b8(%rsp), %r12
movq 0x4c0(%rsp), %rax
movzbl (%rax), %ebx
movl %ebx, %ebp
andb $0x3, %bpl
movq %r14, %rdi
callq 0x3bc74
movslq 0xc(%r15), %rax
movq 0x18(%r15), %rcx
movslq 0x8(%r15), %rdx
cmpb $0x1, %bpl
movq %r15, 0x30(%rsp)
movq %rbx, 0x68(%rsp)
movq %rax, 0x40(%rsp)
movq %rcx, 0x58(%rsp)
movb %bpl, 0x2c(%rsp)
movq %rdx, 0x10(%rsp)
movq %r12, 0x18(%rsp)
jne 0x39d1c
xorl %edi, %edi
movl $0x1, %esi
movq %r12, %rdx
leaq 0xa8(%rsp), %rbp
movq %rbp, %rcx
callq 0x49c15
leaq 0x148(%rsp), %rdi
movq %rbp, %rsi
callq *(%r13)
movl 0x558(%rsp), %esi
addl 0x534(%rsp), %esi
movq %r15, %rdi
movq %rbp, %rdx
callq 0x3afd5
movl %eax, 0x558(%rsp)
movl %eax, 0x534(%rsp)
movl $0x1, %edi
xorl %esi, %esi
movq %r12, %rdx
jmp 0x39d2e
xorl %edi, %edi
movl $0x3, %esi
movq %r12, %rdx
leaq 0xa8(%rsp), %rbp
movq %rbp, %rcx
callq 0x49c15
leaq 0x168(%rsp), %r12
xorl %r15d, %r15d
movl $0x21, %r13d
movq %r12, 0x20(%rsp)
movq 0x30(%rsp), %r14
movl 0x538(%rsp,%r15,4), %ebx
addl 0x490(%rsp,%r13,4), %ebx
movq %r12, %rdi
movq %rbp, %rsi
leaq 0x3113a(%rip), %rax # 0x6aea8
callq *(%rax)
movq %r14, %rdi
movl %ebx, %esi
movq %rbp, %rdx
callq 0x3afd5
movl %eax, 0x538(%rsp,%r15,4)
movl %eax, 0x490(%rsp,%r13,4)
incq %r13
addq $0x20, %r12
cmpq $0x25, %r13
jne 0x39d51
incq %r15
movq 0x20(%rsp), %r12
subq $-0x80, %r12
cmpq $0x4, %r15
jne 0x39d41
movq 0x30(%rsp), %rax
movslq 0xc(%rax), %rcx
movq %rcx, 0x90(%rsp)
movq 0x18(%rax), %rcx
movq %rcx, 0x50(%rsp)
movslq 0x8(%rax), %rax
movq %rax, 0x48(%rsp)
xorl %edi, %edi
movl $0x2, %esi
movq 0x18(%rsp), %rdx
movq %rbp, %rcx
callq 0x49c15
movb $0x1, %al
xorl %ecx, %ecx
movl %eax, 0x70(%rsp)
movq %rcx, %rbx
orq $0x4, %rbx
movb $0x1, %al
xorl %esi, %esi
movq %rcx, 0x78(%rsp)
movl %eax, 0x80(%rsp)
movq %rsi, %rax
orq %rbx, %rax
orq %rcx, %rsi
addq %rsi, %rsi
movq %rsi, 0x20(%rsp)
movq %rax, 0x18(%rsp)
movl 0x538(%rsp,%rax,4), %r15d
movb $0x1, %r13b
xorl %edi, %edi
leaq 0xa8(%rsp), %r12
movq 0x30(%rsp), %r14
movq %rdi, %rbp
orq %rbx, %rbp
addl 0x514(%rsp,%rbp,4), %r15d
orq 0x20(%rsp), %rdi
shlq $0x5, %rdi
leaq 0x368(%rsp), %rax
addq %rax, %rdi
movq %r12, %rsi
leaq 0x3104a(%rip), %rax # 0x6aea8
callq *(%rax)
movq %r14, %rdi
movl %r15d, %esi
movq %r12, %rdx
callq 0x3afd5
movl %eax, %r15d
movq 0x18(%rsp), %rax
movl %r15d, 0x538(%rsp,%rax,4)
movl %r15d, 0x514(%rsp,%rbp,4)
movl $0x1, %edi
testb $0x1, %r13b
movl $0x0, %r13d
jne 0x39e32
movl $0x1, %esi
testb $0x1, 0x80(%rsp)
movl $0x0, %eax
movq 0x78(%rsp), %rcx
jne 0x39dfb
movl $0x2, %ecx
testb $0x1, 0x70(%rsp)
jne 0x39de7
xorl %eax, %eax
cmpb $0x1, 0x2c(%rsp)
sete %al
movq 0x50(%rsp), %rdx
addq 0x48(%rsp), %rdx
movq 0x90(%rsp), %rcx
leaq (%rcx,%rdx,8), %rcx
addq $0x8, %rcx
movq 0x58(%rsp), %r9
addq 0x10(%rsp), %r9
movq 0x68(%rsp), %r8
shrl $0x5, %r8d
andl $0x3, %r8d
movq 0x30(%rsp), %rdi
movslq 0xc(%rdi), %rdx
movslq 0x8(%rdi), %rsi
addq 0x18(%rdi), %rsi
movq 0x40(%rsp), %rdi
leaq (%rdi,%r9,8), %rdi
subq %rcx, %rdx
subq %rdi, %rcx
addq $-0x8, %rcx
movq %rcx, 0x5c0(%rsp)
leaq (%rdx,%rsi,8), %rdx
addq $0x8, %rdx
movq %rdx, 0x5c8(%rsp)
leaq (%r8,%r8,2), %rsi
leaq 0x490(%rsp), %r14
leaq (%r14,%rsi,8), %rsi
addq $0xd0, %rsi
addq %rcx, (%rsi,%rax,8)
addq %rdx, 0x10(%rsi)
movq %r14, %rdi
callq 0x3bd68
movq 0x60(%rsp), %r12
leaq 0x120(%rsp), %r15
leaq 0x30f32(%rip), %r13 # 0x6aea8
jmp 0x39f7e
andl $0x1000000, (%rax) # imm = 0x1000000
movq %r14, %rdi
callq 0x3a07b
movq %r14, %rdi
callq 0x4a217
movq %r14, %rdi
callq 0x3bb19
movq %r14, %rdi
movl $0x14, %esi
callq 0x3b5cb
movl %eax, %ebp
movq %r14, %rdi
callq 0x3be2b
testl %ebp, %ebp
je 0x39fd2
movq %r14, %rdi
callq 0x3bf04
testl %eax, %eax
jne 0x39c05
leaq 0x490(%rsp), %rdi
movl %ebp, %esi
callq 0x3a1ff
jmp 0x39fe1
movq 0x4b8(%rsp), %rdi
callq 0x3e648
xorl %eax, %eax
addq $0x1398, %rsp # imm = 0x1398
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
PreLoopInitialize
|
// Sets up one bit-writer per partition before the encoding loop starts.
// The initial buffer size is an estimate derived from the picture's MB count
// and a per-quality average-bytes-per-MB table. On allocation failure, all
// bit-writers are released and the error is recorded on the picture.
// Returns 1 on success, 0 on out-of-memory.
static int PreLoopInitialize(VP8Encoder* const enc) {
  const int avg_bytes_per_mb = kAverageBytesPerMB[enc->base_quant_ >> 4];
  const int bytes_per_parts =
      enc->mb_w_ * enc->mb_h_ * avg_bytes_per_mb / enc->num_parts_;
  int ok = 1;
  int p = 0;
  // Initialize the bit-writers
  while (ok && p < enc->num_parts_) {
    ok = VP8BitWriterInit(enc->parts_ + p, bytes_per_parts);
    ++p;
  }
  if (!ok) {
    VP8EncFreeBitWriters(enc);  // malloc error occurred
    WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY);
  }
  return ok;
}
|
movl 0xe00(%rdi), %eax
sarl $0x4, %eax
cltq
leaq 0x1e6cb(%rip), %rcx # 0x586d0
movzbl (%rax,%rcx), %eax
imull 0x30(%rdi), %eax
imull 0x34(%rdi), %eax
movl 0x3c(%rdi), %ecx
cltd
idivl %ecx
testl %ecx, %ecx
jle 0x3a051
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rdi, %rbx
leaq 0x70(%rdi), %r14
movslq %eax, %r15
xorl %r12d, %r12d
movq %r14, %rdi
movq %r15, %rsi
callq 0x443f9
testl %eax, %eax
je 0x3a057
incq %r12
movslq 0x3c(%rbx), %rcx
addq $0x30, %r14
cmpq %rcx, %r12
jl 0x3a030
jmp 0x3a06f
movl $0x1, %eax
retq
movq %rbx, %rdi
callq 0x3e648
movq 0x8(%rbx), %rdi
movl $0x1, %esi
callq 0x2fb4a
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
PostLoopFinalize
|
// Wraps up after the main encoding loop: flushes every partition bit-writer
// and folds their error flags into 'ok'. On success, finalizes the per-plane
// residual byte counters (when stats are requested) and stores the filter
// strength; on failure, frees the bit-writers. Returns the final 'ok'.
static int PostLoopFinalize(VP8EncIterator* const it, int ok) {
  VP8Encoder* const enc = it->enc_;
  if (ok) {    // Finalize the partitions, check for extra errors.
    int p = 0;
    while (p < enc->num_parts_) {
      VP8BitWriterFinish(enc->parts_ + p);
      ok &= !enc->parts_[p].error_;
      ++p;
    }
  }
  if (!ok) {
    // Something bad happened -> need to do some memory cleanup.
    VP8EncFreeBitWriters(enc);
    return ok;
  }
  // All good. Finish up.
#if !defined(WEBP_DISABLE_STATS)
  if (enc->pic_->stats != NULL) {  // finalize byte counters...
    int plane;
    for (plane = 0; plane <= 2; ++plane) {
      int s;
      for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
        // Convert accumulated bit counts to bytes, rounding up.
        enc->residual_bytes_[plane][s] =
            (int)((it->bit_count_[s][plane] + 7) >> 3);
      }
    }
  }
#endif
  VP8AdjustFilterStrength(it);  // ...and store filter stats.
  return ok;
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
movq 0x28(%rdi), %r15
testl %esi, %esi
je 0x3a2cd
movl %esi, %ebx
movq %rdi, %r14
cmpl $0x0, 0x3c(%r15)
jle 0x3a258
leaq 0x70(%r15), %r12
andl $0x1, %ebx
xorl %r13d, %r13d
movl %ebx, %eax
movl %eax, %ebx
movq %r12, %rdi
callq 0x444a5
cmpl $0x0, 0x28(%r12)
movl $0x0, %eax
cmovel %ebx, %eax
incq %r13
movslq 0x3c(%r15), %rcx
addq $0x30, %r12
cmpq %rcx, %r13
jl 0x3a22c
testl %eax, %eax
je 0x3a2cd
movq 0x8(%r15), %rax
cmpq $0x0, 0x80(%rax)
je 0x3a2c3
leaq 0xd0(%r14), %rax
addq $0x5c04, %r15 # imm = 0x5C04
xorl %ecx, %ecx
movdqa 0x1e432(%rip), %xmm0 # 0x586b0
movq %rax, %rdx
xorl %esi, %esi
movq 0x18(%rdx), %xmm1
movq (%rdx), %xmm2
punpcklqdq %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0]
paddq %xmm0, %xmm2
psrlq $0x3, %xmm2
pshufd $0xe8, %xmm2, %xmm1 # xmm1 = xmm2[0,2,2,3]
movq %xmm1, (%r15,%rsi,4)
addq $0x2, %rsi
addq $0x30, %rdx
cmpq $0x4, %rsi
jne 0x3a283
incq %rcx
addq $0x8, %rax
addq $0x10, %r15
cmpq $0x3, %rcx
jne 0x3a27e
movq %r14, %rdi
callq 0x4a5b9
jmp 0x3a2d7
movq %r15, %rdi
callq 0x3e648
xorl %ebx, %ebx
movl %ebx, %eax
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
VP8EncTokenLoop
|
// Multi-pass, token-buffer based encoding loop. Each pass re-encodes the
// picture at the current quantizer (stats.q), recording coefficients into
// enc->tokens_ instead of emitting bits directly; coefficient probabilities
// are refreshed roughly eight times per pass (every max_count macroblocks).
// Between passes the quantizer is adjusted (ComputeNextQ) until convergence
// (|stats.dq| <= DQ_LIMIT), passes are exhausted, or the partition-0 size
// limit forces an extra pass with a tighter i4-header-bits budget. The
// buffered tokens are finally serialized with VP8EmitTokens(). Returns 1 on
// success, 0 on error (typically out-of-memory while recording tokens).
int VP8EncTokenLoop(VP8Encoder* const enc) {
  // Roughly refresh the proba eight times per pass
  int max_count = (enc->mb_w_ * enc->mb_h_) >> 3;
  int num_pass_left = enc->config_->pass;
  const int do_search = enc->do_search_;
  VP8EncIterator it;
  VP8EncProba* const proba = &enc->proba_;
  const VP8RDLevel rd_opt = enc->rd_opt_level_;
  const uint64_t pixel_count = enc->mb_w_ * enc->mb_h_ * 384;
  PassStats stats;
  int ok;

  InitPassStats(enc, &stats);
  ok = PreLoopInitialize(enc);
  if (!ok) return 0;

  if (max_count < MIN_COUNT) max_count = MIN_COUNT;

  assert(enc->num_parts_ == 1);
  assert(enc->use_tokens_);
  assert(proba->use_skip_proba_ == 0);
  assert(rd_opt >= RD_OPT_BASIC);   // otherwise, token-buffer won't be useful
  assert(num_pass_left > 0);

  while (ok && num_pass_left-- > 0) {
    const int is_last_pass = (fabs(stats.dq) <= DQ_LIMIT) ||
                             (num_pass_left == 0) ||
                             (enc->max_i4_header_bits_ == 0);
    uint64_t size_p0 = 0;
    uint64_t distortion = 0;
    int cnt = max_count;  // countdown to the next proba refresh
    VP8IteratorInit(enc, &it);
    SetLoopParams(enc, stats.q);
    if (is_last_pass) {
      ResetTokenStats(enc);
      VP8InitFilter(&it);  // don't collect stats until last pass (too costly)
    }
    VP8TBufferClear(&enc->tokens_);
    do {
      VP8ModeScore info;
      VP8IteratorImport(&it, NULL);
      if (--cnt < 0) {
        FinalizeTokenProbas(proba);
        VP8CalculateLevelCosts(proba);  // refresh cost tables for rd-opt
        cnt = max_count;
      }
      VP8Decimate(&it, &info, rd_opt);
      ok = RecordTokens(&it, &info, &enc->tokens_);
      if (!ok) {
        WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY);
        break;
      }
      size_p0 += info.H;
      distortion += info.D;
      if (is_last_pass) {
        StoreSideInfo(&it);
        VP8StoreFilterStats(&it);
        VP8IteratorExport(&it);
        ok = VP8IteratorProgress(&it, 20);
      }
      VP8IteratorSaveBoundary(&it);
    } while (ok && VP8IteratorNext(&it));
    if (!ok) break;

    size_p0 += enc->segment_hdr_.size_;
    if (stats.do_size_search) {
      uint64_t size = FinalizeTokenProbas(&enc->proba_);
      size += VP8EstimateTokenSize(&enc->tokens_,
                                   (const uint8_t*)proba->coeffs_);
      size = (size + size_p0 + 1024) >> 11;  // -> size in bytes
      size += HEADER_SIZE_ESTIMATE;
      stats.value = (double)size;
    } else {  // compute and store PSNR
      stats.value = GetPSNR(distortion, pixel_count);
    }

#if (DEBUG_SEARCH > 0)
    printf("#%2d metric:%.1lf -> %.1lf   last_q=%.2lf q=%.2lf dq=%.2lf "
           " range:[%.1f, %.1f]\n",
           num_pass_left, stats.last_value, stats.value,
           stats.last_q, stats.q, stats.dq, stats.qmin, stats.qmax);
#endif
    if (enc->max_i4_header_bits_ > 0 && size_p0 > PARTITION0_SIZE_LIMIT) {
      ++num_pass_left;
      enc->max_i4_header_bits_ >>= 1;  // strengthen header bit limitation...
      if (is_last_pass) {
        ResetSideInfo(&it);
      }
      continue;                        // ...and start over
    }
    if (is_last_pass) {
      break;   // done
    }
    if (do_search) {
      ComputeNextQ(&stats);  // Adjust q
    }
  }
  if (ok) {
    if (!stats.do_size_search) {
      FinalizeTokenProbas(&enc->proba_);
    }
    ok = VP8EmitTokens(&enc->tokens_, enc->parts_ + 0,
                       (const uint8_t*)proba->coeffs_, 1);
  }
  ok = ok && WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
  return PostLoopFinalize(&it, ok);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1388, %rsp # imm = 0x1388
movq %rdi, %r14
movslq 0x30(%rdi), %rbx
movslq 0x34(%rdi), %r15
movq (%rdi), %rdi
movl 0x3c(%rdi), %r12d
movl 0x5c54(%r14), %eax
movl %eax, 0x44(%rsp)
movl 0x5c44(%r14), %eax
movl %eax, 0x48(%rsp)
leaq 0xa8(%rsp), %rsi
callq 0x3a98c
movq %r14, %rdi
callq 0x39ff3
testl %eax, %eax
je 0x3a959
leaq 0xe20(%r14), %rax
movq %rax, 0x18(%rsp)
imulq %rbx, %r15
movl %r15d, %eax
sarl $0x3, %eax
cmpl $0x61, %eax
movl $0x60, %ecx
cmovgel %eax, %ecx
movl %ecx, 0x14(%rsp)
testl %r12d, %r12d
jle 0x3a8fe
movq %r15, %rcx
movq %r12, %rdx
movq %r15, %rax
shlq $0x7, %rax
leaq (%rax,%rax,2), %rax
leaq 0x1244(%r14), %rsi
movq %rsi, 0x68(%rsp)
leaq 0x1f0(%r14), %rbx
leaq 0xe24(%r14), %rsi
movq %rsi, 0x60(%rsp)
movq %rax, %xmm0
punpckldq 0x1c8fc(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd 0x1c904(%rip), %xmm0 # 0x56cb0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 0x1c8a8(%rip), %xmm1 # 0x56c68
movapd %xmm1, 0x90(%rsp)
movaps 0x1e2d0(%rip), %xmm1 # 0x586a0
movsd 0x1e2e8(%rip), %xmm2 # 0x586c0
leaq 0x480(%rsp), %r15
leaq 0xe0(%rsp), %r12
movq %r14, 0x78(%rsp)
movq %rcx, 0x58(%rsp)
movq %rbx, 0x28(%rsp)
leal -0x1(%rdx), %eax
movss 0xac(%rsp), %xmm0
andps %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
ucomisd %xmm0, %xmm2
movb $0x1, 0xf(%rsp)
movq %rdx, 0x70(%rsp)
movl %eax, 0x10(%rsp)
jae 0x3a42f
testl %eax, %eax
je 0x3a42f
cmpl $0x0, 0x5c48(%r14)
sete 0xf(%rsp)
movq %r14, %rdi
movq %r15, %rsi
callq 0x3b540
movss 0xb0(%rsp), %xmm0
movq %r14, %rdi
callq 0x3aa30
cmpb $0x0, 0xf(%rsp)
je 0x3a46b
movl $0x1080, %edx # imm = 0x1080
movq 0x68(%rsp), %rdi
xorl %esi, %esi
callq 0x40c0
movq %r15, %rdi
callq 0x4a1f7
movq %rbx, %rdi
callq 0x3edd5
xorl %ebp, %ebp
movq $0x0, 0x20(%rsp)
movl 0x14(%rsp), %r13d
movq %r15, %rdi
xorl %esi, %esi
callq 0x3b628
testl %r13d, %r13d
jle 0x3a497
decl %r13d
jmp 0x3a4b7
movq %rbx, %r14
movq 0x18(%rsp), %rbx
movq %rbx, %rdi
callq 0x3ac52
movq %rbx, %rdi
movq %r14, %rbx
callq 0x49974
movl 0x14(%rsp), %r13d
movq %rbp, 0x80(%rsp)
movq %r15, %rdi
leaq 0x110(%rsp), %rsi
movl 0x48(%rsp), %edx
callq 0x3c8e0
movq 0x4a8(%rsp), %r14
movq %r15, %rdi
callq 0x3bc74
movq 0x4b0(%rsp), %rax
movb (%rax), %al
andb $0x3, %al
cmpb $0x1, %al
movl %r13d, 0x4c(%rsp)
movq %r14, 0x30(%rsp)
jne 0x3a555
movl 0x548(%rsp), %ebp
addl 0x524(%rsp), %ebp
xorl %edi, %edi
movl $0x1, %esi
movq %r14, %rdx
movq %r12, %rcx
callq 0x49c15
leaq 0x138(%rsp), %rdi
movq %r12, %rsi
leaq 0x30979(%rip), %rax # 0x6aea8
callq *(%rax)
movl %ebp, %edi
movq %r12, %rsi
movq %rbx, %rdx
callq 0x3ee27
movl %eax, 0x548(%rsp)
movl %eax, 0x524(%rsp)
movl $0x1, %edi
xorl %esi, %esi
jmp 0x3a55c
xorl %edi, %edi
movl $0x3, %esi
movq %r14, %rdx
movq %r12, %rcx
callq 0x49c15
leaq 0x158(%rsp), %rbp
xorl %r14d, %r14d
movl $0x21, %r12d
movq %rbp, 0x38(%rsp)
leaq 0xe0(%rsp), %r13
movq %rbx, %r15
movl 0x528(%rsp,%r14,4), %ebx
addl 0x480(%rsp,%r12,4), %ebx
movq %rbp, %rdi
movq %r13, %rsi
leaq 0x30903(%rip), %rax # 0x6aea8
callq *(%rax)
movl %ebx, %edi
movq %r15, %rbx
movq %r13, %rsi
movq %r15, %rdx
callq 0x3ee27
movl %eax, 0x528(%rsp,%r14,4)
movl %eax, 0x480(%rsp,%r12,4)
incq %r12
addq $0x20, %rbp
cmpq $0x25, %r12
jne 0x3a585
incq %r14
movq 0x38(%rsp), %rbp
subq $-0x80, %rbp
cmpq $0x4, %r14
jne 0x3a572
xorl %edi, %edi
movl $0x2, %esi
movq 0x30(%rsp), %rdx
leaq 0xe0(%rsp), %rcx
callq 0x49c15
movb $0x1, %al
xorl %ecx, %ecx
movl %eax, 0x50(%rsp)
movq %rcx, %r15
orq $0x4, %r15
movb $0x1, %al
xorl %esi, %esi
movq %rcx, 0x88(%rsp)
movl %eax, 0x54(%rsp)
movq %rsi, %rax
orq %r15, %rax
orq %rcx, %rsi
addq %rsi, %rsi
movq %rsi, 0x38(%rsp)
movq %rax, 0x30(%rsp)
movl 0x528(%rsp,%rax,4), %r14d
movb $0x1, %r12b
xorl %edi, %edi
movq 0x28(%rsp), %rbp
leaq 0xe0(%rsp), %rbx
movq %rdi, %r13
orq %r15, %r13
addl 0x504(%rsp,%r13,4), %r14d
orq 0x38(%rsp), %rdi
shlq $0x5, %rdi
leaq 0x358(%rsp), %rax
addq %rax, %rdi
movq %rbx, %rsi
leaq 0x3082e(%rip), %rax # 0x6aea8
callq *(%rax)
movl %r14d, %edi
movq %rbx, %rsi
movq %rbp, %rdx
callq 0x3ee27
movl %eax, %r14d
movq 0x30(%rsp), %rax
movl %r14d, 0x528(%rsp,%rax,4)
movl %r14d, 0x504(%rsp,%r13,4)
movl $0x1, %edi
testb $0x1, %r12b
movl $0x0, %r12d
jne 0x3a64e
movl $0x1, %esi
testb $0x1, 0x54(%rsp)
movl $0x0, %eax
movq 0x88(%rsp), %rcx
jne 0x3a61a
movl $0x2, %ecx
testb $0x1, 0x50(%rsp)
jne 0x3a603
leaq 0x480(%rsp), %r15
movq %r15, %rdi
callq 0x3bd68
movq 0x78(%rsp), %r14
cmpl $0x0, 0x210(%r14)
movl 0x4c(%rsp), %r13d
jne 0x3a95d
movq 0x80(%rsp), %rbp
addq 0x120(%rsp), %rbp
movq 0x20(%rsp), %rax
addq 0x110(%rsp), %rax
movq %rax, 0x20(%rsp)
cmpb $0x0, 0xf(%rsp)
je 0x3a76e
movq %r15, %rdi
callq 0x3a07b
movq %r15, %rdi
callq 0x4a217
movq %r15, %rdi
callq 0x3bb19
movq %r15, %rdi
movl $0x14, %esi
callq 0x3b5cb
movl %eax, %ebx
movq %r15, %rdi
callq 0x3be2b
testl %ebx, %ebx
movq 0x28(%rsp), %rbx
jne 0x3a77b
jmp 0x3a96b
movq %r15, %rdi
callq 0x3be2b
movq 0x28(%rsp), %rbx
movq %r15, %rdi
callq 0x3bf04
testl %eax, %eax
leaq 0xe0(%rsp), %r12
jne 0x3a483
movq %rbx, %r13
movslq 0x28(%r14), %rax
addq %rax, %rbp
cmpl $0x0, 0xd8(%rsp)
je 0x3a7dc
movq 0x18(%rsp), %rdi
callq 0x3ac52
movslq %eax, %rbx
movq %r13, %rdi
movq 0x60(%rsp), %rsi
callq 0x3f95b
addq %rbp, %rbx
addq %rbx, %rax
addq $0x400, %rax # imm = 0x400
shrq $0xb, %rax
addq $0x1e, %rax
cvtsi2sd %rax, %xmm0
jmp 0x3a836
cmpl $0x0, 0x58(%rsp)
sete %al
movq 0x20(%rsp), %rdx
testq %rdx, %rdx
sete %cl
orb %al, %cl
movsd 0x1c44f(%rip), %xmm0 # 0x56c48
jne 0x3a836
movq %rdx, %xmm0
punpckldq 0x1c498(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd 0x1c4a0(%rip), %xmm0 # 0x56cb0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
movapd 0x90(%rsp), %xmm0
divsd %xmm1, %xmm0
callq 0x4030
mulsd 0x1c48a(%rip), %xmm0 # 0x56cc0
movsd %xmm0, 0xc0(%rsp)
movl 0x5c48(%r14), %eax
testl %eax, %eax
movq 0x70(%rsp), %rdx
movaps 0x1de4c(%rip), %xmm1 # 0x586a0
movsd 0x1de64(%rip), %xmm2 # 0x586c0
jle 0x3a8c3
cmpq $0x3fc00001, %rbp # imm = 0x3FC00001
jb 0x3a8c3
shrl %eax
movl %eax, 0x5c48(%r14)
cmpb $0x0, 0xf(%rsp)
je 0x3a8be
movq 0x4a8(%rsp), %rax
movq 0x8(%rax), %rcx
cmpq $0x0, 0x80(%rcx)
je 0x3a8a2
movl $0x0, 0x5c3c(%rax)
movq $0x0, 0x5c34(%rax)
xorl %ecx, %ecx
movq %rcx, 0x5bf8(%rax)
xorpd %xmm0, %xmm0
movupd %xmm0, 0x5bd8(%rax)
movq %rcx, 0x5be8(%rax)
movq %r13, %rbx
jmp 0x3a8f6
cmpb $0x0, 0xf(%rsp)
jne 0x3a8fe
movq %r13, %rbx
cmpl $0x0, 0x44(%rsp)
je 0x3a8f0
leaq 0xa8(%rsp), %rdi
callq 0x3ae79
movsd 0x1ddd7(%rip), %xmm2 # 0x586c0
movaps 0x1ddb0(%rip), %xmm1 # 0x586a0
movl 0x10(%rsp), %eax
movl %eax, %edx
testl %edx, %edx
jg 0x3a3f7
cmpl $0x0, 0xd8(%rsp)
jne 0x3a912
movq 0x18(%rsp), %rdi
callq 0x3ac52
leaq 0x1f0(%r14), %rdi
leaq 0x70(%r14), %rsi
leaq 0xe24(%r14), %rdx
movl $0x1, %ecx
callq 0x3f8a4
testl %eax, %eax
je 0x3a96b
movq 0x8(%r14), %rdi
movl 0x218(%r14), %esi
addq $0x218, %r14 # imm = 0x218
addl $0x14, %esi
movq %r14, %rdx
callq 0x2fb53
xorl %esi, %esi
testl %eax, %eax
setne %sil
jmp 0x3a96d
xorl %eax, %eax
jmp 0x3a97a
movq 0x8(%r14), %rdi
movl $0x1, %esi
callq 0x2fb4a
xorl %esi, %esi
leaq 0x480(%rsp), %rdi
callq 0x3a1ff
addq $0x1388, %rsp # imm = 0x1388
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
InitPassStats
|
// Seed the multi-pass statistics from the encoder configuration.
// Returns non-zero when a target-size binary search is requested
// (i.e. config->target_size != 0).
static int InitPassStats(const VP8Encoder* const enc, PassStats* const s) {
  const uint64_t size_goal = (uint64_t)enc->config_->target_size;
  const int searching_size = (size_goal != 0);
  const float psnr_goal = enc->config_->target_PSNR;
  s->is_first = 1;
  s->dq = 10.f;
  s->qmin = 1.f * enc->config_->qmin;
  s->qmax = 1.f * enc->config_->qmax;
  s->q = s->last_q = Clamp(enc->config_->quality, s->qmin, s->qmax);
  if (searching_size) {
    s->target = (double)size_goal;   // search metric is the coded size
  } else if (psnr_goal > 0.) {
    s->target = psnr_goal;           // search metric is PSNR
  } else {
    s->target = 40.;                 // default, just in case
  }
  s->value = s->last_value = 0.;
  s->do_size_search = searching_size;
  return searching_size;
}
|
movslq 0x10(%rdi), %rax
movss 0x14(%rdi), %xmm0
movabsq $0x4120000000000001, %rcx # imm = 0x4120000000000001
movq %rcx, (%rsi)
movsd 0x6c(%rdi), %xmm1
cvtdq2ps %xmm1, %xmm1
movlps %xmm1, 0x10(%rsi)
movss 0x4(%rdi), %xmm2
movaps %xmm1, %xmm3
shufps $0x55, %xmm1, %xmm3 # xmm3 = xmm3[1,1],xmm1[1,1]
minss %xmm2, %xmm3
cmpltss %xmm1, %xmm2
movaps %xmm2, %xmm4
andnps %xmm3, %xmm4
andps %xmm1, %xmm2
orps %xmm4, %xmm2
movss %xmm2, 0xc(%rsi)
movss %xmm2, 0x8(%rsi)
testq %rax, %rax
je 0x3aa01
movq %rax, %xmm1
punpckldq 0x1c2b5(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
subpd 0x1c2bd(%rip), %xmm1 # 0x56cb0
movapd %xmm1, %xmm0
unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
addsd %xmm1, %xmm0
jmp 0x3aa17
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
ja 0x3aa13
movsd 0x1dcb7(%rip), %xmm0 # 0x586c8
jmp 0x3aa17
cvtss2sd %xmm0, %xmm0
xorl %ecx, %ecx
testl %eax, %eax
setne %cl
movsd %xmm0, 0x28(%rsi)
xorpd %xmm0, %xmm0
movupd %xmm0, 0x18(%rsi)
movl %ecx, 0x30(%rsi)
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
SetLoopParams
|
// Prepare the encoder for one compression pass at quality 'q'.
static void SetLoopParams(VP8Encoder* const enc, float q) {
  // Clip the quality parameter to its valid [0..100] range first.
  const float clipped_q = Clamp(q, 0.f, 100.f);
  VP8SetSegmentParams(enc, clipped_q);  // per-segment quantizers and filters
  SetSegmentProbas(enc);                // segment probabilities
  ResetStats(enc);
  ResetSSE(enc);
}
|
pushq %rbp
pushq %r14
pushq %rbx
subq $0x10, %rsp
movq %rdi, %rbx
movss 0x165c9(%rip), %xmm1 # 0x5100c
minss %xmm0, %xmm1
xorps %xmm2, %xmm2
cmpltss %xmm2, %xmm0
andnps %xmm1, %xmm0
callq 0x3c13c
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movl 0x34(%rbx), %ecx
imull 0x30(%rbx), %ecx
testl %ecx, %ecx
jle 0x3aa89
movq 0x5c60(%rbx), %rax
movl %ecx, %ecx
xorl %edx, %edx
movzbl (%rax,%rdx,4), %esi
shrl $0x3, %esi
andl $0xc, %esi
incl (%rsp,%rsi)
incq %rdx
cmpq %rdx, %rcx
jne 0x3aa74
movq 0x8(%rbx), %rax
movq 0x80(%rax), %rax
testq %rax, %rax
je 0x3aaa1
movaps (%rsp), %xmm0
movups %xmm0, 0x5c(%rax)
cmpl $0x2, 0x20(%rbx)
jl 0x3ab37
movl (%rsp), %r10d
movl 0x4(%rsp), %r9d
leal (%r9,%r10), %ebp
movl 0x8(%rsp), %r8d
movl 0xc(%rsp), %edi
leal (%rdi,%r8), %r11d
movl %r11d, %r14d
movb $-0x1, %cl
movb $-0x1, %sil
addl %ebp, %r14d
je 0x3aaec
movl %ebp, %edx
shll $0x8, %edx
subl %ebp, %edx
movl %r14d, %eax
shrl $0x1f, %eax
addl %r14d, %eax
sarl %eax
addl %edx, %eax
cltd
idivl %r14d
movl %eax, %esi
movb %sil, 0xe20(%rbx)
testl %ebp, %ebp
je 0x3ab10
movl %r10d, %ecx
shll $0x8, %ecx
subl %r10d, %ecx
movl %ebp, %eax
shrl $0x1f, %eax
addl %ebp, %eax
sarl %eax
addl %ecx, %eax
cltd
idivl %ebp
movl %eax, %ecx
movb %cl, 0xe21(%rbx)
testl %r11d, %r11d
je 0x3ab45
movl %r8d, %edx
shll $0x8, %edx
subl %r8d, %edx
movl %r11d, %eax
shrl $0x1f, %eax
addl %r11d, %eax
sarl %eax
addl %edx, %eax
cltd
idivl %r11d
jmp 0x3ab4a
movl $0x0, 0x24(%rbx)
xorl %eax, %eax
jmp 0x3ac16
movl $0xff, %eax
movb %al, 0xe22(%rbx)
cmpb $-0x1, %sil
jne 0x3ab6e
cmpb $-0x1, %cl
jne 0x3ab6e
xorl %edx, %edx
movb $-0x1, %cl
cmpb $-0x1, %al
setne %dl
movl %edx, 0x24(%rbx)
je 0x3ab77
movb $-0x1, %sil
jmp 0x3abb9
movl $0x1, 0x24(%rbx)
jmp 0x3abb9
movl 0x34(%rbx), %edx
imull 0x30(%rbx), %edx
movb $-0x1, %sil
testl %edx, %edx
jle 0x3abb9
xorl %eax, %eax
movq 0x5c60(%rbx), %rcx
andb $-0x61, (%rcx,%rax,4)
incq %rax
movslq 0x30(%rbx), %rcx
movslq 0x34(%rbx), %rdx
imulq %rcx, %rdx
cmpq %rdx, %rax
jl 0x3ab87
movb 0xe20(%rbx), %sil
movb 0xe21(%rbx), %cl
movb 0xe22(%rbx), %al
movzbl %sil, %esi
leaq 0x1c5bc(%rip), %rdx # 0x57180
movzwl (%rdx,%rsi,2), %r11d
movzbl %cl, %ecx
movzwl (%rdx,%rcx,2), %ebp
addl %r11d, %ebp
imull %r10d, %ebp
notb %cl
movzbl %cl, %ecx
movzwl (%rdx,%rcx,2), %ecx
addl %r11d, %ecx
imull %r9d, %ecx
addl %ebp, %ecx
notb %sil
movzbl %sil, %esi
movzwl (%rdx,%rsi,2), %esi
movzbl %al, %eax
movzwl (%rdx,%rax,2), %r9d
addl %esi, %r9d
imull %r8d, %r9d
notb %al
movzbl %al, %eax
movzwl (%rdx,%rax,2), %eax
addl %esi, %eax
imull %edi, %eax
addl %r9d, %eax
addl %ecx, %eax
movl %eax, 0x28(%rbx)
leaq 0xe20(%rbx), %rdi
callq 0x49974
movl $0x0, 0x5bd0(%rbx)
xorl %eax, %eax
movq %rax, 0x5bf8(%rbx)
xorps %xmm0, %xmm0
movups %xmm0, 0x5bd8(%rbx)
movq %rax, 0x5be8(%rbx)
addq $0x10, %rsp
popq %rbx
popq %r14
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
ComputeNextQ
|
// Pick the next quality value 'q' for the size/PSNR search, using a
// secant-style step between the last two samples. Updates and returns s->q.
static float ComputeNextQ(PassStats* const s) {
  float delta;
  if (s->is_first) {
    // First pass: probe in the direction that should approach the target.
    delta = (s->value > s->target) ? -s->dq : s->dq;
    s->is_first = 0;
  } else if (s->value != s->last_value) {
    // Linear interpolation between the two most recent (q, value) samples.
    const double slope = (s->target - s->value) / (s->last_value - s->value);
    delta = (float)(slope * (s->last_q - s->q));
  } else {
    delta = 0.;  // metric did not move: we're done?!
  }
  // Limit variable to avoid large swings.
  s->dq = Clamp(delta, -30.f, 30.f);
  s->last_q = s->q;
  s->last_value = s->value;
  s->q = Clamp(s->q + s->dq, s->qmin, s->qmax);
  return s->q;
}
|
cmpl $0x0, (%rdi)
movsd 0x18(%rdi), %xmm0
je 0x3ae9e
ucomisd 0x28(%rdi), %xmm0
movss 0x4(%rdi), %xmm1
jbe 0x3ae96
xorps 0x1991a(%rip), %xmm1 # 0x547b0
movl $0x0, (%rdi)
jmp 0x3aed8
movsd 0x20(%rdi), %xmm1
ucomisd %xmm1, %xmm0
jne 0x3aeab
jnp 0x3aed4
movsd 0x28(%rdi), %xmm2
subsd %xmm0, %xmm2
subsd %xmm0, %xmm1
divsd %xmm1, %xmm2
movss 0xc(%rdi), %xmm1
subss 0x8(%rdi), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm2, %xmm1
cvtsd2ss %xmm1, %xmm1
jmp 0x3aed8
xorpd %xmm1, %xmm1
movss 0x1d82c(%rip), %xmm2 # 0x5870c
minss %xmm1, %xmm2
movss 0x1d824(%rip), %xmm3 # 0x58710
cmpltss %xmm3, %xmm1
movaps %xmm1, %xmm4
andnps %xmm2, %xmm4
andps %xmm3, %xmm1
orps %xmm4, %xmm1
movss %xmm1, 0x4(%rdi)
movss 0x8(%rdi), %xmm2
movss 0x10(%rdi), %xmm3
movss %xmm2, 0xc(%rdi)
movsd %xmm0, 0x20(%rdi)
addss %xmm2, %xmm1
movss 0x14(%rdi), %xmm0
minss %xmm1, %xmm0
cmpltss %xmm3, %xmm1
movaps %xmm1, %xmm2
andnps %xmm0, %xmm2
andps %xmm3, %xmm1
orps %xmm2, %xmm1
movss %xmm1, 0x8(%rdi)
retq
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
PutCoeffs
|
// Write the quantized coefficients of one residual block 'res' to the
// boolean writer 'bw'. 'ctx' selects the probability context (derived by
// the caller from neighbouring blocks). Returns 0 only when the initial
// "block has coefficients" bit codes as unset (res->last < 0), 1 otherwise.
static int PutCoeffs(VP8BitWriter* const bw, int ctx, const VP8Residual* res) {
  int n = res->first;
  // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1
  const uint8_t* p = res->prob[n][ctx];
  if (!VP8PutBit(bw, res->last >= 0, p[0])) {
    return 0;
  }
  while (n < 16) {
    const int c = res->coeffs[n++];
    const int sign = c < 0;
    int v = sign ? -c : c;  // magnitude; the sign is coded separately below
    if (!VP8PutBit(bw, v != 0, p[1])) {
      // zero coefficient: continue with the "previous was zero" prob set
      p = res->prob[VP8EncBands[n]][0];
      continue;
    }
    if (!VP8PutBit(bw, v > 1, p[2])) {
      // magnitude 1: use the "previous was one" prob set for the next coeff
      p = res->prob[VP8EncBands[n]][1];
    } else {
      if (!VP8PutBit(bw, v > 4, p[3])) {
        // v is 2, 3 or 4
        if (VP8PutBit(bw, v != 2, p[4])) {
          VP8PutBit(bw, v == 4, p[5]);   // distinguish 3 from 4
        }
      } else if (!VP8PutBit(bw, v > 10, p[6])) {
        // v is 5..10
        if (!VP8PutBit(bw, v > 6, p[7])) {
          VP8PutBit(bw, v == 6, 159);    // 5 or 6 (fixed probability)
        } else {
          VP8PutBit(bw, v >= 9, 165);    // 7..10 (fixed probabilities)
          VP8PutBit(bw, !(v & 1), 145);
        }
      } else {
        // Large magnitude: select a category and emit the remainder
        // bit-by-bit against that category's probability table.
        int mask;
        const uint8_t* tab;
        if (v < 3 + (8 << 1)) { // VP8Cat3 (3b)
          VP8PutBit(bw, 0, p[8]);
          VP8PutBit(bw, 0, p[9]);
          v -= 3 + (8 << 0);
          mask = 1 << 2;
          tab = VP8Cat3;
        } else if (v < 3 + (8 << 2)) { // VP8Cat4 (4b)
          VP8PutBit(bw, 0, p[8]);
          VP8PutBit(bw, 1, p[9]);
          v -= 3 + (8 << 1);
          mask = 1 << 3;
          tab = VP8Cat4;
        } else if (v < 3 + (8 << 3)) { // VP8Cat5 (5b)
          VP8PutBit(bw, 1, p[8]);
          VP8PutBit(bw, 0, p[10]);
          v -= 3 + (8 << 2);
          mask = 1 << 4;
          tab = VP8Cat5;
        } else { // VP8Cat6 (11b)
          VP8PutBit(bw, 1, p[8]);
          VP8PutBit(bw, 1, p[10]);
          v -= 3 + (8 << 3);
          mask = 1 << 10;
          tab = VP8Cat6;
        }
        // Emit the remaining magnitude bits, most-significant first.
        while (mask) {
          VP8PutBit(bw, !!(v & mask), *tab++);
          mask >>= 1;
        }
      }
      p = res->prob[VP8EncBands[n]][2];  // "previous was >1" prob set
    }
    VP8PutBitUniform(bw, sign);
    if (n == 16 || !VP8PutBit(bw, n <= res->last, p[0])) {
      return 1; // EOB
    }
  }
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdx, %r13
movq %rdi, %r14
movslq (%rdx), %r15
movslq %esi, %rax
leaq (%rax,%rax,4), %rcx
leaq (%rax,%rcx,2), %rbx
movq %r15, %r12
shlq $0x5, %r12
addq %r15, %r12
addq 0x18(%rdx), %r12
movl 0x4(%rdx), %esi
notl %esi
shrl $0x1f, %esi
movzbl (%r12,%rbx), %edx
callq 0x44208
testl %eax, %eax
je 0x3b366
movq %r14, 0x10(%rsp)
movl $0x1, (%rsp)
cmpl $0xf, %r15d
jg 0x3b36d
addq %rbx, %r12
movq %r13, 0x8(%rsp)
movq 0x8(%r13), %rax
movq %r13, %r14
leaq 0x1(%r15), %r13
movzwl (%rax,%r15,2), %eax
xorl %esi, %esi
testl %eax, %eax
setne %sil
movl %eax, %ebx
negw %bx
movl %eax, (%rsp)
cmovsw %ax, %bx
movzbl 0x1(%r12), %edx
movq 0x10(%rsp), %rbp
movq %rbp, %rdi
callq 0x44208
testl %eax, %eax
je 0x3b134
movq %r13, 0x18(%rsp)
xorl %esi, %esi
cmpw $0x2, %bx
setae %sil
movzbl 0x2(%r12), %edx
movq %rbp, %rdi
callq 0x44208
testl %eax, %eax
movq %r15, 0x20(%rsp)
je 0x3b159
movzwl %bx, %r13d
xorl %esi, %esi
cmpw $0x5, %bx
setae %sil
movzbl 0x3(%r12), %edx
movq %rbp, %rdi
callq 0x44208
testl %eax, %eax
je 0x3b163
movl %r13d, 0x4(%rsp)
xorl %esi, %esi
cmpw $0xb, %bx
setae %sil
movzbl 0x6(%r12), %edx
movq %rbp, %rdi
callq 0x44208
testl %eax, %eax
movq %rbp, %r14
je 0x3b1a5
cmpw $0x12, %bx
ja 0x3b1f4
movzbl 0x8(%r12), %edx
movq %r14, %rdi
xorl %esi, %esi
callq 0x44208
movzbl 0x9(%r12), %edx
movq %r14, %rdi
xorl %esi, %esi
callq 0x44208
movl $0x4, %r13d
movl $0xfffffff5, %r12d # imm = 0xFFFFFFF5
leaq 0x1d5c5(%rip), %r15 # 0x586f4
jmp 0x3b2a7
leaq 0x1d245(%rip), %rax # 0x58380
movzbl (%rax,%r13), %eax
movl %eax, %r12d
shll $0x5, %r12d
addq %rax, %r12
addq 0x18(%r14), %r12
movq %r13, %rax
movq %r14, %r13
jmp 0x3b350
movl $0x1, %ebx
jmp 0x3b2dc
xorl %esi, %esi
cmpl $0x2, %r13d
setne %sil
movzbl 0x4(%r12), %edx
movq %rbp, %rdi
callq 0x44208
movl $0x2, %ebx
testl %eax, %eax
je 0x3b2dc
xorl %esi, %esi
cmpl $0x4, %r13d
sete %sil
movzbl 0x5(%r12), %edx
movq %rbp, %rdi
callq 0x44208
jmp 0x3b2dc
xorl %esi, %esi
cmpw $0x7, %bx
setae %sil
movzbl 0x7(%r12), %edx
movq %r14, %rdi
callq 0x44208
testl %eax, %eax
je 0x3b270
movl 0x4(%rsp), %ebp
notl %ebp
xorl %esi, %esi
cmpw $0x9, %bx
setae %sil
movq %r14, %rdi
movl $0xa5, %edx
callq 0x44208
andl $0x1, %ebp
movq %r14, %rdi
movl %ebp, %esi
movl $0x91, %edx
jmp 0x3b283
cmpw $0x22, %bx
ja 0x3b232
movzbl 0x8(%r12), %edx
movq %r14, %rdi
xorl %esi, %esi
callq 0x44208
movzbl 0x9(%r12), %edx
movq %r14, %rdi
movl $0x1, %esi
callq 0x44208
movl $0x8, %r13d
movl $0xffffffed, %r12d # imm = 0xFFFFFFED
leaq 0x1d4c7(%rip), %r15 # 0x586f7
jmp 0x3b2a7
movzbl 0x8(%r12), %edx
movq %r14, %rdi
movl $0x1, %esi
callq 0x44208
movzbl 0xa(%r12), %edx
movq %r14, %rdi
cmpw $0x42, %bx
ja 0x3b28a
xorl %esi, %esi
callq 0x44208
movl $0x10, %r13d
movl $0xffffffdd, %r12d # imm = 0xFFFFFFDD
leaq 0x1d48d(%rip), %r15 # 0x586fb
jmp 0x3b2a7
xorl %esi, %esi
cmpl $0x6, 0x4(%rsp)
sete %sil
movq %r14, %rdi
movl $0x9f, %edx
callq 0x44208
jmp 0x3b2d2
movl $0x1, %esi
callq 0x44208
movl $0x400, %r13d # imm = 0x400
movl $0xffffffbd, %r12d # imm = 0xFFFFFFBD
leaq 0x1d459(%rip), %r15 # 0x58700
addl 0x4(%rsp), %r12d
movl %r13d, %ebx
xorl %esi, %esi
testl %r12d, %r13d
setne %sil
movzbl (%r15), %edx
incq %r15
movq %r14, %rdi
callq 0x44208
shrl %ebx
cmpl $0x1, %r13d
movl %ebx, %r13d
ja 0x3b2af
movl $0x2, %ebx
movq 0x8(%rsp), %r14
movl (%rsp), %esi
shrl $0xf, %esi
movq 0x18(%r14), %r12
leaq 0x1d093(%rip), %rax # 0x58380
movq 0x18(%rsp), %r13
movzbl (%rax,%r13), %r15d
movq 0x10(%rsp), %rbp
movq %rbp, %rdi
callq 0x442ee
movl $0x1, (%rsp)
cmpq $0x10, %r13
je 0x3b36d
leaq (%rbx,%rbx,4), %rax
leaq (%rbx,%rax,2), %rbx
movl %r15d, %eax
shll $0x5, %eax
addq %r15, %r12
addq %rax, %r12
movslq 0x4(%r14), %rax
xorl %esi, %esi
cmpq %rax, 0x20(%rsp)
setl %sil
movzbl (%r12,%rbx), %edx
movq %rbp, %rdi
callq 0x44208
testl %eax, %eax
je 0x3b36d
movq %r14, %r13
addq %rbx, %r12
movq 0x18(%rsp), %rax
movq %rax, %r15
cmpq $0x10, %rax
jne 0x3b03d
movl $0x1, (%rsp)
jmp 0x3b36d
movl $0x0, (%rsp)
movl (%rsp), %eax
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/PKRoma[P]libwebp/src/enc/frame_enc.c
|
InitLeft
|
// Reset the left-prediction samples at the start of a macroblock row.
static void InitLeft(VP8EncIterator* const it) {
  // Top-left corner sample: 127 on the very first row, 129 afterwards.
  const uint8_t corner = (it->y_ > 0) ? 129 : 127;
  it->v_left_[-1] = corner;
  it->u_left_[-1] = corner;
  it->y_left_[-1] = corner;
  memset(it->y_left_, 129, 16);
  memset(it->u_left_, 129, 8);
  memset(it->v_left_, 129, 8);
  it->left_nz_[8] = 0;  // left-DC non-zero flag
  if (it->top_derr_ != NULL) {
    memset(&it->left_derr_, 0, sizeof(it->left_derr_));
  }
}
|
xorl %eax, %eax
cmpl $0x0, 0x4(%rdi)
setg %al
leal 0x7f(,%rax,2), %eax
movq 0x178(%rdi), %rcx
movb %al, -0x1(%rcx)
movq 0x170(%rdi), %rcx
movb %al, -0x1(%rcx)
movq 0x168(%rdi), %rcx
movb %al, -0x1(%rcx)
movq 0x168(%rdi), %rax
movaps 0x1d09e(%rip), %xmm0 # 0x584d0
movups %xmm0, (%rax)
movq 0x170(%rdi), %rax
movabsq $-0x7e7e7e7e7e7e7e7f, %rcx # imm = 0x8181818181818181
movq %rcx, (%rax)
movq 0x178(%rdi), %rax
movq %rcx, (%rax)
movl $0x0, 0xc8(%rdi)
cmpq $0x0, 0x160(%rdi)
je 0x3b471
movl $0x0, 0x158(%rdi)
retq
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8IteratorInit
|
// Bind the iterator to 'enc' and carve its aligned working buffers out of
// the embedded scratch memory, then reset it to the first macroblock.
void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it) {
  uint8_t* const yuv_base = (uint8_t*)WEBP_ALIGN(it->yuv_mem_);
  uint8_t* const left_base = (uint8_t*)WEBP_ALIGN(it->yuv_left_mem_ + 1);
  it->enc_ = enc;
  // Four consecutive YUV work areas, each YUV_SIZE_ENC bytes.
  it->yuv_in_ = yuv_base;
  it->yuv_out_ = yuv_base + YUV_SIZE_ENC;
  it->yuv_out2_ = yuv_base + 2 * YUV_SIZE_ENC;
  it->yuv_p_ = yuv_base + 3 * YUV_SIZE_ENC;
  it->lf_stats_ = enc->lf_stats_;
  it->percent0_ = enc->percent_;
  // Left-sample rows (each keeps one extra byte at index -1).
  it->y_left_ = left_base;
  it->u_left_ = left_base + 16 + 16;
  it->v_left_ = left_base + 16 + 16 + 16;
  it->top_derr_ = enc->top_derr_;
  VP8IteratorReset(it);
}
|
movq %rdi, 0x28(%rsi)
leaq 0x207(%rsi), %rax
andq $-0x20, %rax
movq %rax, 0x8(%rsi)
leaq 0x200(%rax), %rcx
movq %rcx, 0x10(%rsi)
leaq 0x400(%rax), %rcx
movq %rcx, 0x18(%rsi)
addq $0x600, %rax # imm = 0x600
movq %rax, 0x20(%rsi)
movq 0x5c88(%rdi), %rax
movq %rax, 0x140(%rsi)
movl 0x218(%rdi), %eax
movl %eax, 0x154(%rsi)
leaq 0x1b0(%rsi), %rax
andq $-0x20, %rax
movq %rax, 0x168(%rsi)
leaq 0x20(%rax), %rcx
movq %rcx, 0x170(%rsi)
addq $0x30, %rax
movq %rax, 0x178(%rsi)
movq 0x5c90(%rdi), %rax
movq %rax, 0x160(%rsi)
movq %rsi, %rdi
jmp 0x3b472
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8IteratorNzToBytes
|
// Expand the packed non-zero context bits of the top and left neighbours
// into the per-sub-block int arrays top_nz_[] / left_nz_[].
void VP8IteratorNzToBytes(VP8EncIterator* const it) {
  const int tnz = it->nz_[0], lnz = it->nz_[-1];
  int* const top_nz = it->top_nz_;
  int* const left_nz = it->left_nz_;
  // Bit positions inside 'tnz': Top-Y (4), Top-U (2), Top-V (2), DC (1).
  static const int kTopBits[9] = { 12, 13, 14, 15, 18, 19, 22, 23, 24 };
  // Bit positions inside 'lnz': left-Y (4), left-U (2), left-V (2).
  // left-DC is special and iterated separately by the caller.
  static const int kLeftBits[8] = { 3, 7, 11, 15, 17, 19, 21, 23 };
  int i;
  for (i = 0; i < 9; ++i) {
    top_nz[i] = BIT(tnz, kTopBits[i]);
  }
  for (i = 0; i < 8; ++i) {
    left_nz[i] = BIT(lnz, kLeftBits[i]);
  }
}
|
movq 0x48(%rdi), %rcx
movl -0x4(%rcx), %eax
movl (%rcx), %ecx
movl %ecx, %edx
shrl $0xc, %edx
andl $0x1, %edx
movl %edx, 0x84(%rdi)
movl %ecx, %edx
shrl $0xd, %edx
andl $0x1, %edx
movl %edx, 0x88(%rdi)
movl %ecx, %edx
shrl $0xe, %edx
andl $0x1, %edx
movl %edx, 0x8c(%rdi)
movl %ecx, %edx
shrl $0xf, %edx
andl $0x1, %edx
movl %edx, 0x90(%rdi)
movl %ecx, %edx
shrl $0x12, %edx
andl $0x1, %edx
movl %edx, 0x94(%rdi)
movl %ecx, %edx
shrl $0x13, %edx
andl $0x1, %edx
movl %edx, 0x98(%rdi)
movl %ecx, %edx
shrl $0x16, %edx
andl $0x1, %edx
movl %edx, 0x9c(%rdi)
movl %ecx, %edx
shrl $0x17, %edx
andl $0x1, %edx
movl %edx, 0xa0(%rdi)
shrl $0x18, %ecx
andl $0x1, %ecx
movl %ecx, 0xa4(%rdi)
movl %eax, %ecx
shrl $0x3, %ecx
andl $0x1, %ecx
movl %ecx, 0xa8(%rdi)
movl %eax, %ecx
shrl $0x7, %ecx
andl $0x1, %ecx
movl %ecx, 0xac(%rdi)
movl %eax, %ecx
shrl $0xb, %ecx
andl $0x1, %ecx
movl %ecx, 0xb0(%rdi)
movl %eax, %ecx
shrl $0xf, %ecx
andl $0x1, %ecx
movl %ecx, 0xb4(%rdi)
movl %eax, %ecx
shrl $0x11, %ecx
andl $0x1, %ecx
movl %ecx, 0xb8(%rdi)
movl %eax, %ecx
shrl $0x13, %ecx
andl $0x1, %ecx
movl %ecx, 0xbc(%rdi)
movl %eax, %ecx
shrl $0x15, %ecx
andl $0x1, %ecx
movl %ecx, 0xc0(%rdi)
shrl $0x17, %eax
andl $0x1, %eax
movl %eax, 0xc4(%rdi)
retq
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8IteratorSaveBoundary
|
// Save the reconstructed samples that neighbouring macroblocks will use
// for prediction: the right-most column (for the macroblock to the right)
// and the bottom row (for the macroblock below).
void VP8IteratorSaveBoundary(VP8EncIterator* const it) {
  VP8Encoder* const enc = it->enc_;
  const uint8_t* const luma = it->yuv_out_ + Y_OFF_ENC;
  const uint8_t* const chroma = it->yuv_out_ + U_OFF_ENC;
  if (it->x_ < enc->mb_w_ - 1) {   // not the last column: save 'left' samples
    int row;
    for (row = 0; row < 16; ++row) {
      it->y_left_[row] = luma[row * BPS + 15];
    }
    for (row = 0; row < 8; ++row) {
      it->u_left_[row] = chroma[row * BPS + 7];
      it->v_left_[row] = chroma[row * BPS + 15];
    }
    // Corner samples must be captured before 'top' is overwritten below!
    it->y_left_[-1] = it->y_top_[15];
    it->u_left_[-1] = it->uv_top_[0 + 7];
    it->v_left_[-1] = it->uv_top_[8 + 7];
  }
  if (it->y_ < enc->mb_h_ - 1) {   // not the last row: save 'top' samples
    memcpy(it->y_top_, luma + 15 * BPS, 16);
    memcpy(it->uv_top_, chroma + 7 * BPS, 8 + 8);
  }
}
|
movq 0x28(%rdi), %rdx
movl 0x4(%rdi), %ecx
movq 0x10(%rdi), %rax
movl 0x30(%rdx), %esi
decl %esi
cmpl %esi, (%rdi)
jge 0x3bed8
leaq 0xf(%rax), %rsi
xorl %r8d, %r8d
movb (%rsi), %r9b
movq 0x168(%rdi), %r10
movb %r9b, (%r10,%r8)
incq %r8
addq $0x20, %rsi
cmpq $0x10, %r8
jne 0x3be4a
leaq 0x1f(%rax), %rsi
xorl %r8d, %r8d
movb -0x8(%rsi), %r9b
movq 0x170(%rdi), %r10
movb %r9b, (%r10,%r8)
movb (%rsi), %r9b
movq 0x178(%rdi), %r10
movb %r9b, (%r10,%r8)
incq %r8
addq $0x20, %rsi
cmpq $0x8, %r8
jne 0x3be6c
movq 0x168(%rdi), %rsi
movq 0x180(%rdi), %r8
movb 0xf(%r8), %r8b
movb %r8b, -0x1(%rsi)
movq 0x170(%rdi), %rsi
movq 0x188(%rdi), %r8
movb 0x7(%r8), %r8b
movb %r8b, -0x1(%rsi)
movq 0x178(%rdi), %rsi
movq 0x188(%rdi), %r8
movb 0xf(%r8), %r8b
movb %r8b, -0x1(%rsi)
movl 0x34(%rdx), %edx
decl %edx
cmpl %edx, %ecx
jge 0x3bf03
movq 0x180(%rdi), %rcx
movups 0x1e0(%rax), %xmm0
movups %xmm0, (%rcx)
movq 0x188(%rdi), %rcx
movups 0xf0(%rax), %xmm0
movups %xmm0, (%rcx)
retq
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8SetIntra16Mode
|
// Stamp 'mode' over the 4x4 prediction-mode grid of the current
// macroblock and tag the macroblock as intra-16x16 (type_ = 1).
void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode) {
  uint8_t* row = it->preds_;
  int j;
  for (j = 0; j < 4; ++j) {
    memset(row, mode, 4);
    row += it->enc_->preds_w_;  // advance one row in the prediction map
  }
  it->mb_->type_ = 1;
}
|
movq 0x40(%rdi), %rax
movl $0x4, %ecx
movzbl %sil, %edx
imull $0x1010101, %edx, %edx # imm = 0x1010101
movl %edx, (%rax)
movq 0x28(%rdi), %rsi
movslq 0x38(%rsi), %rsi
addq %rsi, %rax
decl %ecx
jne 0x3bf84
movq 0x30(%rdi), %rax
movb (%rax), %cl
andb $-0x4, %cl
incb %cl
movb %cl, (%rax)
retq
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8IteratorStartI4
|
// Prepare intra-4x4 mode analysis: gather the 37 boundary samples
// (left column bottom-up incl. top-left, top row, top-right) into
// i4_boundary_[] and import the non-zero context.
void VP8IteratorStartI4(VP8EncIterator* const it) {
  const VP8Encoder* const enc = it->enc_;
  uint8_t* const bnd = it->i4_boundary_;
  int k;
  it->i4_ = 0;  // first 4x4 sub-block
  it->i4_top_ = bnd + VP8TopLeftI4[0];
  // Left column, stored bottom-up; bnd[16] receives y_left_[-1] (corner).
  for (k = 0; k <= 16; ++k) {
    bnd[k] = it->y_left_[15 - k];
  }
  // Top row.
  for (k = 0; k < 16; ++k) {
    bnd[17 + k] = it->y_top_[k];
  }
  if (it->x_ < enc->mb_w_ - 1) {
    // Top-right samples come from the neighbour macroblock's top row...
    for (k = 16; k < 16 + 4; ++k) {
      bnd[17 + k] = it->y_top_[k];
    }
  } else {
    // ...but on the far right of the picture, replicate the last valid
    // pixel four times instead.
    for (k = 16; k < 16 + 4; ++k) {
      bnd[17 + k] = bnd[17 + 15];
    }
  }
  VP8IteratorNzToBytes(it);  // import the non-zero context
}
|
movq 0x28(%rdi), %rax
movl $0x0, 0x80(%rdi)
leaq 0x50(%rdi), %rcx
leaq 0x61(%rdi), %rdx
movq %rdx, 0x78(%rdi)
movl $0x11, %edx
movq 0x168(%rdi), %rsi
movb -0x2(%rsi,%rdx), %sil
movb %sil, (%rcx)
incq %rcx
decq %rdx
jne 0x3c032
xorl %ecx, %ecx
movq 0x180(%rdi), %rdx
movb (%rdx,%rcx), %dl
movb %dl, 0x61(%rdi,%rcx)
incq %rcx
cmpq $0x10, %rcx
jne 0x3c04b
movl 0x30(%rax), %eax
decl %eax
cmpl %eax, (%rdi)
jge 0x3c08a
xorl %eax, %eax
movq 0x180(%rdi), %rcx
movb 0x10(%rcx,%rax), %cl
movb %cl, 0x71(%rdi,%rax)
incq %rax
cmpq $0x4, %rax
jne 0x3c06d
jmp 0x3bc74
movd 0x70(%rdi), %xmm0
punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
movd %xmm0, 0x71(%rdi)
jmp 0x3bc74
|
/PKRoma[P]libwebp/src/enc/iterator_enc.c
|
VP8SetSegmentParams
|
// Map the user-level 'quality' (0..100) to per-segment quantizer values,
// derive the chroma DC/AC quantizer deltas from sns_strength, then set up
// filter strengths and the final quantization matrices.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
  int i;
  int dq_uv_ac, dq_uv_dc;
  const int num_segments = enc->segment_hdr_.num_segments_;
  // 'amp' scales how strongly a segment's susceptibility (alpha_) bends
  // its quantizer away from the base compression factor.
  const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.;
  const double Q = quality / 100.;
  // Base compression factor; optionally emulates libjpeg's size behavior.
  const double c_base = enc->config_->emulate_jpeg_size ?
      QualityToJPEGCompression(Q, enc->alpha_ / 255.) :
      QualityToCompression(Q);
  for (i = 0; i < num_segments; ++i) {
    // We modulate the base coefficient to accommodate for the quantization
    // susceptibility and allow denser segments to be quantized more.
    const double expn = 1. - amp * enc->dqm_[i].alpha_;
    const double c = pow(c_base, expn);
    const int q = (int)(127. * (1. - c));
    assert(expn > 0.);
    enc->dqm_[i].quant_ = clip(q, 0, 127);
  }

  // purely indicative in the bitstream (except for the 1-segment case)
  enc->base_quant_ = enc->dqm_[0].quant_;

  // fill-in values for the unused segments (required by the syntax)
  for (i = num_segments; i < NUM_MB_SEGMENTS; ++i) {
    enc->dqm_[i].quant_ = enc->base_quant_;
  }

  // uv_alpha_ is normally spread around ~60. The useful range is
  // typically ~30 (quite bad) to ~100 (ok to decimate UV more).
  // We map it to the safe maximal range of MAX/MIN_DQ_UV for dq_uv.
  dq_uv_ac = (enc->uv_alpha_ - MID_ALPHA) * (MAX_DQ_UV - MIN_DQ_UV)
                                          / (MAX_ALPHA - MIN_ALPHA);
  // we rescale by the user-defined strength of adaptation
  dq_uv_ac = dq_uv_ac * enc->config_->sns_strength / 100;
  // and make it safe.
  dq_uv_ac = clip(dq_uv_ac, MIN_DQ_UV, MAX_DQ_UV);
  // We also boost the dc-uv-quant a little, based on sns-strength, since
  // U/V channels are quite more reactive to high quants (flat DC-blocks
  // tend to appear, and are unpleasant).
  dq_uv_dc = -4 * enc->config_->sns_strength / 100;
  dq_uv_dc = clip(dq_uv_dc, -15, 15);   // 4bit-signed max allowed

  enc->dq_y1_dc_ = 0;       // TODO(skal): dq-lum
  enc->dq_y2_dc_ = 0;
  enc->dq_y2_ac_ = 0;
  enc->dq_uv_dc_ = dq_uv_dc;
  enc->dq_uv_ac_ = dq_uv_ac;

  SetupFilterStrength(enc);   // initialize segments' filtering, eventually
  if (num_segments > 1) SimplifySegments(enc);
  SetupMatrices(enc);         // finalize quantization matrices
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movq %rdi, %rbx
movl 0x20(%rdi), %r13d
movq (%rdi), %rax
cvtsi2sdl 0x1c(%rax), %xmm1
movsd %xmm1, 0x8(%rsp)
cvtss2sd %xmm0, %xmm0
divsd 0x14ebd(%rip), %xmm0 # 0x51028
cmpl $0x0, 0x50(%rax)
je 0x3c198
xorps %xmm1, %xmm1
cvtsi2sdl 0xe04(%rbx), %xmm1
divsd 0x19ac4(%rip), %xmm1 # 0x55c48
ucomisd 0x1c5dc(%rip), %xmm1 # 0x58768
jbe 0x3c1d3
movsd 0x1c52a(%rip), %xmm1 # 0x586c0
jmp 0x3c208
movsd 0x1c5e8(%rip), %xmm1 # 0x58788
mulsd %xmm0, %xmm1
movapd %xmm0, %xmm2
addsd %xmm0, %xmm2
addsd 0x1c5dc(%rip), %xmm2 # 0x58790
cmpltsd 0x1c5db(%rip), %xmm0 # 0x58798
andpd %xmm0, %xmm1
andnpd %xmm2, %xmm0
orpd %xmm1, %xmm0
movsd 0x1c5cf(%rip), %xmm1 # 0x587a0
jmp 0x3c208
movsd 0x1c595(%rip), %xmm2 # 0x58770
addsd %xmm1, %xmm2
mulsd 0x1c591(%rip), %xmm2 # 0x58778
movsd 0x1c571(%rip), %xmm3 # 0x58760
cmpltsd 0x1c588(%rip), %xmm1 # 0x58780
addsd %xmm3, %xmm2
andpd %xmm1, %xmm3
andnpd %xmm2, %xmm1
orpd %xmm3, %xmm1
callq 0x4590
movapd %xmm0, %xmm2
testl %r13d, %r13d
movsd 0x8(%rsp), %xmm0
jle 0x3c2db
mulsd 0x1c538(%rip), %xmm0 # 0x58760
divsd 0x14df8(%rip), %xmm0 # 0x51028
mulsd 0x1c570(%rip), %xmm0 # 0x587a8
imulq $0x2e8, %r13, %r14 # imm = 0x2E8
xorl %r15d, %r15d
movl $0x7f, %ebp
xorl %r12d, %r12d
movsd %xmm0, 0x8(%rsp)
movsd %xmm2, 0x20(%rsp)
xorps %xmm1, %xmm1
cvtsi2sdl 0x500(%rbx,%r12), %xmm1
mulsd %xmm0, %xmm1
movsd 0x1a9e9(%rip), %xmm0 # 0x56c58
addsd %xmm0, %xmm1
movapd %xmm2, %xmm0
callq 0x4590
movsd 0x20(%rsp), %xmm2
movsd 0x1a9ce(%rip), %xmm1 # 0x56c58
subsd %xmm0, %xmm1
movsd 0x8(%rsp), %xmm0
mulsd 0x1c514(%rip), %xmm1 # 0x587b0
cvttsd2si %xmm1, %eax
cmpl $0x7f, %eax
cmovgel %ebp, %eax
testl %eax, %eax
cmovlel %r15d, %eax
movl %eax, 0x508(%rbx,%r12)
addq $0x2e8, %r12 # imm = 0x2E8
cmpq %r12, %r14
jne 0x3c256
leaq 0x260(%rbx), %r14
movl 0x508(%rbx), %eax
movl %eax, 0xe00(%rbx)
cmpl $0x3, %r13d
jle 0x3c2ee
jmp 0x3c31c
leaq 0x260(%rbx), %r14
movl 0x508(%rbx), %eax
movl %eax, 0xe00(%rbx)
movslq %r13d, %rcx
imulq $0x2e8, %rcx, %rdx # imm = 0x2E8
addq %rbx, %rdx
addq $0x508, %rdx # imm = 0x508
incq %rcx
movq %rcx, %rsi
movl %eax, (%rdx)
addq $0x2e8, %rdx # imm = 0x2E8
incq %rsi
cmpl $0x4, %ecx
movq %rsi, %rcx
jne 0x3c308
movq %r13, 0x8(%rsp)
movl 0xe08(%rbx), %eax
leal (%rax,%rax,4), %eax
leal -0x280(,%rax,2), %ecx
movslq %ecx, %rcx
imulq $-0x15f15f15, %rcx, %rcx # imm = 0xEA0EA0EB
shrq $0x20, %rcx
leal (%rcx,%rax,2), %ecx
addl $0xfffffd80, %ecx # imm = 0xFFFFFD80
movl %ecx, %eax
shrl $0x1f, %eax
sarl $0x6, %ecx
addl %eax, %ecx
movq (%rbx), %rax
movslq 0x1c(%rax), %rdx
movl %edx, %esi
imull %ecx, %esi
movslq %esi, %rcx
imulq $0x51eb851f, %rcx, %rcx # imm = 0x51EB851F
movq %rcx, %rsi
shrq $0x3f, %rsi
sarq $0x25, %rcx
addl %esi, %ecx
cmpl $0x6, %ecx
movl $0x6, %esi
cmovll %ecx, %esi
cmpl $-0x3, %esi
movl $0xfffffffc, %ecx # imm = 0xFFFFFFFC
cmovgel %esi, %ecx
imulq $-0x51eb851f, %rdx, %rdx # imm = 0xAE147AE1
movq %rdx, %rsi
shrq $0x3f, %rsi
sarq $0x23, %rdx
addl %esi, %edx
cmpl $0xf, %edx
movl $0xf, %esi
cmovll %edx, %esi
cmpl $-0xe, %esi
movl $0xfffffff1, %edx # imm = 0xFFFFFFF1
cmovgel %esi, %edx
movq $0x0, 0xe0c(%rbx)
movl $0x0, 0xe14(%rbx)
movl %edx, 0xe18(%rbx)
movl %ecx, 0xe1c(%rbx)
movl 0x20(%rax), %eax
leal (%rax,%rax,4), %ebp
movl $0x2ac, %r12d # imm = 0x2AC
xorl %r15d, %r15d
movl $0x100, %r13d # imm = 0x100
movl -0x4(%r14,%r12), %eax
cmpl $0x7f, %eax
movl $0x7f, %ecx
cmovgel %ecx, %eax
testl %eax, %eax
cmovlel %r15d, %eax
leaq 0x1c408(%rip), %rcx # 0x58810
movzwl (%rcx,%rax,2), %esi
shrl $0x2, %esi
movl 0x18(%rbx), %edi
callq 0x4a1d4
leaq 0x1c3f2(%rip), %rsi # 0x58810
imull %ebp, %eax
movl -0x8(%r14,%r12), %ecx
addl %r13d, %ecx
cltd
idivl %ecx
cmpl $0x3f, %eax
movl $0x3f, %ecx
cmovll %eax, %ecx
cmpl $0x2, %eax
cmovll %r15d, %ecx
movl %ecx, (%r14,%r12)
addq $0x2e8, %r12 # imm = 0x2E8
cmpq $0xe4c, %r12 # imm = 0xE4C
jne 0x3c3eb
movl 0x50c(%rbx), %eax
movl %eax, 0x14(%rbx)
movq (%rbx), %rax
xorl %ecx, %ecx
cmpl $0x0, 0x28(%rax)
sete %cl
movl %ecx, 0x10(%rbx)
movl 0x24(%rax), %eax
movl %eax, 0x18(%rbx)
cmpl $0x2, 0x8(%rsp)
movq %rbx, 0x8(%rsp)
jl 0x3c5da
movapd 0x1c638(%rip), %xmm0 # 0x58ac0
movapd %xmm0, 0x10(%rsp)
movl 0x20(%rbx), %eax
cmpl $0x4, %eax
movl $0x4, %ebx
cmovll %eax, %ebx
movl $0x1, %r12d
cmpl $0x2, %eax
jl 0x3c538
movl %ebx, %r15d
leaq 0x2ac(%r14), %r13
movl $0x1, %r12d
movl $0x1, %ebp
movl %r12d, %eax
imulq $0x2e8, %rbp, %rsi # imm = 0x2E8
addq %r14, %rsi
testl %r12d, %r12d
jle 0x3c502
movl 0x2a8(%rsi), %ecx
movq %r13, %rdi
xorl %edx, %edx
cmpl -0x4(%rdi), %ecx
jne 0x3c4ee
movl 0x2ac(%rsi), %r8d
cmpl (%rdi), %r8d
je 0x3c529
incq %rdx
addq $0x2e8, %rdi # imm = 0x2E8
cmpq %rdx, %rax
jne 0x3c4dd
movl %r12d, %ecx
jmp 0x3c504
xorl %ecx, %ecx
movl %ecx, 0x10(%rsp,%rbp,4)
cmpq %rbp, %rax
je 0x3c524
movslq %r12d, %rax
imulq $0x2e8, %rax, %rdi # imm = 0x2E8
addq %r14, %rdi
movl $0x2e8, %edx # imm = 0x2E8
callq 0x45d0
incl %r12d
jmp 0x3c530
movl %edx, 0x10(%rsp,%rbp,4)
movl %eax, %r12d
incq %rbp
cmpq %r15, %rbp
jne 0x3c4c0
cmpl %ebx, %r12d
jge 0x3c5ce
movq 0x8(%rsp), %rdi
movl 0x34(%rdi), %eax
imull 0x30(%rdi), %eax
testl %eax, %eax
jle 0x3c58a
movl %eax, %eax
incq %rax
movq 0x5c60(%rdi), %rcx
movzbl -0x8(%rcx,%rax,4), %edx
movl %edx, %esi
shrl $0x3, %esi
andl $0xc, %esi
movb 0x10(%rsp,%rsi), %sil
shlb $0x5, %sil
andb $0x60, %sil
andb $-0x61, %dl
orb %sil, %dl
movb %dl, -0x8(%rcx,%rax,4)
decq %rax
cmpq $0x1, %rax
ja 0x3c556
movl %r12d, 0x20(%rdi)
leal -0x1(%r12), %eax
cltq
imulq $0x2e8, %rax, %r15 # imm = 0x2E8
addq %r14, %r15
movslq %r12d, %rax
movslq %ebx, %r13
subq %rax, %r13
imulq $0x2e8, %rax, %r12 # imm = 0x2E8
addq %r14, %r12
movl $0x2e8, %edx # imm = 0x2E8
movq %r12, %rdi
movq %r15, %rsi
callq 0x45d0
addq $0x2e8, %r12 # imm = 0x2E8
decq %r13
jne 0x3c5b2
leaq 0x1c23b(%rip), %rsi # 0x58810
movq 0x8(%rsp), %rbx
xorl %eax, %eax
cmpl $0x4, 0x5c40(%rbx)
jl 0x3c5eb
movq (%rbx), %rax
movl 0x1c(%rax), %eax
movslq 0x20(%rbx), %r13
testq %r13, %r13
jle 0x3c858
movl %eax, %ebx
addq $0x1c0, %r14 # imm = 0x1C0
movl $0x7f, %r12d
movl 0xe8(%r14), %eax
movq 0x8(%rsp), %r8
movl 0xe0c(%r8), %ecx
addl %eax, %ecx
cmpl %r12d, %ecx
cmovgel %r12d, %ecx
testl %ecx, %ecx
movl $0x0, %r9d
cmovlel %r9d, %ecx
cmpl %r12d, %eax
movl $0x7f, %edx
cmovll %eax, %edx
leaq 0x1c2cf(%rip), %r10 # 0x58910
movzbl (%rcx,%r10), %ecx
testl %edx, %edx
cmovlel %r9d, %edx
movw %cx, -0x1c0(%r14)
movzwl (%rsi,%rdx,2), %ecx
movw %cx, -0x1be(%r14)
movl 0xe10(%r8), %ecx
addl %eax, %ecx
cmpl %r12d, %ecx
cmovgel %r12d, %ecx
testl %ecx, %ecx
cmovlel %r9d, %ecx
movzbl (%rcx,%r10), %ecx
addl %ecx, %ecx
movw %cx, -0xe0(%r14)
movl 0xe14(%r8), %ecx
addl %eax, %ecx
cmpl %r12d, %ecx
cmovgel %r12d, %ecx
testl %ecx, %ecx
cmovlel %r9d, %ecx
leaq -0x1c0(%r14), %rdi
leaq 0x1c2e7(%rip), %rdx # 0x58990
movzwl (%rdx,%rcx,2), %ecx
movw %cx, -0xde(%r14)
movl 0xe18(%r8), %ecx
addl %eax, %ecx
cmpl $0x75, %ecx
movl $0x75, %edx
cmovgel %edx, %ecx
leaq -0xe0(%r14), %r15
testl %ecx, %ecx
cmovlel %r9d, %ecx
movzbl (%rcx,%r10), %ecx
movw %cx, (%r14)
addl 0xe1c(%r8), %eax
cmpl $0x7f, %eax
cmovgel %r12d, %eax
testl %eax, %eax
cmovlel %r9d, %eax
movzwl (%rsi,%rax,2), %eax
movw %ax, 0x2(%r14)
xorl %esi, %esi
callq 0x3d9aa
movl %eax, %ebp
movq %r15, %rdi
movl $0x1, %esi
callq 0x3d9aa
movl %eax, %r15d
movq %r14, %rdi
movl $0x2, %esi
callq 0x3d9aa
movl %ebp, %ecx
imull %ecx, %ecx
leal (%rcx,%rcx,2), %edi
sarl $0x7, %edi
movl %edi, 0xfc(%r14)
imull %r15d, %r15d
leal (%r15,%r15,2), %r8d
movl %r8d, 0xf8(%r14)
imull %eax, %eax
leal (%rax,%rax,2), %esi
sarl $0x6, %esi
movl %esi, 0x100(%r14)
movl %ecx, %edx
shrl $0x7, %edx
movl %edx, 0x104(%r14)
leal (,%rcx,8), %edx
subl %ecx, %edx
sarl $0x3, %edx
movl %edx, 0x114(%r14)
movl %r15d, %r9d
shrl $0x2, %r9d
movl %r9d, 0x110(%r14)
addl %eax, %eax
movl %eax, 0x118(%r14)
imull %ebx, %ebp
sarl $0x5, %ebp
movl %ebp, 0x10c(%r14)
testl %edi, %edi
jg 0x3c7a5
movl $0x1, 0xfc(%r14)
testl %r8d, %r8d
jg 0x3c7b5
movl $0x1, 0xf8(%r14)
testl %esi, %esi
jg 0x3c7c4
movl $0x1, 0x100(%r14)
cmpl $0x7f, %ecx
leaq 0x1c042(%rip), %rsi # 0x58810
ja 0x3c7db
movl $0x1, 0x104(%r14)
testl %edx, %edx
jg 0x3c7ea
movl $0x1, 0x114(%r14)
cmpl $0x3, %r15d
ja 0x3c7fb
movl $0x1, 0x110(%r14)
testl %eax, %eax
jg 0x3c80a
movl $0x1, 0x118(%r14)
testl %ebp, %ebp
jg 0x3c819
movl $0x1, 0x10c(%r14)
movzwl -0x1c0(%r14), %eax
shll $0x2, %eax
leal (%rax,%rax,4), %eax
movl %eax, 0xf4(%r14)
movl $0x0, 0xf0(%r14)
imull $0x3e8, %ecx, %eax # imm = 0x3E8
cltq
movq %rax, 0x120(%r14)
addq $0x2e8, %r14 # imm = 0x2E8
decq %r13
jne 0x3c607
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/quant_enc.c
|
ReconstructIntra4
|
// Transforms, quantizes and reconstructs one intra4 sub-block.
// Returns the non-zero flag produced by the quantizer.
static int ReconstructIntra4(VP8EncIterator* const it,
                             int16_t levels[16],
                             const uint8_t* const src,
                             uint8_t* const yuv_out,
                             int mode) {
  const VP8Encoder* const enc = it->enc_;
  const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  int16_t coeffs[16];
  int nz;

  VP8FTransform(src, ref, coeffs);
  if (DO_TRELLIS_I4 && it->do_trellis_) {
    // Context is derived from the sub-block position within the macroblock.
    const int x = it->i4_ & 3;
    const int y = it->i4_ >> 2;
    const int ctx = it->top_nz_[x] + it->left_nz_[y];
    nz = TrellisQuantizeBlock(enc, coeffs, levels, ctx, TYPE_I4_AC, &dqm->y1_,
                              dqm->lambda_trellis_i4_);
  } else {
    nz = VP8EncQuantizeBlock(coeffs, levels, &dqm->y1_);
  }
  VP8ITransform(ref, coeffs, yuv_out, 0);
  return nz;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x38, %rsp
movq %rcx, %rbx
movq %rdx, %rax
movq %rsi, %r15
movq %rdi, %rbp
movq 0x28(%rdi), %r12
movq 0x30(%rdi), %rcx
movslq %r8d, %rdx
leaq 0x1a53c(%rip), %rsi # 0x587d0
movzwl (%rsi,%rdx,2), %r14d
addq 0x20(%rdi), %r14
movzbl (%rcx), %ecx
shrl $0x5, %ecx
andl $0x3, %ecx
imulq $0x2e8, %rcx, %rcx # imm = 0x2E8
leaq (%r12,%rcx), %r13
addq $0x260, %r13 # imm = 0x260
leaq 0x2cc29(%rip), %rcx # 0x6aee8
leaq 0x10(%rsp), %rdx
movq %rax, %rdi
movq %r14, %rsi
callq *(%rcx)
cmpl $0x0, 0x148(%rbp)
je 0x3e318
movl 0x80(%rbp), %eax
movl %eax, %edx
andl $0x3, %edx
sarl $0x2, %eax
cltq
movl 0xa8(%rbp,%rax,4), %ecx
addl 0x84(%rbp,%rdx,4), %ecx
movl 0x2d4(%r13), %eax
movl %eax, (%rsp)
leaq 0x10(%rsp), %rsi
movq %r12, %rdi
movq %r15, %rdx
movl $0x3, %r8d
movq %r13, %r9
callq 0x3dd21
jmp 0x3e32c
leaq 0x2cc31(%rip), %rax # 0x6af50
leaq 0x10(%rsp), %rdi
movq %r15, %rsi
movq %r13, %rdx
callq *(%rax)
movl %eax, %ebp
leaq 0x2cbab(%rip), %rax # 0x6aee0
leaq 0x10(%rsp), %rsi
movq %r14, %rdi
movq %rbx, %rdx
xorl %ecx, %ecx
callq *(%rax)
movl %ebp, %eax
addq $0x38, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/quant_enc.c
|
VP8TBufferInit
|
// Resets a token buffer to its empty state. The effective page size is
// clamped up to MIN_PAGE_SIZE.
void VP8TBufferInit(VP8TBuffer* const b, int page_size) {
  b->tokens_ = NULL;
  b->pages_ = NULL;
  b->last_page_ = &b->pages_;
  b->left_ = 0;
  if (page_size < MIN_PAGE_SIZE) {
    b->page_size_ = MIN_PAGE_SIZE;
  } else {
    b->page_size_ = page_size;
  }
  b->error_ = 0;
}
|
xorl %eax, %eax
movq %rax, 0x10(%rdi)
movq %rax, (%rdi)
movq %rdi, 0x8(%rdi)
movl %eax, 0x18(%rdi)
cmpl $0x2001, %esi # imm = 0x2001
movl $0x2000, %ecx # imm = 0x2000
cmovgel %esi, %ecx
movl %ecx, 0x1c(%rdi)
movl %eax, 0x20(%rdi)
retq
|
/PKRoma[P]libwebp/src/enc/token_enc.c
|
VP8RecordCoeffTokens
|
// Records the boolean tokens describing one residual block (coefficients
// res->first..res->last) into 'tokens', updating the per-context statistics
// in res->stats as it goes. Returns 0 only if appending the very first
// token fails (out of buffer memory); otherwise returns 1.
int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res,
                         VP8TBuffer* const tokens) {
  const int16_t* const coeffs = res->coeffs;
  const int coeff_type = res->coeff_type;
  const int last = res->last;
  int n = res->first;
  uint32_t base_id = TOKEN_ID(coeff_type, n, ctx);
  // should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1
  proba_t* s = res->stats[n][ctx];
  // First token: "is there any coefficient at all?" (last >= 0).
  if (!AddToken(tokens, last >= 0, base_id + 0, s + 0)) {
    return 0;
  }
  while (n < 16) {
    const int c = coeffs[n++];
    const int sign = c < 0;
    const uint32_t v = sign ? -c : c;   // magnitude; sign is coded separately
    // Token 1: is this coefficient non-zero? If not, switch to ctx=0 tables
    // for the next band and continue without coding a sign.
    if (!AddToken(tokens, v != 0, base_id + 1, s + 1)) {
      base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 0);  // ctx=0
      s = res->stats[VP8EncBands[n]][0];
      continue;
    }
    // Token 2: magnitude greater than 1? If not, v == 1 -> ctx=1 next.
    if (!AddToken(tokens, v > 1, base_id + 2, s + 2)) {
      base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 1);  // ctx=1
      s = res->stats[VP8EncBands[n]][1];
    } else {
      // Magnitude >= 2: walk the category tree (small values coded
      // directly, larger ones via VP8Cat3..VP8Cat6 extra bits).
      if (!AddToken(tokens, v > 4, base_id + 3, s + 3)) {
        if (AddToken(tokens, v != 2, base_id + 4, s + 4)) {
          AddToken(tokens, v == 4, base_id + 5, s + 5);
        }
      } else if (!AddToken(tokens, v > 10, base_id + 6, s + 6)) {
        if (!AddToken(tokens, v > 6, base_id + 7, s + 7)) {
          AddConstantToken(tokens, v == 6, 159);
        } else {
          AddConstantToken(tokens, v >= 9, 165);
          AddConstantToken(tokens, !(v & 1), 145);
        }
      } else {
        // v > 10: pick the category and emit its fixed-probability
        // residue bits, most-significant first.
        int mask;
        const uint8_t* tab;
        uint32_t residue = v - 3;
        if (residue < (8 << 1)) {          // VP8Cat3 (3b)
          AddToken(tokens, 0, base_id + 8, s + 8);
          AddToken(tokens, 0, base_id + 9, s + 9);
          residue -= (8 << 0);
          mask = 1 << 2;
          tab = VP8Cat3;
        } else if (residue < (8 << 2)) {   // VP8Cat4 (4b)
          AddToken(tokens, 0, base_id + 8, s + 8);
          AddToken(tokens, 1, base_id + 9, s + 9);
          residue -= (8 << 1);
          mask = 1 << 3;
          tab = VP8Cat4;
        } else if (residue < (8 << 3)) {   // VP8Cat5 (5b)
          AddToken(tokens, 1, base_id + 8, s + 8);
          AddToken(tokens, 0, base_id + 10, s + 9);
          residue -= (8 << 2);
          mask = 1 << 4;
          tab = VP8Cat5;
        } else {                           // VP8Cat6 (11b)
          AddToken(tokens, 1, base_id + 8, s + 8);
          AddToken(tokens, 1, base_id + 10, s + 9);
          residue -= (8 << 3);
          mask = 1 << 10;
          tab = VP8Cat6;
        }
        while (mask) {
          AddConstantToken(tokens, !!(residue & mask), *tab++);
          mask >>= 1;
        }
      }
      // After a magnitude >= 2, the next coefficient uses ctx=2 tables.
      base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 2);  // ctx=2
      s = res->stats[VP8EncBands[n]][2];
    }
    // Sign bit, coded with a fixed probability of 128 (50/50).
    AddConstantToken(tokens, sign, 128);
    if (n == 16 || !AddToken(tokens, n <= last, base_id + 0, s + 0)) {
      return 1;   // EOB
    }
  }
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movq %rdx, %rbx
movq %rsi, %r12
movq 0x8(%rsi), %rax
movq %rax, 0x40(%rsp)
movl 0x10(%rsi), %eax
movl 0x4(%rsi), %r15d
movslq (%rsi), %rbp
movq %rax, 0x28(%rsp)
leal (%rbp,%rax,8), %eax
leal (%rax,%rax,2), %eax
addl %edi, %eax
leal (%rax,%rax,4), %ecx
leal (%rax,%rcx,2), %r14d
movslq %edi, %rax
movq %rbp, %rcx
shlq $0x7, %rcx
leaq (%rcx,%rbp,4), %rcx
addq 0x20(%rsi), %rcx
imulq $0x2c, %rax, %r13
addq %rcx, %r13
movl 0x18(%rdx), %eax
testl %eax, %eax
jg 0x3ee92
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3eeb4
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
movl %r15d, %ecx
notl %ecx
shrl $0x10, %ecx
andl $0x8000, %ecx # imm = 0x8000
orl %r14d, %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl (%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
movl %r15d, %eax
shrl $0x1f, %eax
xorl $0x10001, %eax # imm = 0x10001
addl %ecx, %eax
movl %eax, (%r13)
testl %r15d, %r15d
js 0x3f88c
movl $0x1, %eax
cmpl $0xf, %ebp
jg 0x3f895
movq 0x28(%rsp), %rax
shll $0x3, %eax
movq %rax, 0x28(%rsp)
movq %r15, 0x38(%rsp)
movq %r12, 0x20(%rsp)
movq 0x40(%rsp), %rax
movzwl (%rax,%rbp,2), %edi
movl %edi, %r15d
negw %r15w
cmovsw %di, %r15w
movl 0x18(%rbx), %eax
testl %eax, %eax
movl %edi, 0xc(%rsp)
jg 0x3ef3d
movq %rbx, %rdi
callq 0x3f9d4
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3ef5f
movl 0x18(%rbx), %eax
leal 0x1(%r14), %ecx
xorl %edx, %edx
testw %di, %di
setne %dl
leal -0x1(%rax), %esi
movl %esi, 0x18(%rbx)
shll $0xf, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
leaq 0x1(%rbp), %rsi
xorl %eax, %eax
testw %di, %di
sete %al
movl 0x4(%r13), %ecx
leal 0x1(%rcx), %edx
shrl %edx
andl $0x7fff7fff, %edx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %ecx # imm = 0xFFFE0000
cmovbl %ecx, %edx
xorl $0x10001, %eax # imm = 0x10001
addl %edx, %eax
movl %eax, 0x4(%r13)
testw %di, %di
je 0x3f117
movq %rsi, 0x18(%rsp)
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3efb6
movq %rbx, %rdi
callq 0x3f9d4
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3efda
movl 0x18(%rbx), %eax
leal 0x2(%r14), %ecx
xorl %edx, %edx
cmpw $0x2, %r15w
setae %dl
leal -0x1(%rax), %esi
movl %esi, 0x18(%rbx)
shll $0xf, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
movl 0x8(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
cmpw $0x2, %r15w
movl $0x10001, %eax # imm = 0x10001
sbbl $0x0, %eax
addl %ecx, %eax
movl %eax, 0x8(%r13)
cmpw $0x2, %r15w
movq %rbp, 0x30(%rsp)
jae 0x3f153
movl $0x1, %r15d
movl $0xb, %ebp
movq 0x18(%rsp), %rdx
leaq 0x19355(%rip), %rax # 0x58380
movzbl (%rax,%rdx), %r14d
movl %r14d, %eax
shll $0x7, %eax
leaq (%rax,%r14,4), %r13
addq 0x20(%r12), %r13
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f05e
movq %rbx, %rdi
callq 0x3f9d4
movq 0x18(%rsp), %rdx
testl %eax, %eax
je 0x3f07b
movl 0x18(%rbx), %eax
movl 0xc(%rsp), %edi
andl $0x8000, %edi # imm = 0x8000
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
orl $0x4080, %edi # imm = 0x4080
movq 0x10(%rbx), %rcx
cltq
movw %di, -0x2(%rcx,%rax,2)
cmpq $0x10, %rdx
je 0x3f890
addl 0x28(%rsp), %r14d
movl %r14d, %eax
shll $0x5, %eax
addl %ebp, %r14d
addl %eax, %r14d
movl 0x18(%rbx), %eax
testl %eax, %eax
movq 0x38(%rsp), %rbp
jg 0x3f0b6
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
movq 0x30(%rsp), %rsi
je 0x3f0da
movl 0x18(%rbx), %eax
xorl %ecx, %ecx
movq 0x30(%rsp), %rsi
cmpq %rbp, %rsi
setl %cl
leal -0x1(%rax), %edx
movl %edx, 0x18(%rbx)
shll $0xf, %ecx
orl %r14d, %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
imulq $0x2c, %r15, %rax
addq %rax, %r13
movl (%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
xorl %eax, %eax
cmpq %rbp, %rsi
setl %al
leal 0x10000(%rcx,%rax), %eax
movl %eax, (%r13)
movq 0x18(%rsp), %rsi
jl 0x3f141
jmp 0x3f890
leaq 0x19262(%rip), %rax # 0x58380
movzbl (%rax,%rsi), %eax
movq 0x28(%rsp), %rcx
addl %eax, %ecx
movl %ecx, %r14d
shll $0x5, %r14d
addl %ecx, %r14d
movl %eax, %ecx
shll $0x7, %ecx
leaq (%rcx,%rax,4), %r13
addq 0x20(%r12), %r13
movq %rsi, %rbp
cmpq $0x10, %rsi
jne 0x3ef0a
jmp 0x3f890
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f16d
movq %rbx, %rdi
callq 0x3f9d4
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f191
movl 0x18(%rbx), %eax
leal 0x3(%r14), %ecx
xorl %edx, %edx
cmpw $0x5, %r15w
setae %dl
leal -0x1(%rax), %esi
movl %esi, 0x18(%rbx)
shll $0xf, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
movzwl %r15w, %r8d
movl 0xc(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
cmpw $0x5, %r15w
movl $0x10001, %eax # imm = 0x10001
sbbl $0x0, %eax
addl %ecx, %eax
movl %eax, 0xc(%r13)
movl 0x18(%rbx), %eax
cmpw $0x4, %r15w
ja 0x3f2c8
testl %eax, %eax
jg 0x3f1ea
movq %rbx, %rdi
movq %r8, %r15
callq 0x3f9d4
movq %r15, %r8
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f20d
movl 0x18(%rbx), %eax
leal 0x4(%r14), %ecx
xorl %edx, %edx
cmpl $0x2, %r8d
setne %dl
leal -0x1(%rax), %esi
movl %esi, 0x18(%rbx)
shll $0xf, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
xorl %eax, %eax
cmpl $0x2, %r8d
sete %al
movl 0x10(%r13), %ecx
leal 0x1(%rcx), %edx
shrl %edx
andl $0x7fff7fff, %edx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %ecx # imm = 0xFFFE0000
cmovbl %ecx, %edx
xorl $0x10001, %eax # imm = 0x10001
addl %edx, %eax
movl %eax, 0x10(%r13)
movl $0x2, %r15d
movl $0x16, %ebp
cmpl $0x2, %r8d
je 0x3f01f
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f273
movq %rbx, %rdi
movq %r8, %r12
callq 0x3f9d4
movq %r12, %r8
movq 0x20(%rsp), %r12
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f297
movl 0x18(%rbx), %eax
addl $0x5, %r14d
xorl %ecx, %ecx
cmpl $0x4, %r8d
sete %cl
leal -0x1(%rax), %edx
movl %edx, 0x18(%rbx)
shll $0xf, %ecx
orl %r14d, %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
xorl %eax, %eax
cmpl $0x4, %r8d
sete %al
movl 0x14(%r13), %ecx
leal 0x1(%rcx), %edx
shrl %edx
andl $0x7fff7fff, %edx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %ecx # imm = 0xFFFE0000
cmovbl %ecx, %edx
addl %edx, %eax
addl $0x10000, %eax # imm = 0x10000
movl %eax, 0x14(%r13)
jmp 0x3f01f
testl %eax, %eax
jg 0x3f2ea
movq %rbx, %rdi
movq %r8, %r12
callq 0x3f9d4
movq %r12, %r8
movq 0x20(%rsp), %r12
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f30e
movl 0x18(%rbx), %eax
leal 0x6(%r14), %ecx
xorl %edx, %edx
cmpw $0xb, %r15w
setae %dl
leal -0x1(%rax), %esi
movl %esi, 0x18(%rbx)
shll $0xf, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
movl 0x18(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
cmpw $0xb, %r15w
movl $0x10001, %eax # imm = 0x10001
sbbl $0x0, %eax
addl %ecx, %eax
movl %eax, 0x18(%r13)
cmpw $0xa, %r15w
ja 0x3f40e
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f368
movq %rbx, %rdi
movq %r8, %r12
callq 0x3f9d4
movq %r12, %r8
movq 0x20(%rsp), %r12
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f38d
movl 0x18(%rbx), %eax
addl $0x7, %r14d
xorl %ecx, %ecx
cmpw $0x7, %r15w
setae %cl
leal -0x1(%rax), %edx
movl %edx, 0x18(%rbx)
shll $0xf, %ecx
orl %r14d, %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl 0x1c(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
cmpw $0x7, %r15w
movl $0x10001, %eax # imm = 0x10001
sbbl $0x0, %eax
addl %ecx, %eax
movl %eax, 0x1c(%r13)
movl 0x18(%rbx), %ecx
cmpw $0x6, %r15w
ja 0x3f4c2
testl %ecx, %ecx
jg 0x3f3e6
movq %r8, %r14
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f6be
movl 0x18(%rbx), %ecx
movl 0xc(%rsp), %edi
movq %r14, %r8
xorl %eax, %eax
cmpl $0x6, %r8d
setne %al
leal -0x1(%rcx), %edx
movl %edx, 0x18(%rbx)
shll $0xf, %eax
addl $0xffffc09f, %eax # imm = 0xFFFFC09F
movq 0x10(%rbx), %rdx
movslq %ecx, %rcx
movw %ax, -0x2(%rdx,%rcx,2)
jmp 0x3f541
leal -0x3(%r8), %ecx
cmpl $0xf, %ecx
ja 0x3f551
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f43b
movq %rbx, %rdi
movq %r8, %r15
callq 0x3f9d4
movq %r15, %r8
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f450
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
leal 0x8(%r14), %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl 0x20(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
addl $0x10000, %ecx # imm = 0x10000
movl %ecx, 0x20(%r13)
addq $0x24, %r13
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f49d
movq %r8, 0x10(%rsp)
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f81c
movl 0x18(%rbx), %eax
movl 0xc(%rsp), %edi
movq 0x10(%rsp), %r8
decl %eax
movl %eax, 0x18(%rbx)
addl $0x9, %r14d
movl $0x4, %r15d
movl $0xfffffff5, %ebp # imm = 0xFFFFFFF5
movl $0x10000, %ecx # imm = 0x10000
leaq 0x19237(%rip), %r12 # 0x586f4
jmp 0x3f783
testl %ecx, %ecx
jg 0x3f4df
movq %rbx, %rdi
movq %r8, %r14
callq 0x3f9d4
movq %r14, %r8
movl 0xc(%rsp), %edi
movl 0x18(%rbx), %ecx
testl %eax, %eax
je 0x3f502
xorl %eax, %eax
cmpw $0x9, %r15w
setb %al
movslq %ecx, %rdx
decl %ecx
movl %ecx, 0x18(%rbx)
shll $0xf, %eax
addl $0xffffc0a5, %eax # imm = 0xFFFFC0A5
movq 0x10(%rbx), %rsi
movw %ax, -0x2(%rsi,%rdx,2)
testl %ecx, %ecx
jg 0x3f523
movq %r8, %r14
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f6be
movl 0x18(%rbx), %ecx
movl 0xc(%rsp), %edi
movq %r14, %r8
leal -0x1(%rcx), %eax
movl %eax, 0x18(%rbx)
shll $0xf, %r8d
xorl $0xc091, %r8d # imm = 0xC091
movq 0x10(%rbx), %rax
movslq %ecx, %rcx
movw %r8w, -0x2(%rax,%rcx,2)
movl $0x2, %r15d
movl $0x16, %ebp
jmp 0x3f01f
cmpl $0x1f, %ecx
ja 0x3f608
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f57a
movq %rbx, %rdi
movq %r8, %r15
callq 0x3f9d4
movq %r15, %r8
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f58f
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
leal 0x8(%r14), %ecx
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl 0x20(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
addl $0x10000, %ecx # imm = 0x10000
movl %ecx, 0x20(%r13)
addq $0x24, %r13
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f5dc
movq %r8, 0x10(%rsp)
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f835
movl 0x18(%rbx), %eax
movl 0xc(%rsp), %edi
movq 0x10(%rsp), %r8
decl %eax
movl %eax, 0x18(%rbx)
addl $0x9, %r14d
orl $0x8000, %r14d # imm = 0x8000
movl $0x8, %r15d
movl $0xffffffed, %ebp # imm = 0xFFFFFFED
movl $0x10001, %ecx # imm = 0x10001
leaq 0x190f4(%rip), %r12 # 0x586f7
jmp 0x3f783
movl 0x18(%rbx), %eax
cmpl $0x3f, %ecx
ja 0x3f6d7
testl %eax, %eax
jg 0x3f631
movq %rbx, %rdi
movq %r8, %r15
callq 0x3f9d4
movq %r15, %r8
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f64c
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
leal 0x8(%r14), %ecx
orl $0x8000, %ecx # imm = 0x8000
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl 0x20(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
addl $0x10001, %ecx # imm = 0x10001
movl %ecx, 0x20(%r13)
addq $0x24, %r13
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f699
movq %r8, 0x10(%rsp)
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f84e
movl 0x18(%rbx), %eax
movl 0xc(%rsp), %edi
movq 0x10(%rsp), %r8
decl %eax
movl %eax, 0x18(%rbx)
addl $0xa, %r14d
movl $0x10, %r15d
movl $0xffffffdd, %ebp # imm = 0xFFFFFFDD
movl $0x10000, %ecx # imm = 0x10000
leaq 0x19042(%rip), %r12 # 0x586fb
jmp 0x3f783
movl $0x2, %r15d
movl $0x16, %ebp
movq 0x18(%rsp), %rdx
movl 0xc(%rsp), %edi
jmp 0x3f024
testl %eax, %eax
jg 0x3f6f4
movq %rbx, %rdi
movq %r8, %r15
callq 0x3f9d4
movq %r15, %r8
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f70f
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
movl %ecx, 0x18(%rbx)
leal 0x8(%r14), %ecx
orl $0x8000, %ecx # imm = 0x8000
movq 0x10(%rbx), %rdx
cltq
movw %cx, -0x2(%rdx,%rax,2)
movl 0x20(%r13), %eax
leal 0x1(%rax), %ecx
shrl %ecx
andl $0x7fff7fff, %ecx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %ecx
addl $0x10001, %ecx # imm = 0x10001
movl %ecx, 0x20(%r13)
addq $0x24, %r13
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f75c
movq %r8, 0x10(%rsp)
movq %rbx, %rdi
callq 0x3f9d4
testl %eax, %eax
je 0x3f867
movl 0x18(%rbx), %eax
movl 0xc(%rsp), %edi
movq 0x10(%rsp), %r8
decl %eax
movl %eax, 0x18(%rbx)
addl $0xa, %r14d
orl $0x8000, %r14d # imm = 0x8000
movl $0x400, %r15d # imm = 0x400
movl $0xffffffbd, %ebp # imm = 0xFFFFFFBD
movl $0x10001, %ecx # imm = 0x10001
leaq 0x18f7d(%rip), %r12 # 0x58700
movq 0x10(%rbx), %rdx
cltq
movw %r14w, (%rdx,%rax,2)
movl (%r13), %eax
leal 0x1(%rax), %edx
shrl %edx
andl $0x7fff7fff, %edx # imm = 0x7FFF7FFF
cmpl $0xfffe0000, %eax # imm = 0xFFFE0000
cmovbl %eax, %edx
addl %ecx, %edx
movl %edx, (%r13)
addl %r8d, %ebp
movl $0x4000, %r13d # imm = 0x4000
movzbl (%r12), %r14d
movl 0x18(%rbx), %eax
testl %eax, %eax
jg 0x3f7d3
movq %rbx, %rdi
callq 0x3f9d4
movl 0xc(%rsp), %edi
testl %eax, %eax
je 0x3f7f6
movl 0x18(%rbx), %eax
leal -0x1(%rax), %ecx
testl %ebp, %r15d
movl %ecx, 0x18(%rbx)
movl $0xc000, %ecx # imm = 0xC000
cmovel %r13d, %ecx
movzwl %r14w, %edx
orl %ecx, %edx
movq 0x10(%rbx), %rcx
cltq
movw %dx, -0x2(%rcx,%rax,2)
incq %r12
movl %r15d, %eax
shrl %eax
cmpl $0x1, %r15d
movl %eax, %r15d
ja 0x3f7b4
movl $0x2, %r15d
movl $0x16, %ebp
movq 0x20(%rsp), %r12
jmp 0x3f01f
movl $0x4, %r15d
movl $0xfffffff5, %ebp # imm = 0xFFFFFFF5
movl $0x10000, %ecx # imm = 0x10000
leaq 0x18ec1(%rip), %r12 # 0x586f4
jmp 0x3f87e
movl $0x8, %r15d
movl $0xffffffed, %ebp # imm = 0xFFFFFFED
movl $0x10001, %ecx # imm = 0x10001
leaq 0x18eab(%rip), %r12 # 0x586f7
jmp 0x3f87e
movl $0x10, %r15d
movl $0xffffffdd, %ebp # imm = 0xFFFFFFDD
movl $0x10000, %ecx # imm = 0x10000
leaq 0x18e96(%rip), %r12 # 0x586fb
jmp 0x3f87e
movl $0x400, %r15d # imm = 0x400
movl $0xffffffbd, %ebp # imm = 0xFFFFFFBD
movl $0x10001, %ecx # imm = 0x10001
leaq 0x18e82(%rip), %r12 # 0x58700
movl 0xc(%rsp), %edi
movq 0x10(%rsp), %r8
jmp 0x3f78e
xorl %eax, %eax
jmp 0x3f895
movl $0x1, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/enc/token_enc.c
|
VP8EstimateTokenSize
|
// Estimates the coded size (in cost units, as returned by VP8BitCost) of
// all tokens currently stored in 'b', using 'probas' for the
// non-fixed-probability ones.
size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas) {
  size_t total = 0;
  const VP8Tokens* page = b->pages_;
  assert(!b->error_);
  while (page != NULL) {
    const VP8Tokens* const next = page->next_;
    // Only the last page is partially filled: stop at b->left_ there.
    const int stop = (next == NULL) ? b->left_ : 0;
    const token_t* const page_tokens = TOKEN_DATA(page);
    int n;
    for (n = b->page_size_; n-- > stop; ) {
      const token_t token = page_tokens[n];
      const int bit = token & (1 << 15);
      if (token & FIXED_PROBA_BIT) {
        total += VP8BitCost(bit, token & 0xffu);           // inline probability
      } else {
        total += VP8BitCost(bit, probas[token & 0x3fffu]); // indexed probability
      }
    }
    page = next;
  }
  return total;
}
|
movq (%rdi), %rcx
testq %rcx, %rcx
je 0x3f9d1
pushq %rbp
pushq %rbx
movslq 0x1c(%rdi), %rdx
xorl %eax, %eax
leaq 0x1780e(%rip), %r8 # 0x57180
movq %rcx, %r9
movq (%rcx), %rcx
movl $0x0, %r10d
testq %rcx, %rcx
jne 0x3f987
movl 0x18(%rdi), %r10d
cmpl %r10d, %edx
jle 0x3f9c9
movslq %r10d, %r10
movq %rdx, %r11
movzwl 0x6(%r9,%r11,2), %ebx
movswl %bx, %ebp
btl $0xe, %ebx
jb 0x3f9b0
andl $0x3fff, %ebx # imm = 0x3FFF
shrl $0xf, %ebp
xorb (%rsi,%rbx), %bpl
jmp 0x3f9b5
shrl $0xf, %ebp
xorl %ebx, %ebp
movzbl %bpl, %ebx
decq %r11
movzwl (%r8,%rbx,2), %ebx
addq %rbx, %rax
cmpq %r10, %r11
jg 0x3f992
testq %rcx, %rcx
jne 0x3f972
popq %rbx
popq %rbp
retq
xorl %eax, %eax
retq
|
/PKRoma[P]libwebp/src/enc/token_enc.c
|
VP8WriteProbas
|
// Emits the coefficient probability updates (only where they differ from
// the VP8CoeffsProba0 defaults), then the optional skip probability.
void VP8WriteProbas(VP8BitWriter* const bw, const VP8EncProba* const probas) {
  int t, b, c, p;
  for (t = 0; t < NUM_TYPES; ++t) {
    for (b = 0; b < NUM_BANDS; ++b) {
      for (c = 0; c < NUM_CTX; ++c) {
        for (p = 0; p < NUM_PROBAS; ++p) {
          const uint8_t cur = probas->coeffs_[t][b][c][p];
          // Signal "update" only when the value deviates from the default;
          // the full 8-bit value follows only in that case.
          const int update = (cur != VP8CoeffsProba0[t][b][c][p]);
          if (VP8PutBit(bw, update, VP8CoeffsUpdateProba[t][b][c][p])) {
            VP8PutBits(bw, cur, 8);
          }
        }
      }
    }
  }
  if (VP8PutBitUniform(bw, probas->use_skip_proba_)) {
    VP8PutBits(bw, probas->skip_proba_, 8);
  }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x58, %rsp
movq %rdi, %rbx
movq %rsi, 0x8(%rsp)
leaq 0x4(%rsi), %r14
leaq 0x18dc2(%rip), %r12 # 0x58b40
leaq 0x1956b(%rip), %r13 # 0x592f0
xorl %esi, %esi
movq %rsi, 0x10(%rsp)
movq %r13, 0x18(%rsp)
movq %r12, 0x20(%rsp)
movq %r14, 0x28(%rsp)
xorl %eax, %eax
movq %rax, 0x30(%rsp)
movq %r13, 0x48(%rsp)
movq %r12, 0x40(%rsp)
movq %r14, 0x38(%rsp)
xorl %eax, %eax
movq %rax, 0x50(%rsp)
xorl %r15d, %r15d
movzbl (%r14,%r15), %ebp
xorl %esi, %esi
cmpb (%r12,%r15), %bpl
setne %sil
movzbl (%r13,%r15), %edx
movq %rbx, %rdi
callq 0x44208
testl %eax, %eax
je 0x3fdeb
movq %rbx, %rdi
movl %ebp, %esi
movl $0x8, %edx
callq 0x44333
incq %r15
cmpq $0xb, %r15
jne 0x3fdbb
movq 0x50(%rsp), %rax
incq %rax
addq $0xb, %r14
addq $0xb, %r12
addq $0xb, %r13
cmpq $0x3, %rax
jne 0x3fdb3
movq 0x30(%rsp), %rax
incq %rax
movq 0x38(%rsp), %r14
addq $0x21, %r14
movq 0x40(%rsp), %r12
addq $0x21, %r12
movq 0x48(%rsp), %r13
addq $0x21, %r13
cmpq $0x8, %rax
jne 0x3fd9d
movq 0x10(%rsp), %rsi
incq %rsi
movq 0x28(%rsp), %r14
movl $0x108, %edi # imm = 0x108
addq %rdi, %r14
movq 0x20(%rsp), %r12
addq %rdi, %r12
movq 0x18(%rsp), %r13
addq %rdi, %r13
cmpq $0x4, %rsi
jne 0x3fd87
movq 0x8(%rsp), %r14
movl 0x4dac(%r14), %esi
movq %rbx, %rdi
callq 0x442ee
testl %eax, %eax
je 0x3fea2
movzbl 0x3(%r14), %esi
movq %rbx, %rdi
movl $0x8, %edx
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x44333
addq $0x58, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nopl (%rax)
|
/PKRoma[P]libwebp/src/enc/tree_enc.c
|
VP8LEncoderNew
|
// Allocates and zero-initializes a lossless encoder instance.
// On allocation failure, records the error on 'picture' and returns NULL.
static VP8LEncoder* VP8LEncoderNew(const WebPConfig* const config,
                                   const WebPPicture* const picture) {
  VP8LEncoder* const enc = (VP8LEncoder*)WebPSafeCalloc(1ULL, sizeof(*enc));
  if (enc != NULL) {
    enc->config_ = config;
    enc->pic_ = picture;
    enc->argb_content_ = kEncoderNone;
    VP8LEncDspInit();
    return enc;
  }
  WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
  return NULL;
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %r14
movq %rdi, %r15
movl $0x1, %edi
movl $0x918, %esi # imm = 0x918
callq 0x31484
movq %rax, %rbx
testq %rax, %rax
je 0x40a7a
movq %r15, (%rbx)
movq %r14, 0x8(%rbx)
movl $0x0, 0x18(%rbx)
callq 0x45bcf
jmp 0x40a87
movq %r14, %rdi
movl $0x1, %esi
callq 0x2fb4a
movq %rbx, %rax
popq %rbx
popq %r14
popq %r15
retq
|
/PKRoma[P]libwebp/src/enc/vp8l_enc.c
|
VP8LEncodeImage
|
// Top-level VP8L (lossless) encoding entry point. Initializes a VP8LBitWriter
// sized from the image-hint heuristic, writes the image-size header, the
// alpha flag + lossless version bits, the main compressed stream
// (VP8LEncodeStream) and finally the RIFF image chunk, reporting progress
// (1/5/90/100%) along the way. All exits funnel through 'Error:', which
// always wipes the bit writer. Returns 1 on success; on failure records the
// error on 'picture' via WebPEncodingSetError and returns 0.
int VP8LEncodeImage(const WebPConfig* const config,
                    const WebPPicture* const picture) {
  int width, height;
  int has_alpha;
  size_t coded_size;
  int percent = 0;
  int initial_size;
  WebPEncodingError err = VP8_ENC_OK;
  VP8LBitWriter bw;

  // A NULL picture cannot even carry an error code, so just return 0.
  if (picture == NULL) return 0;

  if (config == NULL || picture->argb == NULL) {
    err = VP8_ENC_ERROR_NULL_PARAMETER;
    WebPEncodingSetError(picture, err);
    return 0;
  }

  width = picture->width;
  height = picture->height;
  // Initialize BitWriter with size corresponding to 16 bpp to photo images and
  // 8 bpp for graphical images.
  initial_size = (config->image_hint == WEBP_HINT_GRAPH) ?
      width * height : width * height * 2;
  if (!VP8LBitWriterInit(&bw, initial_size)) {
    err = VP8_ENC_ERROR_OUT_OF_MEMORY;
    goto Error;
  }

  if (!WebPReportProgress(picture, 1, &percent)) {
 UserAbort:  // shared abort target for every failed progress report below
    err = VP8_ENC_ERROR_USER_ABORT;
    goto Error;
  }
  // Reset stats (for pure lossless coding)
  if (picture->stats != NULL) {
    WebPAuxStats* const stats = picture->stats;
    memset(stats, 0, sizeof(*stats));
    // Lossless output: PSNR is reported as a fixed 99 dB sentinel.
    stats->PSNR[0] = 99.f;
    stats->PSNR[1] = 99.f;
    stats->PSNR[2] = 99.f;
    stats->PSNR[3] = 99.f;
    stats->PSNR[4] = 99.f;
  }

  // Write image size.
  if (!WriteImageSize(picture, &bw)) {
    err = VP8_ENC_ERROR_OUT_OF_MEMORY;
    goto Error;
  }

  has_alpha = WebPPictureHasTransparency(picture);
  // Write the non-trivial Alpha flag and lossless version.
  if (!WriteRealAlphaAndVersion(&bw, has_alpha)) {
    err = VP8_ENC_ERROR_OUT_OF_MEMORY;
    goto Error;
  }

  if (!WebPReportProgress(picture, 5, &percent)) goto UserAbort;

  // Encode main image stream.
  err = VP8LEncodeStream(config, picture, &bw, 1 /*use_cache*/);
  if (err != VP8_ENC_OK) goto Error;

  if (!WebPReportProgress(picture, 90, &percent)) goto UserAbort;

  // Finish the RIFF chunk.
  err = WriteImage(picture, &bw, &coded_size);
  if (err != VP8_ENC_OK) goto Error;

  if (!WebPReportProgress(picture, 100, &percent)) goto UserAbort;

#if !defined(WEBP_DISABLE_STATS)
  // Save size.
  if (picture->stats != NULL) {
    picture->stats->coded_size += (int)coded_size;
    picture->stats->lossless_size = (int)coded_size;
  }
#endif

  if (picture->extra_info != NULL) {
    // Lossless coding has no per-macroblock info; clear the caller's grid.
    const int mb_w = (width + 15) >> 4;
    const int mb_h = (height + 15) >> 4;
    memset(picture->extra_info, 0, mb_w * mb_h * sizeof(*picture->extra_info));
  }

 Error:
  // A bit-writer allocation failure may have been latched inside bw; it
  // overrides whatever error (or success) we reached this point with.
  if (bw.error_) err = VP8_ENC_ERROR_OUT_OF_MEMORY;
  VP8LBitWriterWipeOut(&bw);
  if (err != VP8_ENC_OK) {
    WebPEncodingSetError(picture, err);
    return 0;
  }
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x48, %rsp
movl $0x0, 0x4(%rsp)
xorl %ebp, %ebp
testq %rsi, %rsi
je 0x428de
movq %rsi, %rbx
movq %rdi, %r14
testq %rdi, %rdi
je 0x4281b
cmpq $0x0, 0x48(%rbx)
je 0x4281b
movl 0x8(%rbx), %r12d
movl 0xc(%rbx), %r13d
cmpl $0x3, 0xc(%r14)
setne %cl
movl %r13d, %eax
imull %r12d, %eax
shll %cl, %eax
movslq %eax, %rsi
leaq 0x10(%rsp), %rdi
callq 0x44557
movl $0x1, %ebp
movl $0x1, %r15d
testl %eax, %eax
je 0x428ba
leaq 0x4(%rsp), %rdx
movq %rbx, %rdi
movl $0x1, %esi
callq 0x2fb53
testl %eax, %eax
je 0x428b4
movl %r13d, 0xc(%rsp)
movl %r12d, 0x8(%rsp)
movq 0x80(%rbx), %r15
testq %r15, %r15
je 0x426f8
movl $0xbc, %edx
movq %r15, %rdi
xorl %esi, %esi
callq 0x40c0
movaps 0x17035(%rip), %xmm0 # 0x59720
movups %xmm0, 0x4(%r15)
movl $0x42c60000, 0x14(%r15) # imm = 0x42C60000
movl 0x8(%rbx), %r13d
movl 0xc(%rbx), %r12d
decl %r13d
movl 0x18(%rsp), %eax
cmpl $0x20, %eax
jl 0x4271d
leaq 0x10(%rsp), %r15
movq %r15, %rdi
callq 0x44705
movl 0x8(%r15), %eax
movl %r13d, %edx
movl %eax, %ecx
shlq %cl, %rdx
orq 0x10(%rsp), %rdx
movq %rdx, 0x10(%rsp)
leal 0xe(%rax), %ecx
movl %ecx, 0x18(%rsp)
cmpl $0x12, %eax
jl 0x4274f
leaq 0x10(%rsp), %r15
movq %r15, %rdi
callq 0x44705
movl 0x8(%r15), %ecx
movq (%r15), %rdx
decl %r12d
shlq %cl, %r12
orq %rdx, %r12
movq %r12, 0x10(%rsp)
addl $0xe, %ecx
movl %ecx, 0x18(%rsp)
movl $0x1, %r15d
cmpl $0x0, 0x38(%rsp)
movl 0xc(%rsp), %r13d
jne 0x428ba
movq %rbx, %rdi
callq 0x2b118
movl %eax, %r12d
movl 0x18(%rsp), %ecx
cmpl $0x20, %ecx
jl 0x427a0
leaq 0x10(%rsp), %rdi
callq 0x44705
leaq 0x10(%rsp), %rax
movl 0x8(%rax), %ecx
movl %r12d, %eax
shlq %cl, %rax
orq %rax, 0x10(%rsp)
leal 0x1(%rcx), %eax
movl %eax, 0x18(%rsp)
cmpl $0x1f, %ecx
jl 0x427c9
leaq 0x10(%rsp), %r12
movq %r12, %rdi
callq 0x44705
movl 0x8(%r12), %eax
addl $0x3, %eax
movl %eax, 0x18(%rsp)
cmpl $0x0, 0x38(%rsp)
movl 0x8(%rsp), %r12d
jne 0x428ba
leaq 0x4(%rsp), %rdx
movq %rbx, %rdi
movl $0x5, %esi
callq 0x2fb53
testl %eax, %eax
je 0x428b4
leaq 0x10(%rsp), %rdx
movq %r14, %rdi
movq %rbx, %rsi
movl $0x1, %ecx
callq 0x3feb4
testl %eax, %eax
je 0x42828
movl %eax, %r15d
jmp 0x428ba
movq %rbx, %rdi
movl $0x3, %esi
jmp 0x428d7
leaq 0x4(%rsp), %rdx
movq %rbx, %rdi
movl $0x5a, %esi
callq 0x2fb53
testl %eax, %eax
je 0x428b4
leaq 0x10(%rsp), %rsi
leaq 0x40(%rsp), %rdx
movq %rbx, %rdi
callq 0x428f4
testl %eax, %eax
jne 0x42813
leaq 0x4(%rsp), %rdx
movq %rbx, %rdi
movl $0x64, %esi
callq 0x2fb53
movl $0xa, %r15d
testl %eax, %eax
je 0x428ba
movq 0x80(%rbx), %rax
testq %rax, %rax
je 0x42888
movl 0x40(%rsp), %ecx
addl %ecx, (%rax)
movl %ecx, 0xa8(%rax)
movq 0x78(%rbx), %rdi
testq %rdi, %rdi
je 0x428ef
addl $0xf, %r12d
sarl $0x4, %r12d
addl $0xf, %r13d
sarl $0x4, %r13d
imull %r12d, %r13d
movslq %r13d, %rdx
xorl %r15d, %r15d
xorl %esi, %esi
callq 0x40c0
jmp 0x428ba
movl $0xa, %r15d
leaq 0x10(%rsp), %rdi
cmpl $0x0, 0x28(%rdi)
cmovnel %ebp, %r15d
callq 0x44675
testl %r15d, %r15d
je 0x428de
movq %rbx, %rdi
movl %r15d, %esi
callq 0x2fb4a
xorl %ebp, %ebp
movl %ebp, %eax
addq $0x48, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
xorl %r15d, %r15d
jmp 0x428ba
|
/PKRoma[P]libwebp/src/enc/vp8l_enc.c
|
VP8LBitWriterResize
|
// Ensures the bit-writer buffer can hold at least 'extra_size' more bytes
// past the current write position. Already-written bytes are preserved, the
// old buffer is freed, and the new capacity grows by 1.5x (at minimum to the
// required size) rounded up to the next 1KB multiple. On overflow or
// allocation failure, sets bw->error_ and returns 0; returns 1 on success.
static int VP8LBitWriterResize(VP8LBitWriter* const bw, size_t extra_size) {
  const size_t capacity = bw->end_ - bw->buf_;
  const size_t used = bw->cur_ - bw->buf_;
  const uint64_t needed_64b = (uint64_t)used + extra_size;
  const size_t needed = (size_t)needed_64b;
  size_t new_capacity;
  uint8_t* new_buf;
  // The requested size does not fit in size_t (32-bit builds): hard failure.
  if ((uint64_t)needed != needed_64b) {
    bw->error_ = 1;
    return 0;
  }
  // Fast path: the current buffer is already large enough.
  if (capacity > 0 && needed <= capacity) return 1;
  new_capacity = (3 * capacity) >> 1;
  if (new_capacity < needed) new_capacity = needed;
  // Make allocated size a multiple of 1k.
  new_capacity = ((new_capacity >> 10) + 1) << 10;
  new_buf = (uint8_t*)WebPSafeMalloc(1ULL, new_capacity);
  if (new_buf == NULL) {
    bw->error_ = 1;
    return 0;
  }
  if (used > 0) {
    memcpy(new_buf, bw->buf_, used);
  }
  WebPSafeFree(bw->buf_);
  bw->buf_ = new_buf;
  bw->cur_ = new_buf + used;
  bw->end_ = new_buf + new_capacity;
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq 0x20(%rdi), %rcx
movq 0x10(%rdi), %r13
movq 0x18(%rdi), %rbp
movq %rbp, %r14
subq %r13, %r14
addq %r14, %rsi
movl $0x1, %eax
subq %r13, %rcx
je 0x44599
cmpq %rcx, %rsi
jbe 0x44609
leaq (%rcx,%rcx,2), %r15
shrq %r15
cmpq %rsi, %r15
cmovbeq %rsi, %r15
andq $-0x400, %r15 # imm = 0xFC00
addq $0x400, %r15 # imm = 0x400
movl $0x1, %edi
movq %r15, %rsi
callq 0x3145c
testq %rax, %rax
je 0x44600
movq %rax, %r12
cmpq %r13, %rbp
je 0x445de
movq 0x10(%rbx), %rsi
movq %r12, %rdi
movq %r14, %rdx
callq 0x45d0
movq 0x10(%rbx), %rdi
callq 0x314a8
movq %r12, 0x10(%rbx)
addq %r12, %r14
movq %r14, 0x18(%rbx)
addq %r12, %r15
movq %r15, 0x20(%rbx)
movl $0x1, %eax
jmp 0x44609
movl $0x1, 0x28(%rbx)
xorl %eax, %eax
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/utils/bit_writer_utils.c
|
CompareHuffmanTrees
|
// qsort() comparator over HuffmanTree entries: sorts by decreasing
// total_count_, breaking ties by increasing value_. Equal counts with equal
// values are not expected (asserted), so 0 is never returned.
static int CompareHuffmanTrees(const void* ptr1, const void* ptr2) {
  const HuffmanTree* const t1 = (const HuffmanTree*)ptr1;
  const HuffmanTree* const t2 = (const HuffmanTree*)ptr2;
  if (t1->total_count_ != t2->total_count_) {
    return (t1->total_count_ > t2->total_count_) ? -1 : 1;
  }
  assert(t1->value_ != t2->value_);
  return (t1->value_ < t2->value_) ? -1 : 1;
}
|
movl (%rsi), %ecx
movl $0xffffffff, %eax # imm = 0xFFFFFFFF
cmpl %ecx, (%rdi)
ja 0x44f5e
movl $0x1, %eax
jb 0x44f5e
movl 0x4(%rdi), %eax
xorl %ecx, %ecx
cmpl 0x4(%rsi), %eax
setge %cl
leal -0x1(,%rcx,2), %eax
retq
|
/PKRoma[P]libwebp/src/utils/huffman_encode_utils.c
|
QuantizeLevels
|
// Quantizes the 8-bit plane 'data' (width*height bytes) down to at most
// 'num_levels' distinct values in place, using k-means (Lloyd) iterations
// over the 256-bin histogram of the input. If 'sse' is non-NULL the final
// sum of squared errors is stored there. Returns 0 on invalid arguments
// (NULL data, non-positive dimensions, num_levels outside [2, 256]),
// 1 otherwise. If the input already has <= num_levels distinct values the
// data is left untouched.
int QuantizeLevels(uint8_t* const data, int width, int height,
                   int num_levels, uint64_t* const sse) {
  int freq[NUM_SYMBOLS] = { 0 };            // histogram of input byte values
  int q_level[NUM_SYMBOLS] = { 0 };         // byte value -> centroid slot
  double inv_q_level[NUM_SYMBOLS] = { 0 };  // centroid slot -> representative
  int min_s = 255, max_s = 0;
  // NOTE(review): height * width is computed in int before widening to
  // size_t — assumes the pixel count fits in an int; confirm with callers.
  const size_t data_size = height * width;
  int i, num_levels_in, iter;
  double last_err = 1.e38, err = 0.;
  const double err_threshold = ERROR_THRESHOLD * data_size;

  if (data == NULL) {
    return 0;
  }

  if (width <= 0 || height <= 0) {
    return 0;
  }

  if (num_levels < 2 || num_levels > 256) {
    return 0;
  }

  {
    // Build the histogram while tracking the value range and the number of
    // distinct input levels.
    size_t n;
    num_levels_in = 0;
    for (n = 0; n < data_size; ++n) {
      num_levels_in += (freq[data[n]] == 0);
      if (min_s > data[n]) min_s = data[n];
      if (max_s < data[n]) max_s = data[n];
      ++freq[data[n]];
    }
  }

  if (num_levels_in <= num_levels) goto End;  // nothing to do!

  // Start with uniformly spread centroids.
  for (i = 0; i < num_levels; ++i) {
    inv_q_level[i] = min_s + (double)(max_s - min_s) * i / (num_levels - 1);
  }

  // Fixed values. Won't be changed.
  q_level[min_s] = 0;
  q_level[max_s] = num_levels - 1;
  assert(inv_q_level[0] == min_s);
  assert(inv_q_level[num_levels - 1] == max_s);

  // k-Means iterations.
  for (iter = 0; iter < MAX_ITER; ++iter) {
    double q_sum[NUM_SYMBOLS] = { 0 };    // per-slot weighted sum of values
    double q_count[NUM_SYMBOLS] = { 0 };  // per-slot total frequency
    int s, slot = 0;

    // Assign classes to representatives.
    for (s = min_s; s <= max_s; ++s) {
      // Keep track of the nearest neighbour 'slot'
      // (2*s > midpoint test avoids a division by 2 on the centroid side).
      while (slot < num_levels - 1 &&
             2 * s > inv_q_level[slot] + inv_q_level[slot + 1]) {
        ++slot;
      }
      if (freq[s] > 0) {
        q_sum[slot] += s * freq[s];
        q_count[slot] += freq[s];
      }
      q_level[s] = slot;
    }

    // Assign new representatives to classes.
    // (slots 0 and num_levels-1 stay pinned to min_s/max_s.)
    if (num_levels > 2) {
      for (slot = 1; slot < num_levels - 1; ++slot) {
        const double count = q_count[slot];
        if (count > 0.) {
          inv_q_level[slot] = q_sum[slot] / count;
        }
      }
    }

    // Compute convergence error.
    err = 0.;
    for (s = min_s; s <= max_s; ++s) {
      const double error = s - inv_q_level[q_level[s]];
      err += freq[s] * error * error;
    }

    // Check for convergence: we stop as soon as the error is no
    // longer improving.
    if (last_err - err < err_threshold) break;
    last_err = err;
  }

  // Remap the alpha plane to quantized values.
  {
    // double->int rounding operation can be costly, so we do it
    // once for all before remapping. We also perform the data[] -> slot
    // mapping, while at it (avoid one indirection in the final loop).
    uint8_t map[NUM_SYMBOLS];
    int s;
    size_t n;
    for (s = min_s; s <= max_s; ++s) {
      const int slot = q_level[s];
      map[s] = (uint8_t)(inv_q_level[slot] + .5);
    }
    // Final pass.
    for (n = 0; n < data_size; ++n) {
      data[n] = map[data[n]];
    }
  }
 End:
  // Store sum of squared error if needed.
  // (err stays 0. when the early 'nothing to do' path was taken.)
  if (sse != NULL) *sse = (uint64_t)err;
  return 1;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2068, %rsp # imm = 0x2068
movq %r8, %rbx
movl %ecx, %r15d
movl %edx, %ebp
movl %esi, %r13d
movq %rdi, %r14
leaq 0x460(%rsp), %rdi
xorl %r12d, %r12d
movl $0x400, %edx # imm = 0x400
xorl %esi, %esi
callq 0x40c0
leaq 0x60(%rsp), %rdi
movl $0x400, %edx # imm = 0x400
xorl %esi, %esi
callq 0x40c0
leaq 0x860(%rsp), %rdi
movl $0x800, %edx # imm = 0x800
xorl %esi, %esi
callq 0x40c0
testq %r14, %r14
je 0x4546c
testl %r13d, %r13d
setle %al
testl %ebp, %ebp
setle %cl
orb %al, %cl
leal -0x101(%r15), %eax
cmpl $0xffffff01, %eax # imm = 0xFFFFFF01
setb %al
orb %cl, %al
jne 0x4546c
imull %r13d, %ebp
movslq %ebp, %rdi
testl %ebp, %ebp
je 0x4508f
xorl %r8d, %r8d
movl $0xff, %r12d
xorl %ecx, %ecx
xorl %eax, %eax
movzbl (%r14,%rcx), %edx
movl 0x460(%rsp,%rdx,4), %esi
cmpl $0x1, %esi
adcl $0x0, %eax
cmpl %edx, %r12d
cmovgel %edx, %r12d
cmpl %edx, %r8d
cmovlel %edx, %r8d
incl %esi
movl %esi, 0x460(%rsp,%rdx,4)
incq %rcx
cmpq %rcx, %rdi
jne 0x4505c
jmp 0x4509a
movl $0xff, %r12d
xorl %eax, %eax
xorl %r8d, %r8d
cmpl %r15d, %eax
jle 0x451e9
movq %rdi, %xmm0
punpckldq 0x11bf0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd 0x11bf8(%rip), %xmm0 # 0x56cb0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
movapd %xmm1, 0x20(%rsp)
testl %r15d, %r15d
movq %rbx, 0x48(%rsp)
movq %r14, 0x40(%rsp)
movl %ebp, 0x10(%rsp)
movq %rdi, 0x38(%rsp)
movq %r15, 0x50(%rsp)
jle 0x451f0
xorps %xmm0, %xmm0
cvtsi2sd %r12d, %xmm0
movl %r8d, %eax
subl %r12d, %eax
xorps %xmm1, %xmm1
cvtsi2sd %eax, %xmm1
leal -0x1(%r15), %r13d
cvtsi2sd %r13d, %xmm2
movl %r15d, %ecx
leal 0x1(%r15), %eax
andl $0x3fe, %eax # imm = 0x3FE
decq %rcx
movq %rcx, %xmm3
pshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
movdqa 0xf496(%rip), %xmm4 # 0x545c0
xorl %ecx, %ecx
movdqa 0xf67c(%rip), %xmm5 # 0x547b0
pxor %xmm5, %xmm3
pcmpeqd %xmm6, %xmm6
movdqa 0xf69c(%rip), %xmm7 # 0x547e0
movdqa %xmm4, %xmm8
pxor %xmm5, %xmm8
movdqa %xmm8, %xmm9
pcmpgtd %xmm3, %xmm9
pcmpeqd %xmm3, %xmm8
pshufd $0xf5, %xmm8, %xmm10 # xmm10 = xmm8[1,1,3,3]
pand %xmm9, %xmm10
pshufd $0xf5, %xmm9, %xmm8 # xmm8 = xmm9[1,1,3,3]
por %xmm10, %xmm8
movd %xmm8, %edx
notl %edx
testb $0x1, %dl
je 0x451a1
xorps %xmm9, %xmm9
cvtsi2sd %ecx, %xmm9
mulsd %xmm1, %xmm9
divsd %xmm2, %xmm9
addsd %xmm0, %xmm9
movsd %xmm9, 0x860(%rsp,%rcx,8)
pxor %xmm6, %xmm8
pextrw $0x4, %xmm8, %edx
testb $0x1, %dl
je 0x451d6
leal 0x1(%rcx), %edx
xorps %xmm8, %xmm8
cvtsi2sd %edx, %xmm8
mulsd %xmm1, %xmm8
divsd %xmm2, %xmm8
addsd %xmm0, %xmm8
movsd %xmm8, 0x868(%rsp,%rcx,8)
addq $0x2, %rcx
paddq %xmm7, %xmm4
cmpq %rcx, %rax
jne 0x45144
jmp 0x451f4
xorl %eax, %eax
jmp 0x4545e
leal -0x1(%r15), %r13d
movapd 0x20(%rsp), %xmm0
mulsd 0x146ae(%rip), %xmm0 # 0x598b0
movapd %xmm0, 0x20(%rsp)
movl %r12d, %eax
movq %rax, 0x8(%rsp)
movl $0x0, 0x60(%rsp,%rax,4)
movl %r8d, %eax
movl %r13d, 0x60(%rsp,%rax,4)
movslq %r13d, %rbx
movq %r8, 0x18(%rsp)
leal 0x1(%r8), %r15d
movl %ebx, %r14d
movl %r15d, %eax
subl %r12d, %eax
movl %eax, 0x14(%rsp)
xorl %ebp, %ebp
movsd 0x14675(%rip), %xmm1 # 0x598b8
movsd %xmm1, 0x58(%rsp)
movl $0x800, %edx # imm = 0x800
leaq 0x1060(%rsp), %rdi
xorl %esi, %esi
callq 0x40c0
movl $0x800, %edx # imm = 0x800
leaq 0x1860(%rsp), %rdi
xorl %esi, %esi
callq 0x40c0
movq 0x18(%rsp), %rdi
cmpl %r12d, %edi
jl 0x4531a
xorl %ecx, %ecx
movq 0x8(%rsp), %rax
leal (%rax,%rax), %edx
xorps %xmm0, %xmm0
cvtsi2sd %edx, %xmm0
movslq %ecx, %rdx
cmpl %r13d, %edx
movl %r13d, %esi
cmovgl %edx, %esi
leal -0x1(%rdx), %ecx
cmpq %rbx, %rdx
jge 0x452c3
movsd 0x860(%rsp,%rdx,8), %xmm1
addsd 0x868(%rsp,%rdx,8), %xmm1
incq %rdx
incl %ecx
ucomisd %xmm1, %xmm0
ja 0x4529f
jmp 0x452c5
movl %esi, %ecx
movl 0x460(%rsp,%rax,4), %edx
testl %edx, %edx
jle 0x4530a
xorps %xmm0, %xmm0
cvtsi2sd %edx, %xmm0
movl %eax, %esi
imull %edx, %esi
xorps %xmm1, %xmm1
cvtsi2sd %esi, %xmm1
movslq %ecx, %rdx
addsd 0x1060(%rsp,%rdx,8), %xmm1
movsd %xmm1, 0x1060(%rsp,%rdx,8)
addsd 0x1860(%rsp,%rdx,8), %xmm0
movsd %xmm0, 0x1860(%rsp,%rdx,8)
movl %ecx, 0x60(%rsp,%rax,4)
incq %rax
cmpl %eax, %r15d
jne 0x45286
cmpl $0x3, 0x50(%rsp)
xorpd %xmm2, %xmm2
jl 0x45357
movl $0x1, %eax
movsd 0x1860(%rsp,%rax,8), %xmm0
ucomisd %xmm2, %xmm0
jbe 0x4534f
movsd 0x1060(%rsp,%rax,8), %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, 0x860(%rsp,%rax,8)
incq %rax
cmpq %rax, %r14
jne 0x4532a
xorpd %xmm0, %xmm0
cmpl %r12d, %edi
jl 0x4539d
movl 0x14(%rsp), %eax
movq 0x8(%rsp), %rcx
xorps %xmm1, %xmm1
cvtsi2sd %ecx, %xmm1
movslq 0x60(%rsp,%rcx,4), %rdx
subsd 0x860(%rsp,%rdx,8), %xmm1
xorps %xmm2, %xmm2
cvtsi2sdl 0x460(%rsp,%rcx,4), %xmm2
mulsd %xmm1, %xmm2
mulsd %xmm1, %xmm2
addsd %xmm2, %xmm0
incq %rcx
decl %eax
jne 0x45369
movsd 0x58(%rsp), %xmm2
subsd %xmm0, %xmm2
movapd 0x20(%rsp), %xmm1
ucomisd %xmm2, %xmm1
ja 0x453c2
incl %ebp
movapd %xmm0, %xmm1
cmpl $0x6, %ebp
jne 0x45243
movq 0x18(%rsp), %r8
subl %r12d, %r8d
movq 0x38(%rsp), %rdi
jl 0x45416
movq 0x8(%rsp), %rcx
leaq (%rsp,%rcx,4), %rax
addq $0x60, %rax
addq %rsp, %rcx
addq $0x1060, %rcx # imm = 0x1060
incl %r8d
xorl %edx, %edx
movsd 0xbc3b(%rip), %xmm1 # 0x51030
movslq (%rax,%rdx,4), %rsi
movsd 0x860(%rsp,%rsi,8), %xmm2
addsd %xmm1, %xmm2
cvttsd2si %xmm2, %esi
movb %sil, (%rcx,%rdx)
incq %rdx
cmpl %edx, %r8d
jne 0x453f5
cmpl $0x0, 0x10(%rsp)
movq 0x48(%rsp), %rbx
movq 0x40(%rsp), %rdx
je 0x4543f
xorl %eax, %eax
movzbl (%rdx,%rax), %ecx
movb 0x1060(%rsp,%rcx), %cl
movb %cl, (%rdx,%rax)
incq %rax
cmpq %rax, %rdi
jne 0x45429
cvttsd2si %xmm0, %rcx
movq %rcx, %rdx
sarq $0x3f, %rdx
subsd 0x107c5(%rip), %xmm0 # 0x55c18
cvttsd2si %xmm0, %rax
andq %rdx, %rax
orq %rcx, %rax
movl $0x1, %r12d
testq %rbx, %rbx
je 0x4546c
movq %rax, (%rbx)
movl %r12d, %eax
addq $0x2068, %rsp # imm = 0x2068
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nopl (%rax)
|
/PKRoma[P]libwebp/src/utils/quant_levels_utils.c
|
VP8LBitsEntropyUnrefined
|
// Collects the unrefined entropy statistics of the histogram 'array[0..n-1]'
// into 'entropy': total sum, count and last index of non-zero bins, the
// maximum bin value, and the entropy estimate accumulated via VP8LFastSLog2.
void VP8LBitsEntropyUnrefined(const uint32_t* const array, int n,
                              VP8LBitEntropy* const entropy) {
  int i;
  VP8LBitEntropyInit(entropy);
  for (i = 0; i < n; ++i) {
    const uint32_t v = array[i];
    if (v == 0) continue;  // empty bins contribute nothing
    entropy->sum += v;
    entropy->nonzero_code = i;
    ++entropy->nonzeros;
    entropy->entropy -= VP8LFastSLog2(v);
    if (entropy->max_val < v) {
      entropy->max_val = v;
    }
  }
  entropy->entropy += VP8LFastSLog2(entropy->sum);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
movq %rdx, %rbx
xorps %xmm0, %xmm0
movups %xmm0, (%rdx)
movabsq $-0x100000000, %rax # imm = 0xFFFFFFFF00000000
movq %rax, 0x10(%rdx)
testl %esi, %esi
jle 0x4556a
movq %rdi, %r14
movl %esi, %r12d
xorpd %xmm1, %xmm1
xorl %eax, %eax
movaps 0x143ea(%rip), %xmm4 # 0x598c0
leaq 0x14803(%rip), %r13 # 0x59ce0
leaq 0x25e3c(%rip), %r15 # 0x6b320
xorl %ebp, %ebp
movl (%r14,%rbp,4), %ecx
testl %ecx, %ecx
je 0x4554a
movl %ebp, 0x14(%rbx)
movq 0x8(%rbx), %xmm0
movd %ecx, %xmm2
movaps %xmm4, %xmm3
movss %xmm2, %xmm3 # xmm3 = xmm2[0],xmm3[1,2,3]
paddd %xmm0, %xmm3
movq %xmm3, 0x8(%rbx)
movl (%r14,%rbp,4), %edi
cmpq $0xff, %rdi
ja 0x45520
movd (%r13,%rdi,4), %xmm0
jmp 0x45535
callq *(%r15)
movaps 0x14396(%rip), %xmm4 # 0x598c0
movsd (%rbx), %xmm1
movl 0x10(%rbx), %eax
movl (%r14,%rbp,4), %edi
cvtss2sd %xmm0, %xmm0
subsd %xmm0, %xmm1
movsd %xmm1, (%rbx)
cmpl %edi, %eax
jae 0x4554a
movl %edi, 0x10(%rbx)
jmp 0x4554c
movl %eax, %edi
incq %rbp
movl %edi, %eax
cmpq %rbp, %r12
jne 0x454e6
movl 0x8(%rbx), %edi
cmpl $0xff, %edi
jbe 0x45570
callq *(%r15)
movsd (%rbx), %xmm1
jmp 0x4557e
xorpd %xmm1, %xmm1
xorl %edi, %edi
movl %edi, %eax
leaq 0x14767(%rip), %rcx # 0x59ce0
movss (%rcx,%rax,4), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm1, %xmm0
movsd %xmm0, (%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/PKRoma[P]libwebp/src/dsp/lossless_enc.c
|
VP8LSubtractGreenFromBlueAndRed_C
|
// Subtract-green transform (scalar reference code): for each ARGB pixel,
// subtracts the green channel from the red and blue channels modulo 256,
// leaving the alpha and green bytes untouched.
void VP8LSubtractGreenFromBlueAndRed_C(uint32_t* argb_data, int num_pixels) {
  int i;
  for (i = 0; i < num_pixels; ++i) {
    const uint32_t argb = argb_data[i];
    const uint32_t green = (argb >> 8) & 0xff;
    // The masking after the subtraction makes the high bits of each operand
    // irrelevant, so the channels need not be isolated beforehand.
    const uint32_t new_r = ((argb >> 16) - green) & 0xff;
    const uint32_t new_b = (argb - green) & 0xff;
    argb_data[i] = (argb & 0xff00ff00u) | (new_r << 16) | new_b;
  }
}
|
testl %esi, %esi
jle 0x455d9
movl %esi, %eax
xorl %ecx, %ecx
movl (%rdi,%rcx,4), %edx
movl %edx, %esi
shrl $0x8, %esi
movl %edx, %r8d
subl %esi, %r8d
movzbl %r8b, %r8d
movl %edx, %r9d
andl $0xff00ff00, %r9d # imm = 0xFF00FF00
orl %r8d, %r9d
shll $0x10, %esi
subl %esi, %edx
andl $0xff0000, %edx # imm = 0xFF0000
orl %r9d, %edx
movl %edx, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne 0x455a1
retq
|
/PKRoma[P]libwebp/src/dsp/lossless_enc.c
|
VP8LCollectColorRedTransforms_C
|
// Accumulates into 'histo' the distribution of red values obtained after
// applying the green-to-red color transform to every pixel of a
// tile_width x tile_height tile. 'stride' is the row pitch in pixels.
void VP8LCollectColorRedTransforms_C(const uint32_t* argb, int stride,
                                     int tile_width, int tile_height,
                                     int green_to_red, int histo[]) {
  int y;
  for (y = 0; y < tile_height; ++y) {
    int x;
    for (x = 0; x < tile_width; ++x) {
      ++histo[TransformColorRed((uint8_t)green_to_red, argb[x])];
    }
    argb += stride;
  }
}
|
testl %ecx, %ecx
jle 0x456ac
pushq %rbx
movsbl %r8b, %eax
movslq %esi, %rsi
movl %edx, %r8d
shlq $0x2, %rsi
testl %edx, %edx
jle 0x4569c
xorl %r10d, %r10d
movl (%rdi,%r10,4), %r11d
movl %r11d, %ebx
shrl $0x10, %ebx
shrl $0x8, %r11d
movsbl %r11b, %r11d
imull %eax, %r11d
shrl $0x5, %r11d
subl %r11d, %ebx
movzbl %bl, %r11d
incl (%r9,%r11,4)
incq %r10
cmpq %r10, %r8
jne 0x4566f
leal -0x1(%rcx), %r10d
addq %rsi, %rdi
cmpl $0x1, %ecx
movl %r10d, %ecx
jg 0x45668
popq %rbx
retq
|
/PKRoma[P]libwebp/src/dsp/lossless_enc.c
|
Subsets and Splits
SQL Console for LLM4Binary/decompile-bench
Filters out entries with file names ending in .cpp, providing a basic subset of the dataset that excludes C++ files.