code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/* * copyright (c) 2004 AGAWA Koji * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "mpegvideo_arm.h" static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int level, qmul, qadd; int nCoeffs; DCTELEM *block_orig = block; assert(s->block_last_index[n]>=0); qmul = qscale << 1; if (!s->h263_aic) { if (n < 4) level = block[0] * s->y_dc_scale; else level = block[0] * s->c_dc_scale; qadd = (qscale - 1) | 1; }else{ qadd = 0; level = block[0]; } if(s->ac_pred) nCoeffs=63; else nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; __asm__ volatile ( /* "movd %1, %%mm6 \n\t" //qmul */ /* "packssdw %%mm6, %%mm6 \n\t" */ /* "packssdw %%mm6, %%mm6 \n\t" */ "tbcsth wr6, %[qmul] \n\t" /* "movd %2, %%mm5 \n\t" //qadd */ /* "packssdw %%mm5, %%mm5 \n\t" */ /* "packssdw %%mm5, %%mm5 \n\t" */ "tbcsth wr5, %[qadd] \n\t" "wzero wr7 \n\t" /* "pxor %%mm7, %%mm7 \n\t" */ "wzero wr4 \n\t" /* "pxor %%mm4, %%mm4 \n\t" */ "wsubh wr7, wr5, wr7 \n\t" /* "psubw %%mm5, %%mm7 \n\t" */ "1: \n\t" "wldrd wr2, [%[block]] \n\t" /* "movq (%0, %3), %%mm0 \n\t" */ "wldrd wr3, [%[block], #8] \n\t" /* "movq 8(%0, %3), %%mm1 \n\t" */ "wmulsl wr0, wr6, wr2 \n\t" /* 
"pmullw %%mm6, %%mm0 \n\t" */ "wmulsl wr1, wr6, wr3 \n\t" /* "pmullw %%mm6, %%mm1 \n\t" */ /* "movq (%0, %3), %%mm2 \n\t" */ /* "movq 8(%0, %3), %%mm3 \n\t" */ "wcmpgtsh wr2, wr4, wr2 \n\t" /* "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 */ "wcmpgtsh wr3, wr4, wr2 \n\t" /* "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 */ "wxor wr0, wr2, wr0 \n\t" /* "pxor %%mm2, %%mm0 \n\t" */ "wxor wr1, wr3, wr1 \n\t" /* "pxor %%mm3, %%mm1 \n\t" */ "waddh wr0, wr7, wr0 \n\t" /* "paddw %%mm7, %%mm0 \n\t" */ "waddh wr1, wr7, wr1 \n\t" /* "paddw %%mm7, %%mm1 \n\t" */ "wxor wr2, wr0, wr2 \n\t" /* "pxor %%mm0, %%mm2 \n\t" */ "wxor wr3, wr1, wr3 \n\t" /* "pxor %%mm1, %%mm3 \n\t" */ "wcmpeqh wr0, wr7, wr0 \n\t" /* "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 */ "wcmpeqh wr1, wr7, wr1 \n\t" /* "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0 */ "wandn wr0, wr2, wr0 \n\t" /* "pandn %%mm2, %%mm0 \n\t" */ "wandn wr1, wr3, wr1 \n\t" /* "pandn %%mm3, %%mm1 \n\t" */ "wstrd wr0, [%[block]] \n\t" /* "movq %%mm0, (%0, %3) \n\t" */ "wstrd wr1, [%[block], #8] \n\t" /* "movq %%mm1, 8(%0, %3) \n\t" */ "add %[block], %[block], #16 \n\t" /* "addl $16, %3 \n\t" */ "subs %[i], %[i], #1 \n\t" "bne 1b \n\t" /* "jng 1b \n\t" */ :[block]"+r"(block) :[i]"r"((nCoeffs + 8) / 8), [qmul]"r"(qmul), [qadd]"r"(qadd) :"memory"); block_orig[0] = level; } #if 0 static void dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int nCoeffs; assert(s->block_last_index[n]>=0); if(s->ac_pred) nCoeffs=63; else nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; ippiQuantInvInter_Compact_H263_16s_I(block, nCoeffs+1, qscale); } #endif void MPV_common_init_iwmmxt(MpegEncContext *s) { if (!(mm_flags & FF_MM_IWMMXT)) return; s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt; #if 0 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_iwmmxt; #endif }
123linslouis-android-video-cutter
jni/libavcodec/arm/mpegvideo_iwmmxt.c
C
asf20
5,156
/* * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_ARM_AAC_H #define AVCODEC_ARM_AAC_H #include "config.h" #if HAVE_NEON && HAVE_INLINE_ASM #define VMUL2 VMUL2 static inline float *VMUL2(float *dst, const float *v, unsigned idx, const float *scale) { unsigned v0, v1; __asm__ volatile ("ubfx %0, %4, #0, #4 \n\t" "ubfx %1, %4, #4, #4 \n\t" "ldr %0, [%3, %0, lsl #2] \n\t" "ldr %1, [%3, %1, lsl #2] \n\t" "vld1.32 {d1[]}, [%5,:32] \n\t" "vmov d0, %0, %1 \n\t" "vmul.f32 d0, d0, d1 \n\t" "vst1.32 {d0}, [%2,:64]! \n\t" : "=&r"(v0), "=&r"(v1), "+r"(dst) : "r"(v), "r"(idx), "r"(scale) : "d0", "d1"); return dst; } #define VMUL4 VMUL4 static inline float *VMUL4(float *dst, const float *v, unsigned idx, const float *scale) { unsigned v0, v1, v2, v3; __asm__ volatile ("ubfx %0, %6, #0, #2 \n\t" "ubfx %1, %6, #2, #2 \n\t" "ldr %0, [%5, %0, lsl #2] \n\t" "ubfx %2, %6, #4, #2 \n\t" "ldr %1, [%5, %1, lsl #2] \n\t" "ubfx %3, %6, #6, #2 \n\t" "ldr %2, [%5, %2, lsl #2] \n\t" "vmov d0, %0, %1 \n\t" "ldr %3, [%5, %3, lsl #2] \n\t" "vld1.32 {d2[],d3[]},[%7,:32] \n\t" "vmov d1, %2, %3 \n\t" "vmul.f32 q0, q0, q1 \n\t" "vst1.32 {q0}, [%4,:128]! 
\n\t" : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst) : "r"(v), "r"(idx), "r"(scale) : "d0", "d1", "d2", "d3"); return dst; } #define VMUL2S VMUL2S static inline float *VMUL2S(float *dst, const float *v, unsigned idx, unsigned sign, const float *scale) { unsigned v0, v1, v2, v3; __asm__ volatile ("ubfx %0, %6, #0, #4 \n\t" "ubfx %1, %6, #4, #4 \n\t" "ldr %0, [%5, %0, lsl #2] \n\t" "lsl %2, %8, #30 \n\t" "ldr %1, [%5, %1, lsl #2] \n\t" "lsl %3, %8, #31 \n\t" "vmov d0, %0, %1 \n\t" "bic %2, %2, #1<<30 \n\t" "vld1.32 {d1[]}, [%7,:32] \n\t" "vmov d2, %2, %3 \n\t" "veor d0, d0, d2 \n\t" "vmul.f32 d0, d0, d1 \n\t" "vst1.32 {d0}, [%4,:64]! \n\t" : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst) : "r"(v), "r"(idx), "r"(scale), "r"(sign) : "d0", "d1", "d2"); return dst; } #define VMUL4S VMUL4S static inline float *VMUL4S(float *dst, const float *v, unsigned idx, unsigned sign, const float *scale) { unsigned v0, v1, v2, v3, nz; __asm__ volatile ("vld1.32 {d2[],d3[]},[%9,:32] \n\t" "ubfx %0, %8, #0, #2 \n\t" "ubfx %1, %8, #2, #2 \n\t" "ldr %0, [%7, %0, lsl #2] \n\t" "ubfx %2, %8, #4, #2 \n\t" "ldr %1, [%7, %1, lsl #2] \n\t" "ubfx %3, %8, #6, #2 \n\t" "ldr %2, [%7, %2, lsl #2] \n\t" "vmov d0, %0, %1 \n\t" "ldr %3, [%7, %3, lsl #2] \n\t" "lsr %6, %8, #12 \n\t" "rbit %6, %6 \n\t" "vmov d1, %2, %3 \n\t" "lsls %6, %6, #1 \n\t" "and %0, %5, #1<<31 \n\t" "lslcs %5, %5, #1 \n\t" "lsls %6, %6, #1 \n\t" "and %1, %5, #1<<31 \n\t" "lslcs %5, %5, #1 \n\t" "lsls %6, %6, #1 \n\t" "and %2, %5, #1<<31 \n\t" "lslcs %5, %5, #1 \n\t" "vmov d4, %0, %1 \n\t" "and %3, %5, #1<<31 \n\t" "vmov d5, %2, %3 \n\t" "veor q0, q0, q2 \n\t" "vmul.f32 q0, q0, q1 \n\t" "vst1.32 {q0}, [%4,:128]! \n\t" : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst), "+r"(sign), "=r"(nz) : "r"(v), "r"(idx), "r"(scale) : "d0", "d1", "d2", "d3", "d4", "d5"); return dst; } #endif /* HAVE_NEON && HAVE_INLINE_ASM */ #endif /* AVCODEC_ARM_AAC_H */
123linslouis-android-video-cutter
jni/libavcodec/arm/aac.h
C
asf20
6,249
/* * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" function ff_dca_lfe_fir_neon, export=1 push {r4-r6,lr} add r4, r0, r3, lsl #2 @ out2 add r5, r2, #256*4-16 @ cf1 sub r1, r1, #12 cmp r3, #32 moveq r6, #256/32 movne r6, #256/64 NOVFP vldr d0, [sp, #16] @ scale, bias mov lr, #-16 1: vmov.f32 q2, #0.0 @ v0 vmov.f32 q3, #0.0 @ v1 mov r12, r6 2: vld1.32 {q8}, [r2,:128]! @ cf0 vld1.32 {q9}, [r5,:128], lr @ cf1 vld1.32 {q1}, [r1], lr @ in subs r12, r12, #4 vrev64.32 q10, q8 vmla.f32 q3, q1, q9 vmla.f32 d4, d2, d21 vmla.f32 d5, d3, d20 bne 2b add r1, r1, r6, lsl #2 subs r3, r3, #1 vadd.f32 d4, d4, d5 vadd.f32 d6, d6, d7 vpadd.f32 d4, d4, d6 vdup.32 d5, d0[1] vmla.f32 d5, d4, d0[0] vst1.32 {d5[0]}, [r0,:32]! vst1.32 {d5[1]}, [r4,:32]! bne 1b pop {r4-r6,pc} endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/dcadsp_neon.S
Unix Assembly
asf20
2,206
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" .macro transpose_8x8 r0 r1 r2 r3 r4 r5 r6 r7 vtrn.32 \r0, \r4 vtrn.32 \r1, \r5 vtrn.32 \r2, \r6 vtrn.32 \r3, \r7 vtrn.16 \r0, \r2 vtrn.16 \r1, \r3 vtrn.16 \r4, \r6 vtrn.16 \r5, \r7 vtrn.8 \r0, \r1 vtrn.8 \r2, \r3 vtrn.8 \r4, \r5 vtrn.8 \r6, \r7 .endm .macro transpose_4x4 r0 r1 r2 r3 vtrn.16 \r0, \r2 vtrn.16 \r1, \r3 vtrn.8 \r0, \r1 vtrn.8 \r2, \r3 .endm .macro swap4 r0 r1 r2 r3 r4 r5 r6 r7 vswp \r0, \r4 vswp \r1, \r5 vswp \r2, \r6 vswp \r3, \r7 .endm .macro transpose16_4x4 r0 r1 r2 r3 r4 r5 r6 r7 vtrn.32 \r0, \r2 vtrn.32 \r1, \r3 vtrn.32 \r4, \r6 vtrn.32 \r5, \r7 vtrn.16 \r0, \r1 vtrn.16 \r2, \r3 vtrn.16 \r4, \r5 vtrn.16 \r6, \r7 .endm /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ .macro h264_chroma_mc8 type function ff_\type\()_h264_chroma_mc8_neon, export=1 push {r4-r7, lr} ldrd r4, [sp, #20] .ifc \type,avg mov lr, r0 .endif pld [r1] pld [r1, r2] muls r7, r4, r5 rsb r6, r7, r5, lsl #3 rsb ip, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 sub r4, r4, r5, lsl #3 add r4, r4, #64 beq 2f add r5, r1, r2 vdup.8 d0, r4 lsl r4, r2, #1 vdup.8 d1, ip vld1.64 {d4, d5}, [r1], r4 vdup.8 d2, r6 vld1.64 {d6, d7}, [r5], r4 vdup.8 d3, r7 vext.8 d5, d4, d5, 
#1 vext.8 d7, d6, d7, #1 1: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 vld1.64 {d4, d5}, [r1], r4 vmlal.u8 q8, d6, d2 vext.8 d5, d4, d5, #1 vmlal.u8 q8, d7, d3 vmull.u8 q9, d6, d0 subs r3, r3, #2 vmlal.u8 q9, d7, d1 vmlal.u8 q9, d4, d2 vmlal.u8 q9, d5, d3 vrshrn.u16 d16, q8, #6 vld1.64 {d6, d7}, [r5], r4 pld [r1] vrshrn.u16 d17, q9, #6 .ifc \type,avg vld1.64 {d20}, [lr,:64], r2 vld1.64 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 .endif vext.8 d7, d6, d7, #1 vst1.64 {d16}, [r0,:64], r2 vst1.64 {d17}, [r0,:64], r2 bgt 1b pop {r4-r7, pc} 2: tst r6, r6 add ip, ip, r6 vdup.8 d0, r4 vdup.8 d1, ip beq 4f add r5, r1, r2 lsl r4, r2, #1 vld1.64 {d4}, [r1], r4 vld1.64 {d6}, [r5], r4 3: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d6, d1 vld1.64 {d4}, [r1], r4 vmull.u8 q9, d6, d0 vmlal.u8 q9, d4, d1 vld1.64 {d6}, [r5], r4 vrshrn.u16 d16, q8, #6 vrshrn.u16 d17, q9, #6 .ifc \type,avg vld1.64 {d20}, [lr,:64], r2 vld1.64 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 .endif subs r3, r3, #2 pld [r1] vst1.64 {d16}, [r0,:64], r2 vst1.64 {d17}, [r0,:64], r2 bgt 3b pop {r4-r7, pc} 4: vld1.64 {d4, d5}, [r1], r2 vld1.64 {d6, d7}, [r1], r2 vext.8 d5, d4, d5, #1 vext.8 d7, d6, d7, #1 5: pld [r1] subs r3, r3, #2 vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 vld1.64 {d4, d5}, [r1], r2 vmull.u8 q9, d6, d0 vmlal.u8 q9, d7, d1 pld [r1] vext.8 d5, d4, d5, #1 vrshrn.u16 d16, q8, #6 vrshrn.u16 d17, q9, #6 .ifc \type,avg vld1.64 {d20}, [lr,:64], r2 vld1.64 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 .endif vld1.64 {d6, d7}, [r1], r2 vext.8 d7, d6, d7, #1 vst1.64 {d16}, [r0,:64], r2 vst1.64 {d17}, [r0,:64], r2 bgt 5b pop {r4-r7, pc} endfunc .endm /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ .macro h264_chroma_mc4 type function ff_\type\()_h264_chroma_mc4_neon, export=1 push {r4-r7, lr} ldrd r4, [sp, #20] .ifc \type,avg mov lr, r0 .endif pld [r1] pld [r1, r2] muls r7, r4, r5 rsb r6, r7, r5, lsl #3 rsb ip, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 sub r4, r4, r5, lsl #3 add r4, r4, 
#64 beq 2f add r5, r1, r2 vdup.8 d0, r4 lsl r4, r2, #1 vdup.8 d1, ip vld1.64 {d4}, [r1], r4 vdup.8 d2, r6 vld1.64 {d6}, [r5], r4 vdup.8 d3, r7 vext.8 d5, d4, d5, #1 vext.8 d7, d6, d7, #1 vtrn.32 d4, d5 vtrn.32 d6, d7 vtrn.32 d0, d1 vtrn.32 d2, d3 1: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d6, d2 vld1.64 {d4}, [r1], r4 vext.8 d5, d4, d5, #1 vtrn.32 d4, d5 vmull.u8 q9, d6, d0 vmlal.u8 q9, d4, d2 vld1.64 {d6}, [r5], r4 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 vrshrn.u16 d16, q8, #6 subs r3, r3, #2 pld [r1] .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 .endif vext.8 d7, d6, d7, #1 vtrn.32 d6, d7 vst1.32 {d16[0]}, [r0,:32], r2 vst1.32 {d16[1]}, [r0,:32], r2 bgt 1b pop {r4-r7, pc} 2: tst r6, r6 add ip, ip, r6 vdup.8 d0, r4 vdup.8 d1, ip vtrn.32 d0, d1 beq 4f vext.32 d1, d0, d1, #1 add r5, r1, r2 lsl r4, r2, #1 vld1.32 {d4[0]}, [r1], r4 vld1.32 {d4[1]}, [r5], r4 3: pld [r5] vmull.u8 q8, d4, d0 vld1.32 {d4[0]}, [r1], r4 vmull.u8 q9, d4, d1 vld1.32 {d4[1]}, [r5], r4 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 vrshrn.u16 d16, q8, #6 .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 .endif subs r3, r3, #2 pld [r1] vst1.32 {d16[0]}, [r0,:32], r2 vst1.32 {d16[1]}, [r0,:32], r2 bgt 3b pop {r4-r7, pc} 4: vld1.64 {d4}, [r1], r2 vld1.64 {d6}, [r1], r2 vext.8 d5, d4, d5, #1 vext.8 d7, d6, d7, #1 vtrn.32 d4, d5 vtrn.32 d6, d7 5: vmull.u8 q8, d4, d0 vmull.u8 q9, d6, d0 subs r3, r3, #2 vld1.64 {d4}, [r1], r2 vext.8 d5, d4, d5, #1 vtrn.32 d4, d5 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 pld [r1] vrshrn.u16 d16, q8, #6 .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 .endif vld1.64 {d6}, [r1], r2 vext.8 d7, d6, d7, #1 vtrn.32 d6, d7 pld [r1] vst1.32 {d16[0]}, [r0,:32], r2 vst1.32 {d16[1]}, [r0,:32], r2 bgt 5b pop {r4-r7, pc} endfunc .endm .macro h264_chroma_mc2 type function ff_\type\()_h264_chroma_mc2_neon, export=1 
push {r4-r6, lr} ldr r4, [sp, #16] ldr lr, [sp, #20] pld [r1] pld [r1, r2] orrs r5, r4, lr beq 2f mul r5, r4, lr rsb r6, r5, lr, lsl #3 rsb r12, r5, r4, lsl #3 sub r4, r5, r4, lsl #3 sub r4, r4, lr, lsl #3 add r4, r4, #64 vdup.8 d0, r4 vdup.8 d2, r12 vdup.8 d1, r6 vdup.8 d3, r5 vtrn.16 q0, q1 1: vld1.32 {d4[0]}, [r1], r2 vld1.32 {d4[1]}, [r1], r2 vrev64.32 d5, d4 vld1.32 {d5[1]}, [r1] vext.8 q3, q2, q2, #1 vtrn.16 q2, q3 vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 .ifc \type,avg vld1.16 {d18[0]}, [r0,:16], r2 vld1.16 {d18[1]}, [r0,:16] sub r0, r0, r2 .endif vtrn.32 d16, d17 vadd.i16 d16, d16, d17 vrshrn.u16 d16, q8, #6 .ifc \type,avg vrhadd.u8 d16, d16, d18 .endif vst1.16 {d16[0]}, [r0,:16], r2 vst1.16 {d16[1]}, [r0,:16], r2 subs r3, r3, #2 bgt 1b pop {r4-r6, pc} 2: .ifc \type,put ldrh r5, [r1], r2 strh r5, [r0], r2 ldrh r6, [r1], r2 strh r6, [r0], r2 .else vld1.16 {d16[0]}, [r1], r2 vld1.16 {d16[1]}, [r1], r2 vld1.16 {d18[0]}, [r0,:16], r2 vld1.16 {d18[1]}, [r0,:16] sub r0, r0, r2 vrhadd.u8 d16, d16, d18 vst1.16 {d16[0]}, [r0,:16], r2 vst1.16 {d16[1]}, [r0,:16], r2 .endif subs r3, r3, #2 bgt 2b pop {r4-r6, pc} endfunc .endm .text .align h264_chroma_mc8 put h264_chroma_mc8 avg h264_chroma_mc4 put h264_chroma_mc4 avg h264_chroma_mc2 put h264_chroma_mc2 avg /* H.264 loop filter */ .macro h264_loop_filter_start ldr ip, [sp] tst r2, r2 ldr ip, [ip] tstne r3, r3 vmov.32 d24[0], ip and ip, ip, ip, lsl #16 bxeq lr ands ip, ip, ip, lsl #8 bxlt lr .endm .macro align_push_regs and ip, sp, #15 add ip, ip, #32 sub sp, sp, ip vst1.64 {d12-d15}, [sp,:128] sub sp, sp, #32 vst1.64 {d8-d11}, [sp,:128] .endm .macro align_pop_regs vld1.64 {d8-d11}, [sp,:128]! 
vld1.64 {d12-d15}, [sp,:128], ip .endm .macro h264_loop_filter_luma vdup.8 q11, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 q6, q8, q0 @ abs(p0 - q0) vmovl.u16 q12, d24 vabd.u8 q14, q9, q8 @ abs(p1 - p0) vsli.16 q12, q12, #8 vabd.u8 q15, q1, q0 @ abs(q1 - q0) vsli.32 q12, q12, #16 vclt.u8 q6, q6, q11 @ < alpha vdup.8 q11, r3 @ beta vclt.s8 q7, q12, #0 vclt.u8 q14, q14, q11 @ < beta vclt.u8 q15, q15, q11 @ < beta vbic q6, q6, q7 vabd.u8 q4, q10, q8 @ abs(p2 - p0) vand q6, q6, q14 vabd.u8 q5, q2, q0 @ abs(q2 - q0) vclt.u8 q4, q4, q11 @ < beta vand q6, q6, q15 vclt.u8 q5, q5, q11 @ < beta vand q4, q4, q6 vand q5, q5, q6 vand q12, q12, q6 vrhadd.u8 q14, q8, q0 vsub.i8 q6, q12, q4 vqadd.u8 q7, q9, q12 vhadd.u8 q10, q10, q14 vsub.i8 q6, q6, q5 vhadd.u8 q14, q2, q14 vmin.u8 q7, q7, q10 vqsub.u8 q11, q9, q12 vqadd.u8 q2, q1, q12 vmax.u8 q7, q7, q11 vqsub.u8 q11, q1, q12 vmin.u8 q14, q2, q14 vmovl.u8 q2, d0 vmax.u8 q14, q14, q11 vmovl.u8 q10, d1 vsubw.u8 q2, q2, d16 vsubw.u8 q10, q10, d17 vshl.i16 q2, q2, #2 vshl.i16 q10, q10, #2 vaddw.u8 q2, q2, d18 vaddw.u8 q10, q10, d19 vsubw.u8 q2, q2, d2 vsubw.u8 q10, q10, d3 vrshrn.i16 d4, q2, #3 vrshrn.i16 d5, q10, #3 vbsl q4, q7, q9 vbsl q5, q14, q1 vneg.s8 q7, q6 vmovl.u8 q14, d16 vmin.s8 q2, q2, q6 vmovl.u8 q6, d17 vmax.s8 q2, q2, q7 vmovl.u8 q11, d0 vmovl.u8 q12, d1 vaddw.s8 q14, q14, d4 vaddw.s8 q6, q6, d5 vsubw.s8 q11, q11, d4 vsubw.s8 q12, q12, d5 vqmovun.s16 d16, q14 vqmovun.s16 d17, q6 vqmovun.s16 d0, q11 vqmovun.s16 d1, q12 .endm function ff_h264_v_loop_filter_luma_neon, export=1 h264_loop_filter_start vld1.64 {d0, d1}, [r0,:128], r1 vld1.64 {d2, d3}, [r0,:128], r1 vld1.64 {d4, d5}, [r0,:128], r1 sub r0, r0, r1, lsl #2 sub r0, r0, r1, lsl #1 vld1.64 {d20,d21}, [r0,:128], r1 vld1.64 {d18,d19}, [r0,:128], r1 vld1.64 {d16,d17}, [r0,:128], r1 align_push_regs h264_loop_filter_luma sub r0, r0, r1, lsl #1 vst1.64 {d8, d9}, [r0,:128], r1 vst1.64 {d16,d17}, [r0,:128], r1 vst1.64 {d0, d1}, [r0,:128], r1 vst1.64 {d10,d11}, [r0,:128] 
align_pop_regs bx lr endfunc function ff_h264_h_loop_filter_luma_neon, export=1 h264_loop_filter_start sub r0, r0, #4 vld1.64 {d6}, [r0], r1 vld1.64 {d20}, [r0], r1 vld1.64 {d18}, [r0], r1 vld1.64 {d16}, [r0], r1 vld1.64 {d0}, [r0], r1 vld1.64 {d2}, [r0], r1 vld1.64 {d4}, [r0], r1 vld1.64 {d26}, [r0], r1 vld1.64 {d7}, [r0], r1 vld1.64 {d21}, [r0], r1 vld1.64 {d19}, [r0], r1 vld1.64 {d17}, [r0], r1 vld1.64 {d1}, [r0], r1 vld1.64 {d3}, [r0], r1 vld1.64 {d5}, [r0], r1 vld1.64 {d27}, [r0], r1 transpose_8x8 q3, q10, q9, q8, q0, q1, q2, q13 align_push_regs h264_loop_filter_luma transpose_4x4 q4, q8, q0, q5 sub r0, r0, r1, lsl #4 add r0, r0, #2 vst1.32 {d8[0]}, [r0], r1 vst1.32 {d16[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d10[0]}, [r0], r1 vst1.32 {d8[1]}, [r0], r1 vst1.32 {d16[1]}, [r0], r1 vst1.32 {d0[1]}, [r0], r1 vst1.32 {d10[1]}, [r0], r1 vst1.32 {d9[0]}, [r0], r1 vst1.32 {d17[0]}, [r0], r1 vst1.32 {d1[0]}, [r0], r1 vst1.32 {d11[0]}, [r0], r1 vst1.32 {d9[1]}, [r0], r1 vst1.32 {d17[1]}, [r0], r1 vst1.32 {d1[1]}, [r0], r1 vst1.32 {d11[1]}, [r0], r1 align_pop_regs bx lr endfunc .macro h264_loop_filter_chroma vdup.8 d22, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 d26, d16, d0 @ abs(p0 - q0) vmovl.u8 q2, d0 vabd.u8 d28, d18, d16 @ abs(p1 - p0) vsubw.u8 q2, q2, d16 vsli.16 d24, d24, #8 vshl.i16 q2, q2, #2 vabd.u8 d30, d2, d0 @ abs(q1 - q0) vaddw.u8 q2, q2, d18 vclt.u8 d26, d26, d22 @ < alpha vsubw.u8 q2, q2, d2 vdup.8 d22, r3 @ beta vclt.s8 d25, d24, #0 vrshrn.i16 d4, q2, #3 vclt.u8 d28, d28, d22 @ < beta vbic d26, d26, d25 vclt.u8 d30, d30, d22 @ < beta vand d26, d26, d28 vneg.s8 d25, d24 vand d26, d26, d30 vmin.s8 d4, d4, d24 vmovl.u8 q14, d16 vand d4, d4, d26 vmax.s8 d4, d4, d25 vmovl.u8 q11, d0 vaddw.s8 q14, q14, d4 vsubw.s8 q11, q11, d4 vqmovun.s16 d16, q14 vqmovun.s16 d0, q11 .endm function ff_h264_v_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub r0, r0, r1, lsl #1 vld1.64 {d18}, [r0,:64], r1 vld1.64 {d16}, [r0,:64], r1 vld1.64 {d0}, [r0,:64], r1 
vld1.64 {d2}, [r0,:64] h264_loop_filter_chroma sub r0, r0, r1, lsl #1 vst1.64 {d16}, [r0,:64], r1 vst1.64 {d0}, [r0,:64], r1 bx lr endfunc function ff_h264_h_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub r0, r0, #2 vld1.32 {d18[0]}, [r0], r1 vld1.32 {d16[0]}, [r0], r1 vld1.32 {d0[0]}, [r0], r1 vld1.32 {d2[0]}, [r0], r1 vld1.32 {d18[1]}, [r0], r1 vld1.32 {d16[1]}, [r0], r1 vld1.32 {d0[1]}, [r0], r1 vld1.32 {d2[1]}, [r0], r1 vtrn.16 d18, d0 vtrn.16 d16, d2 vtrn.8 d18, d16 vtrn.8 d0, d2 h264_loop_filter_chroma vtrn.16 d18, d0 vtrn.16 d16, d2 vtrn.8 d18, d16 vtrn.8 d0, d2 sub r0, r0, r1, lsl #3 vst1.32 {d18[0]}, [r0], r1 vst1.32 {d16[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d2[0]}, [r0], r1 vst1.32 {d18[1]}, [r0], r1 vst1.32 {d16[1]}, [r0], r1 vst1.32 {d0[1]}, [r0], r1 vst1.32 {d2[1]}, [r0], r1 bx lr endfunc /* H.264 qpel MC */ .macro lowpass_const r movw \r, #5 movt \r, #20 vmov.32 d6[0], \r .endm .macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1 .if \narrow t0 .req q0 t1 .req q8 .else t0 .req \d0 t1 .req \d1 .endif vext.8 d2, \r0, \r1, #2 vext.8 d3, \r0, \r1, #3 vaddl.u8 q1, d2, d3 vext.8 d4, \r0, \r1, #1 vext.8 d5, \r0, \r1, #4 vaddl.u8 q2, d4, d5 vext.8 d30, \r0, \r1, #5 vaddl.u8 t0, \r0, d30 vext.8 d18, \r2, \r3, #2 vmla.i16 t0, q1, d6[1] vext.8 d19, \r2, \r3, #3 vaddl.u8 q9, d18, d19 vext.8 d20, \r2, \r3, #1 vmls.i16 t0, q2, d6[0] vext.8 d21, \r2, \r3, #4 vaddl.u8 q10, d20, d21 vext.8 d31, \r2, \r3, #5 vaddl.u8 t1, \r2, d31 vmla.i16 t1, q9, d6[1] vmls.i16 t1, q10, d6[0] .if \narrow vqrshrun.s16 \d0, t0, #5 vqrshrun.s16 \d1, t1, #5 .endif .unreq t0 .unreq t1 .endm .macro lowpass_8_1 r0, r1, d0, narrow=1 .if \narrow t0 .req q0 .else t0 .req \d0 .endif vext.8 d2, \r0, \r1, #2 vext.8 d3, \r0, \r1, #3 vaddl.u8 q1, d2, d3 vext.8 d4, \r0, \r1, #1 vext.8 d5, \r0, \r1, #4 vaddl.u8 q2, d4, d5 vext.8 d30, \r0, \r1, #5 vaddl.u8 t0, \r0, d30 vmla.i16 t0, q1, d6[1] vmls.i16 t0, q2, d6[0] .if \narrow vqrshrun.s16 \d0, t0, #5 .endif .unreq t0 .endm .macro 
lowpass_8.16 r0, r1, l0, h0, l1, h1, d vext.16 q1, \r0, \r1, #2 vext.16 q0, \r0, \r1, #3 vaddl.s16 q9, d2, d0 vext.16 q2, \r0, \r1, #1 vaddl.s16 q1, d3, d1 vext.16 q3, \r0, \r1, #4 vaddl.s16 q10, d4, d6 vext.16 \r1, \r0, \r1, #5 vaddl.s16 q2, d5, d7 vaddl.s16 q0, \h0, \h1 vaddl.s16 q8, \l0, \l1 vshl.i32 q3, q9, #4 vshl.i32 q9, q9, #2 vshl.i32 q15, q10, #2 vadd.i32 q9, q9, q3 vadd.i32 q10, q10, q15 vshl.i32 q3, q1, #4 vshl.i32 q1, q1, #2 vshl.i32 q15, q2, #2 vadd.i32 q1, q1, q3 vadd.i32 q2, q2, q15 vadd.i32 q9, q9, q8 vsub.i32 q9, q9, q10 vadd.i32 q1, q1, q0 vsub.i32 q1, q1, q2 vrshrn.s32 d18, q9, #10 vrshrn.s32 d19, q1, #10 vqmovun.s16 \d, q9 .endm function put_h264_qpel16_h_lowpass_neon_packed mov r4, lr mov ip, #16 mov r3, #8 bl put_h264_qpel8_h_lowpass_neon sub r1, r1, r2, lsl #4 add r1, r1, #8 mov ip, #16 mov lr, r4 b put_h264_qpel8_h_lowpass_neon endfunc .macro h264_qpel_h_lowpass type function \type\()_h264_qpel16_h_lowpass_neon push {lr} mov ip, #16 bl \type\()_h264_qpel8_h_lowpass_neon sub r0, r0, r3, lsl #4 sub r1, r1, r2, lsl #4 add r0, r0, #8 add r1, r1, #8 mov ip, #16 pop {lr} endfunc function \type\()_h264_qpel8_h_lowpass_neon 1: vld1.64 {d0, d1}, [r1], r2 vld1.64 {d16,d17}, [r1], r2 subs ip, ip, #2 lowpass_8 d0, d1, d16, d17, d0, d16 .ifc \type,avg vld1.8 {d2}, [r0,:64], r3 vrhadd.u8 d0, d0, d2 vld1.8 {d3}, [r0,:64] vrhadd.u8 d16, d16, d3 sub r0, r0, r3 .endif vst1.64 {d0}, [r0,:64], r3 vst1.64 {d16}, [r0,:64], r3 bne 1b bx lr endfunc .endm h264_qpel_h_lowpass put h264_qpel_h_lowpass avg .macro h264_qpel_h_lowpass_l2 type function \type\()_h264_qpel16_h_lowpass_l2_neon push {lr} mov ip, #16 bl \type\()_h264_qpel8_h_lowpass_l2_neon sub r0, r0, r2, lsl #4 sub r1, r1, r2, lsl #4 sub r3, r3, r2, lsl #4 add r0, r0, #8 add r1, r1, #8 add r3, r3, #8 mov ip, #16 pop {lr} endfunc function \type\()_h264_qpel8_h_lowpass_l2_neon 1: vld1.64 {d0, d1}, [r1], r2 vld1.64 {d16,d17}, [r1], r2 vld1.64 {d28}, [r3], r2 vld1.64 {d29}, [r3], r2 subs ip, ip, #2 lowpass_8 d0, 
d1, d16, d17, d0, d1 vrhadd.u8 q0, q0, q14 .ifc \type,avg vld1.8 {d2}, [r0,:64], r2 vrhadd.u8 d0, d0, d2 vld1.8 {d3}, [r0,:64] vrhadd.u8 d1, d1, d3 sub r0, r0, r2 .endif vst1.64 {d0}, [r0,:64], r2 vst1.64 {d1}, [r0,:64], r2 bne 1b bx lr endfunc .endm h264_qpel_h_lowpass_l2 put h264_qpel_h_lowpass_l2 avg function put_h264_qpel16_v_lowpass_neon_packed mov r4, lr mov r2, #8 bl put_h264_qpel8_v_lowpass_neon sub r1, r1, r3, lsl #2 bl put_h264_qpel8_v_lowpass_neon sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 bl put_h264_qpel8_v_lowpass_neon sub r1, r1, r3, lsl #2 mov lr, r4 b put_h264_qpel8_v_lowpass_neon endfunc .macro h264_qpel_v_lowpass type function \type\()_h264_qpel16_v_lowpass_neon mov r4, lr bl \type\()_h264_qpel8_v_lowpass_neon sub r1, r1, r3, lsl #2 bl \type\()_h264_qpel8_v_lowpass_neon sub r0, r0, r2, lsl #4 add r0, r0, #8 sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 bl \type\()_h264_qpel8_v_lowpass_neon sub r1, r1, r3, lsl #2 mov lr, r4 endfunc function \type\()_h264_qpel8_v_lowpass_neon vld1.64 {d8}, [r1], r3 vld1.64 {d10}, [r1], r3 vld1.64 {d12}, [r1], r3 vld1.64 {d14}, [r1], r3 vld1.64 {d22}, [r1], r3 vld1.64 {d24}, [r1], r3 vld1.64 {d26}, [r1], r3 vld1.64 {d28}, [r1], r3 vld1.64 {d9}, [r1], r3 vld1.64 {d11}, [r1], r3 vld1.64 {d13}, [r1], r3 vld1.64 {d15}, [r1], r3 vld1.64 {d23}, [r1] transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14 lowpass_8 d8, d9, d10, d11, d8, d10 lowpass_8 d12, d13, d14, d15, d12, d14 lowpass_8 d22, d23, d24, d25, d22, d24 lowpass_8 d26, d27, d28, d29, d26, d28 transpose_8x8 d8, d10, d12, d14, d22, d24, d26, d28 .ifc \type,avg vld1.8 {d9}, [r0,:64], r2 vrhadd.u8 d8, d8, d9 vld1.8 {d11}, [r0,:64], r2 vrhadd.u8 d10, d10, d11 vld1.8 {d13}, [r0,:64], r2 vrhadd.u8 d12, d12, d13 vld1.8 {d15}, [r0,:64], r2 vrhadd.u8 d14, d14, d15 vld1.8 {d23}, [r0,:64], r2 vrhadd.u8 d22, d22, d23 vld1.8 {d25}, [r0,:64], r2 vrhadd.u8 d24, d24, d25 vld1.8 {d27}, [r0,:64], r2 vrhadd.u8 d26, d26, d27 vld1.8 {d29}, [r0,:64], r2 
vrhadd.u8 d28, d28, d29 sub r0, r0, r2, lsl #3 .endif vst1.64 {d8}, [r0,:64], r2 vst1.64 {d10}, [r0,:64], r2 vst1.64 {d12}, [r0,:64], r2 vst1.64 {d14}, [r0,:64], r2 vst1.64 {d22}, [r0,:64], r2 vst1.64 {d24}, [r0,:64], r2 vst1.64 {d26}, [r0,:64], r2 vst1.64 {d28}, [r0,:64], r2 bx lr endfunc .endm h264_qpel_v_lowpass put h264_qpel_v_lowpass avg .macro h264_qpel_v_lowpass_l2 type function \type\()_h264_qpel16_v_lowpass_l2_neon mov r4, lr bl \type\()_h264_qpel8_v_lowpass_l2_neon sub r1, r1, r3, lsl #2 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub r0, r0, r3, lsl #4 sub ip, ip, r2, lsl #4 add r0, r0, #8 add ip, ip, #8 sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub r1, r1, r3, lsl #2 mov lr, r4 endfunc function \type\()_h264_qpel8_v_lowpass_l2_neon vld1.64 {d8}, [r1], r3 vld1.64 {d10}, [r1], r3 vld1.64 {d12}, [r1], r3 vld1.64 {d14}, [r1], r3 vld1.64 {d22}, [r1], r3 vld1.64 {d24}, [r1], r3 vld1.64 {d26}, [r1], r3 vld1.64 {d28}, [r1], r3 vld1.64 {d9}, [r1], r3 vld1.64 {d11}, [r1], r3 vld1.64 {d13}, [r1], r3 vld1.64 {d15}, [r1], r3 vld1.64 {d23}, [r1] transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14 lowpass_8 d8, d9, d10, d11, d8, d9 lowpass_8 d12, d13, d14, d15, d12, d13 lowpass_8 d22, d23, d24, d25, d22, d23 lowpass_8 d26, d27, d28, d29, d26, d27 transpose_8x8 d8, d9, d12, d13, d22, d23, d26, d27 vld1.64 {d0}, [ip], r2 vld1.64 {d1}, [ip], r2 vld1.64 {d2}, [ip], r2 vld1.64 {d3}, [ip], r2 vld1.64 {d4}, [ip], r2 vrhadd.u8 q0, q0, q4 vld1.64 {d5}, [ip], r2 vrhadd.u8 q1, q1, q6 vld1.64 {d10}, [ip], r2 vrhadd.u8 q2, q2, q11 vld1.64 {d11}, [ip], r2 vrhadd.u8 q5, q5, q13 .ifc \type,avg vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d0, d0, d16 vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d1, d1, d17 vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d2, d2, d16 vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d3, d3, d17 vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d4, d4, d16 vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d5, d5, d17 vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d10, d10, 
d16 vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d11, d11, d17 sub r0, r0, r3, lsl #3 .endif vst1.64 {d0}, [r0,:64], r3 vst1.64 {d1}, [r0,:64], r3 vst1.64 {d2}, [r0,:64], r3 vst1.64 {d3}, [r0,:64], r3 vst1.64 {d4}, [r0,:64], r3 vst1.64 {d5}, [r0,:64], r3 vst1.64 {d10}, [r0,:64], r3 vst1.64 {d11}, [r0,:64], r3 bx lr endfunc .endm h264_qpel_v_lowpass_l2 put h264_qpel_v_lowpass_l2 avg function put_h264_qpel8_hv_lowpass_neon_top lowpass_const ip mov ip, #12 1: vld1.64 {d0, d1}, [r1], r3 vld1.64 {d16,d17}, [r1], r3 subs ip, ip, #2 lowpass_8 d0, d1, d16, d17, q11, q12, narrow=0 vst1.64 {d22-d25}, [r4,:128]! bne 1b vld1.64 {d0, d1}, [r1] lowpass_8_1 d0, d1, q12, narrow=0 mov ip, #-16 add r4, r4, ip vld1.64 {d30,d31}, [r4,:128], ip vld1.64 {d20,d21}, [r4,:128], ip vld1.64 {d18,d19}, [r4,:128], ip vld1.64 {d16,d17}, [r4,:128], ip vld1.64 {d14,d15}, [r4,:128], ip vld1.64 {d12,d13}, [r4,:128], ip vld1.64 {d10,d11}, [r4,:128], ip vld1.64 {d8, d9}, [r4,:128], ip vld1.64 {d6, d7}, [r4,:128], ip vld1.64 {d4, d5}, [r4,:128], ip vld1.64 {d2, d3}, [r4,:128], ip vld1.64 {d0, d1}, [r4,:128] swap4 d1, d3, d5, d7, d8, d10, d12, d14 transpose16_4x4 q0, q1, q2, q3, q4, q5, q6, q7 swap4 d17, d19, d21, d31, d24, d26, d28, d22 transpose16_4x4 q8, q9, q10, q15, q12, q13, q14, q11 vst1.64 {d30,d31}, [r4,:128]! vst1.64 {d6, d7}, [r4,:128]! vst1.64 {d20,d21}, [r4,:128]! vst1.64 {d4, d5}, [r4,:128]! vst1.64 {d18,d19}, [r4,:128]! vst1.64 {d2, d3}, [r4,:128]! vst1.64 {d16,d17}, [r4,:128]! 
vst1.64 {d0, d1}, [r4,:128] lowpass_8.16 q4, q12, d8, d9, d24, d25, d8 lowpass_8.16 q5, q13, d10, d11, d26, d27, d9 lowpass_8.16 q6, q14, d12, d13, d28, d29, d10 lowpass_8.16 q7, q11, d14, d15, d22, d23, d11 vld1.64 {d16,d17}, [r4,:128], ip vld1.64 {d30,d31}, [r4,:128], ip lowpass_8.16 q8, q15, d16, d17, d30, d31, d12 vld1.64 {d16,d17}, [r4,:128], ip vld1.64 {d30,d31}, [r4,:128], ip lowpass_8.16 q8, q15, d16, d17, d30, d31, d13 vld1.64 {d16,d17}, [r4,:128], ip vld1.64 {d30,d31}, [r4,:128], ip lowpass_8.16 q8, q15, d16, d17, d30, d31, d14 vld1.64 {d16,d17}, [r4,:128], ip vld1.64 {d30,d31}, [r4,:128] lowpass_8.16 q8, q15, d16, d17, d30, d31, d15 transpose_8x8 d12, d13, d14, d15, d8, d9, d10, d11 bx lr endfunc .macro h264_qpel8_hv_lowpass type function \type\()_h264_qpel8_hv_lowpass_neon mov r10, lr bl put_h264_qpel8_hv_lowpass_neon_top .ifc \type,avg vld1.8 {d0}, [r0,:64], r2 vrhadd.u8 d12, d12, d0 vld1.8 {d1}, [r0,:64], r2 vrhadd.u8 d13, d13, d1 vld1.8 {d2}, [r0,:64], r2 vrhadd.u8 d14, d14, d2 vld1.8 {d3}, [r0,:64], r2 vrhadd.u8 d15, d15, d3 vld1.8 {d4}, [r0,:64], r2 vrhadd.u8 d8, d8, d4 vld1.8 {d5}, [r0,:64], r2 vrhadd.u8 d9, d9, d5 vld1.8 {d6}, [r0,:64], r2 vrhadd.u8 d10, d10, d6 vld1.8 {d7}, [r0,:64], r2 vrhadd.u8 d11, d11, d7 sub r0, r0, r2, lsl #3 .endif vst1.64 {d12}, [r0,:64], r2 vst1.64 {d13}, [r0,:64], r2 vst1.64 {d14}, [r0,:64], r2 vst1.64 {d15}, [r0,:64], r2 vst1.64 {d8}, [r0,:64], r2 vst1.64 {d9}, [r0,:64], r2 vst1.64 {d10}, [r0,:64], r2 vst1.64 {d11}, [r0,:64], r2 mov lr, r10 bx lr endfunc .endm h264_qpel8_hv_lowpass put h264_qpel8_hv_lowpass avg .macro h264_qpel8_hv_lowpass_l2 type function \type\()_h264_qpel8_hv_lowpass_l2_neon mov r10, lr bl put_h264_qpel8_hv_lowpass_neon_top vld1.64 {d0, d1}, [r2,:128]! vld1.64 {d2, d3}, [r2,:128]! vrhadd.u8 q0, q0, q6 vld1.64 {d4, d5}, [r2,:128]! vrhadd.u8 q1, q1, q7 vld1.64 {d6, d7}, [r2,:128]! 
vrhadd.u8 q2, q2, q4 vrhadd.u8 q3, q3, q5 .ifc \type,avg vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d0, d0, d16 vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d1, d1, d17 vld1.8 {d18}, [r0,:64], r3 vrhadd.u8 d2, d2, d18 vld1.8 {d19}, [r0,:64], r3 vrhadd.u8 d3, d3, d19 vld1.8 {d20}, [r0,:64], r3 vrhadd.u8 d4, d4, d20 vld1.8 {d21}, [r0,:64], r3 vrhadd.u8 d5, d5, d21 vld1.8 {d22}, [r0,:64], r3 vrhadd.u8 d6, d6, d22 vld1.8 {d23}, [r0,:64], r3 vrhadd.u8 d7, d7, d23 sub r0, r0, r3, lsl #3 .endif vst1.64 {d0}, [r0,:64], r3 vst1.64 {d1}, [r0,:64], r3 vst1.64 {d2}, [r0,:64], r3 vst1.64 {d3}, [r0,:64], r3 vst1.64 {d4}, [r0,:64], r3 vst1.64 {d5}, [r0,:64], r3 vst1.64 {d6}, [r0,:64], r3 vst1.64 {d7}, [r0,:64], r3 mov lr, r10 bx lr endfunc .endm h264_qpel8_hv_lowpass_l2 put h264_qpel8_hv_lowpass_l2 avg .macro h264_qpel16_hv type function \type\()_h264_qpel16_hv_lowpass_neon mov r9, lr bl \type\()_h264_qpel8_hv_lowpass_neon sub r1, r1, r3, lsl #2 bl \type\()_h264_qpel8_hv_lowpass_neon sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 sub r0, r0, r2, lsl #4 add r0, r0, #8 bl \type\()_h264_qpel8_hv_lowpass_neon sub r1, r1, r3, lsl #2 mov lr, r9 b \type\()_h264_qpel8_hv_lowpass_neon endfunc function \type\()_h264_qpel16_hv_lowpass_l2_neon mov r9, lr sub r2, r4, #256 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub r1, r1, r3, lsl #2 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 sub r0, r0, r3, lsl #4 add r0, r0, #8 bl \type\()_h264_qpel8_hv_lowpass_l2_neon sub r1, r1, r3, lsl #2 mov lr, r9 b \type\()_h264_qpel8_hv_lowpass_l2_neon endfunc .endm h264_qpel16_hv put h264_qpel16_hv avg .macro h264_qpel8 type function ff_\type\()_h264_qpel8_mc10_neon, export=1 lowpass_const r3 mov r3, r1 sub r1, r1, #2 mov ip, #8 b \type\()_h264_qpel8_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel8_mc20_neon, export=1 lowpass_const r3 sub r1, r1, #2 mov r3, r2 mov ip, #8 b \type\()_h264_qpel8_h_lowpass_neon endfunc function 
ff_\type\()_h264_qpel8_mc30_neon, export=1 lowpass_const r3 add r3, r1, #1 sub r1, r1, #2 mov ip, #8 b \type\()_h264_qpel8_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel8_mc01_neon, export=1 push {lr} mov ip, r1 \type\()_h264_qpel8_mc01: lowpass_const r3 mov r3, r2 sub r1, r1, r2, lsl #1 vpush {d8-d15} bl \type\()_h264_qpel8_v_lowpass_l2_neon vpop {d8-d15} pop {pc} endfunc function ff_\type\()_h264_qpel8_mc11_neon, export=1 push {r0, r1, r11, lr} \type\()_h264_qpel8_mc11: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #64 mov r0, sp sub r1, r1, #2 mov r3, #8 mov ip, #8 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon ldrd r0, [r11] mov r3, r2 add ip, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r11, pc} endfunc function ff_\type\()_h264_qpel8_mc21_neon, export=1 push {r0, r1, r4, r10, r11, lr} \type\()_h264_qpel8_mc21: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #(8*8+16*12) sub r1, r1, #2 mov r3, #8 mov r0, sp mov ip, #8 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon mov r4, r0 ldrd r0, [r11] sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 sub r2, r4, #64 bl \type\()_h264_qpel8_hv_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r4, r10, r11, pc} endfunc function ff_\type\()_h264_qpel8_mc31_neon, export=1 add r1, r1, #1 push {r0, r1, r11, lr} sub r1, r1, #1 b \type\()_h264_qpel8_mc11 endfunc function ff_\type\()_h264_qpel8_mc02_neon, export=1 push {lr} lowpass_const r3 sub r1, r1, r2, lsl #1 mov r3, r2 vpush {d8-d15} bl \type\()_h264_qpel8_v_lowpass_neon vpop {d8-d15} pop {pc} endfunc function ff_\type\()_h264_qpel8_mc12_neon, export=1 push {r0, r1, r4, r10, r11, lr} \type\()_h264_qpel8_mc12: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #(8*8+16*12) sub r1, r1, r2, lsl #1 mov r3, r2 mov r2, #8 mov r0, sp vpush {d8-d15} bl put_h264_qpel8_v_lowpass_neon mov r4, r0 ldrd r0, [r11] sub r1, r1, r3, lsl #1 sub r1, r1, #2 sub r2, r4, #64 bl 
\type\()_h264_qpel8_hv_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r4, r10, r11, pc} endfunc function ff_\type\()_h264_qpel8_mc22_neon, export=1 push {r4, r10, r11, lr} mov r11, sp bic sp, sp, #15 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 sub sp, sp, #(16*12) mov r4, sp vpush {d8-d15} bl \type\()_h264_qpel8_hv_lowpass_neon vpop {d8-d15} mov sp, r11 pop {r4, r10, r11, pc} endfunc function ff_\type\()_h264_qpel8_mc32_neon, export=1 push {r0, r1, r4, r10, r11, lr} add r1, r1, #1 b \type\()_h264_qpel8_mc12 endfunc function ff_\type\()_h264_qpel8_mc03_neon, export=1 push {lr} add ip, r1, r2 b \type\()_h264_qpel8_mc01 endfunc function ff_\type\()_h264_qpel8_mc13_neon, export=1 push {r0, r1, r11, lr} add r1, r1, r2 b \type\()_h264_qpel8_mc11 endfunc function ff_\type\()_h264_qpel8_mc23_neon, export=1 push {r0, r1, r4, r10, r11, lr} add r1, r1, r2 b \type\()_h264_qpel8_mc21 endfunc function ff_\type\()_h264_qpel8_mc33_neon, export=1 add r1, r1, #1 push {r0, r1, r11, lr} add r1, r1, r2 sub r1, r1, #1 b \type\()_h264_qpel8_mc11 endfunc .endm h264_qpel8 put h264_qpel8 avg .macro h264_qpel16 type function ff_\type\()_h264_qpel16_mc10_neon, export=1 lowpass_const r3 mov r3, r1 sub r1, r1, #2 b \type\()_h264_qpel16_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel16_mc20_neon, export=1 lowpass_const r3 sub r1, r1, #2 mov r3, r2 b \type\()_h264_qpel16_h_lowpass_neon endfunc function ff_\type\()_h264_qpel16_mc30_neon, export=1 lowpass_const r3 add r3, r1, #1 sub r1, r1, #2 b \type\()_h264_qpel16_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel16_mc01_neon, export=1 push {r4, lr} mov ip, r1 \type\()_h264_qpel16_mc01: lowpass_const r3 mov r3, r2 sub r1, r1, r2, lsl #1 vpush {d8-d15} bl \type\()_h264_qpel16_v_lowpass_l2_neon vpop {d8-d15} pop {r4, pc} endfunc function ff_\type\()_h264_qpel16_mc11_neon, export=1 push {r0, r1, r4, r11, lr} \type\()_h264_qpel16_mc11: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #256 mov r0, sp sub r1, r1, #2 mov 
r3, #16 vpush {d8-d15} bl put_h264_qpel16_h_lowpass_neon ldrd r0, [r11] mov r3, r2 add ip, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #16 bl \type\()_h264_qpel16_v_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r4, r11, pc} endfunc function ff_\type\()_h264_qpel16_mc21_neon, export=1 push {r0, r1, r4-r5, r9-r11, lr} \type\()_h264_qpel16_mc21: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #(16*16+16*12) sub r1, r1, #2 mov r0, sp vpush {d8-d15} bl put_h264_qpel16_h_lowpass_neon_packed mov r4, r0 ldrd r0, [r11] sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 bl \type\()_h264_qpel16_hv_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r4-r5, r9-r11, pc} endfunc function ff_\type\()_h264_qpel16_mc31_neon, export=1 add r1, r1, #1 push {r0, r1, r4, r11, lr} sub r1, r1, #1 b \type\()_h264_qpel16_mc11 endfunc function ff_\type\()_h264_qpel16_mc02_neon, export=1 push {r4, lr} lowpass_const r3 sub r1, r1, r2, lsl #1 mov r3, r2 vpush {d8-d15} bl \type\()_h264_qpel16_v_lowpass_neon vpop {d8-d15} pop {r4, pc} endfunc function ff_\type\()_h264_qpel16_mc12_neon, export=1 push {r0, r1, r4-r5, r9-r11, lr} \type\()_h264_qpel16_mc12: lowpass_const r3 mov r11, sp bic sp, sp, #15 sub sp, sp, #(16*16+16*12) sub r1, r1, r2, lsl #1 mov r0, sp mov r3, r2 vpush {d8-d15} bl put_h264_qpel16_v_lowpass_neon_packed mov r4, r0 ldrd r0, [r11] sub r1, r1, r3, lsl #1 sub r1, r1, #2 mov r2, r3 bl \type\()_h264_qpel16_hv_lowpass_l2_neon vpop {d8-d15} add sp, r11, #8 pop {r4-r5, r9-r11, pc} endfunc function ff_\type\()_h264_qpel16_mc22_neon, export=1 push {r4, r9-r11, lr} lowpass_const r3 mov r11, sp bic sp, sp, #15 sub r1, r1, r2, lsl #1 sub r1, r1, #2 mov r3, r2 sub sp, sp, #(16*12) mov r4, sp vpush {d8-d15} bl \type\()_h264_qpel16_hv_lowpass_neon vpop {d8-d15} mov sp, r11 pop {r4, r9-r11, pc} endfunc function ff_\type\()_h264_qpel16_mc32_neon, export=1 push {r0, r1, r4-r5, r9-r11, lr} add r1, r1, #1 b \type\()_h264_qpel16_mc12 endfunc function ff_\type\()_h264_qpel16_mc03_neon, export=1 
push {r4, lr} add ip, r1, r2 b \type\()_h264_qpel16_mc01 endfunc function ff_\type\()_h264_qpel16_mc13_neon, export=1 push {r0, r1, r4, r11, lr} add r1, r1, r2 b \type\()_h264_qpel16_mc11 endfunc function ff_\type\()_h264_qpel16_mc23_neon, export=1 push {r0, r1, r4-r5, r9-r11, lr} add r1, r1, r2 b \type\()_h264_qpel16_mc21 endfunc function ff_\type\()_h264_qpel16_mc33_neon, export=1 add r1, r1, #1 push {r0, r1, r4, r11, lr} add r1, r1, r2 sub r1, r1, #1 b \type\()_h264_qpel16_mc11 endfunc .endm h264_qpel16 put h264_qpel16 avg @ Biweighted prediction .macro biweight_16 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q2, q8 vmov q3, q8 1: subs ip, ip, #2 vld1.8 {d20-d21},[r0,:128], r2 \macd q2, d0, d20 pld [r0] \macd q3, d0, d21 vld1.8 {d22-d23},[r1,:128], r2 \macs q2, d1, d22 pld [r1] \macs q3, d1, d23 vmov q12, q8 vld1.8 {d28-d29},[r0,:128], r2 vmov q13, q8 \macd q12, d0, d28 pld [r0] \macd q13, d0, d29 vld1.8 {d30-d31},[r1,:128], r2 \macs q12, d1, d30 pld [r1] \macs q13, d1, d31 vshl.s16 q2, q2, q9 vshl.s16 q3, q3, q9 vqmovun.s16 d4, q2 vqmovun.s16 d5, q3 vshl.s16 q12, q12, q9 vshl.s16 q13, q13, q9 vqmovun.s16 d24, q12 vqmovun.s16 d25, q13 vmov q3, q8 vst1.8 {d4- d5}, [r6,:128], r2 vmov q2, q8 vst1.8 {d24-d25},[r6,:128], r2 bne 1b pop {r4-r6, pc} .endm .macro biweight_8 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 vmov q10, q8 1: subs ip, ip, #2 vld1.8 {d4},[r0,:64], r2 \macd q1, d0, d4 pld [r0] vld1.8 {d5},[r1,:64], r2 \macs q1, d1, d5 pld [r1] vld1.8 {d6},[r0,:64], r2 \macd q10, d0, d6 pld [r0] vld1.8 {d7},[r1,:64], r2 \macs q10, d1, d7 pld [r1] vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.8 {d2},[r6,:64], r2 vmov q1, q8 vst1.8 {d4},[r6,:64], r2 bne 1b pop {r4-r6, pc} .endm .macro biweight_4 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 vmov q10, q8 1: subs ip, ip, #4 vld1.32 {d4[0]},[r0,:32], r2 vld1.32 {d4[1]},[r0,:32], r2 \macd q1, d0, d4 pld [r0] vld1.32 {d5[0]},[r1,:32], r2 vld1.32 
{d5[1]},[r1,:32], r2 \macs q1, d1, d5 pld [r1] blt 2f vld1.32 {d6[0]},[r0,:32], r2 vld1.32 {d6[1]},[r0,:32], r2 \macd q10, d0, d6 pld [r0] vld1.32 {d7[0]},[r1,:32], r2 vld1.32 {d7[1]},[r1,:32], r2 \macs q10, d1, d7 pld [r1] vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.32 {d2[0]},[r6,:32], r2 vst1.32 {d2[1]},[r6,:32], r2 vmov q1, q8 vst1.32 {d4[0]},[r6,:32], r2 vst1.32 {d4[1]},[r6,:32], r2 bne 1b pop {r4-r6, pc} 2: vshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vst1.32 {d2[0]},[r6,:32], r2 vst1.32 {d2[1]},[r6,:32], r2 pop {r4-r6, pc} .endm .macro biweight_func w function biweight_h264_pixels_\w\()_neon push {r4-r6, lr} add r4, sp, #16 ldm r4, {r4-r6} lsr lr, r4, #31 add r6, r6, #1 eors lr, lr, r5, lsr #30 orr r6, r6, #1 vdup.16 q9, r3 lsl r6, r6, r3 vmvn q9, q9 vdup.16 q8, r6 mov r6, r0 beq 10f subs lr, lr, #1 beq 20f subs lr, lr, #1 beq 30f b 40f 10: biweight_\w vmlal.u8, vmlal.u8 20: rsb r4, r4, #0 biweight_\w vmlal.u8, vmlsl.u8 30: rsb r4, r4, #0 rsb r5, r5, #0 biweight_\w vmlsl.u8, vmlsl.u8 40: rsb r5, r5, #0 biweight_\w vmlsl.u8, vmlal.u8 endfunc .endm .macro biweight_entry w, h, b=1 function ff_biweight_h264_pixels_\w\()x\h\()_neon, export=1 mov ip, #\h .if \b b biweight_h264_pixels_\w\()_neon .endif endfunc .endm biweight_entry 16, 8 biweight_entry 16, 16, b=0 biweight_func 16 biweight_entry 8, 16 biweight_entry 8, 4 biweight_entry 8, 8, b=0 biweight_func 8 biweight_entry 4, 8 biweight_entry 4, 2 biweight_entry 4, 4, b=0 biweight_func 4 @ Weighted prediction .macro weight_16 add vdup.8 d0, r3 1: subs ip, ip, #2 vld1.8 {d20-d21},[r0,:128], r1 vmull.u8 q2, d0, d20 pld [r0] vmull.u8 q3, d0, d21 vld1.8 {d28-d29},[r0,:128], r1 vmull.u8 q12, d0, d28 pld [r0] vmull.u8 q13, d0, d29 \add q2, q8, q2 vrshl.s16 q2, q2, q9 \add q3, q8, q3 vrshl.s16 q3, q3, q9 vqmovun.s16 d4, q2 vqmovun.s16 d5, q3 \add q12, q8, q12 vrshl.s16 q12, q12, q9 \add q13, q8, q13 vrshl.s16 q13, q13, q9 vqmovun.s16 d24, q12 vqmovun.s16 d25, q13 vst1.8 
{d4- d5}, [r4,:128], r1 vst1.8 {d24-d25},[r4,:128], r1 bne 1b pop {r4, pc} .endm .macro weight_8 add vdup.8 d0, r3 1: subs ip, ip, #2 vld1.8 {d4},[r0,:64], r1 vmull.u8 q1, d0, d4 pld [r0] vld1.8 {d6},[r0,:64], r1 vmull.u8 q10, d0, d6 \add q1, q8, q1 pld [r0] vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 \add q10, q8, q10 vrshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vst1.8 {d2},[r4,:64], r1 vst1.8 {d4},[r4,:64], r1 bne 1b pop {r4, pc} .endm .macro weight_4 add vdup.8 d0, r3 vmov q1, q8 vmov q10, q8 1: subs ip, ip, #4 vld1.32 {d4[0]},[r0,:32], r1 vld1.32 {d4[1]},[r0,:32], r1 vmull.u8 q1, d0, d4 pld [r0] blt 2f vld1.32 {d6[0]},[r0,:32], r1 vld1.32 {d6[1]},[r0,:32], r1 vmull.u8 q10, d0, d6 pld [r0] \add q1, q8, q1 vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 \add q10, q8, q10 vrshl.s16 q10, q10, q9 vqmovun.s16 d4, q10 vmov q10, q8 vst1.32 {d2[0]},[r4,:32], r1 vst1.32 {d2[1]},[r4,:32], r1 vmov q1, q8 vst1.32 {d4[0]},[r4,:32], r1 vst1.32 {d4[1]},[r4,:32], r1 bne 1b pop {r4, pc} 2: \add q1, q8, q1 vrshl.s16 q1, q1, q9 vqmovun.s16 d2, q1 vst1.32 {d2[0]},[r4,:32], r1 vst1.32 {d2[1]},[r4,:32], r1 pop {r4, pc} .endm .macro weight_func w function weight_h264_pixels_\w\()_neon push {r4, lr} ldr r4, [sp, #8] cmp r2, #1 lsl r4, r4, r2 vdup.16 q8, r4 mov r4, r0 ble 20f rsb lr, r2, #1 vdup.16 q9, lr cmp r3, #0 blt 10f weight_\w vhadd.s16 10: rsb r3, r3, #0 weight_\w vhsub.s16 20: rsb lr, r2, #0 vdup.16 q9, lr cmp r3, #0 blt 10f weight_\w vadd.s16 10: rsb r3, r3, #0 weight_\w vsub.s16 endfunc .endm .macro weight_entry w, h, b=1 function ff_weight_h264_pixels_\w\()x\h\()_neon, export=1 mov ip, #\h .if \b b weight_h264_pixels_\w\()_neon .endif endfunc .endm weight_entry 16, 8 weight_entry 16, 16, b=0 weight_func 16 weight_entry 8, 16 weight_entry 8, 4 weight_entry 8, 8, b=0 weight_func 8 weight_entry 4, 8 weight_entry 4, 2 weight_entry 4, 4, b=0 weight_func 4
123linslouis-android-video-cutter
jni/libavcodec/arm/h264dsp_neon.S
Unix Assembly
asf20
63,257
/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavcodec/dcadsp.h"

/* NEON implementation of the DCA LFE FIR filter (defined in assembly). */
void ff_dca_lfe_fir_neon(float *out, const float *in, const float *coefs,
                         int decifactor, float scale, float bias);

/*
 * Install ARM-optimized DCA DSP routines into the dispatch context.
 * Only the NEON LFE FIR is provided; on builds without NEON support
 * the context is left untouched and the C fallbacks remain in place.
 */
void av_cold ff_dcadsp_init_arm(DCADSPContext *s)
{
    if (!HAVE_NEON)
        return;

    s->lfe_fir = ff_dca_lfe_fir_neon;
}
123linslouis-android-video-cutter
jni/libavcodec/arm/dcadsp_init_arm.c
C
asf20
1,163
/* * Simple IDCT * * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2006 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" #define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define ROW_SHIFT 11 #define COL_SHIFT 20 #define W13 (W1 | (W3 << 16)) #define W26 (W2 | (W6 << 16)) #define W57 (W5 | (W7 << 16)) .text .align w13: .long W13 w26: .long W26 w57: .long W57 function idct_row_armv5te str lr, [sp, #-4]! 
ldrd v1, [a1, #8] ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */ orrs v1, v1, v2 cmpeq v1, a4 cmpeq v1, a3, lsr #16 beq row_dc_only mov v1, #(1<<(ROW_SHIFT-1)) mov ip, #16384 sub ip, ip, #1 /* ip = W4 */ smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */ ldr ip, w26 /* ip = W2 | (W6 << 16) */ smultb a2, ip, a4 smulbb lr, ip, a4 add v2, v1, a2 sub v3, v1, a2 sub v4, v1, lr add v1, v1, lr ldr ip, w13 /* ip = W1 | (W3 << 16) */ ldr lr, w57 /* lr = W5 | (W7 << 16) */ smulbt v5, ip, a3 smultt v6, lr, a4 smlatt v5, ip, a4, v5 smultt a2, ip, a3 smulbt v7, lr, a3 sub v6, v6, a2 smulbt a2, ip, a4 smultt fp, lr, a3 sub v7, v7, a2 smulbt a2, lr, a4 ldrd a3, [a1, #8] /* a3=row[5:4] a4=row[7:6] */ sub fp, fp, a2 orrs a2, a3, a4 beq 1f smlabt v5, lr, a3, v5 smlabt v6, ip, a3, v6 smlatt v5, lr, a4, v5 smlabt v6, lr, a4, v6 smlatt v7, lr, a3, v7 smlatt fp, ip, a3, fp smulbt a2, ip, a4 smlatt v7, ip, a4, v7 sub fp, fp, a2 ldr ip, w26 /* ip = W2 | (W6 << 16) */ mov a2, #16384 sub a2, a2, #1 /* a2 = W4 */ smulbb a2, a2, a3 /* a2 = W4*row[4] */ smultb lr, ip, a4 /* lr = W6*row[6] */ add v1, v1, a2 /* v1 += W4*row[4] */ add v1, v1, lr /* v1 += W6*row[6] */ add v4, v4, a2 /* v4 += W4*row[4] */ sub v4, v4, lr /* v4 -= W6*row[6] */ smulbb lr, ip, a4 /* lr = W2*row[6] */ sub v2, v2, a2 /* v2 -= W4*row[4] */ sub v2, v2, lr /* v2 -= W2*row[6] */ sub v3, v3, a2 /* v3 -= W4*row[4] */ add v3, v3, lr /* v3 += W2*row[6] */ 1: add a2, v1, v5 mov a3, a2, lsr #11 bic a3, a3, #0x1f0000 sub a2, v2, v6 mov a2, a2, lsr #11 add a3, a3, a2, lsl #16 add a2, v3, v7 mov a4, a2, lsr #11 bic a4, a4, #0x1f0000 add a2, v4, fp mov a2, a2, lsr #11 add a4, a4, a2, lsl #16 strd a3, [a1] sub a2, v4, fp mov a3, a2, lsr #11 bic a3, a3, #0x1f0000 sub a2, v3, v7 mov a2, a2, lsr #11 add a3, a3, a2, lsl #16 add a2, v2, v6 mov a4, a2, lsr #11 bic a4, a4, #0x1f0000 sub a2, v1, v5 mov a2, a2, lsr #11 add a4, a4, a2, lsl #16 strd a3, [a1, #8] ldr pc, [sp], #4 row_dc_only: orr a3, a3, a3, lsl #16 bic a3, a3, #0xe000 mov 
a3, a3, lsl #3 mov a4, a3 strd a3, [a1] strd a3, [a1, #8] ldr pc, [sp], #4 endfunc .macro idct_col ldr a4, [a1] /* a4 = col[1:0] */ mov ip, #16384 sub ip, ip, #1 /* ip = W4 */ #if 0 mov v1, #(1<<(COL_SHIFT-1)) smlabt v2, ip, a4, v1 /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */ smlabb v1, ip, a4, v1 /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */ ldr a4, [a1, #(16*4)] #else mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */ add v2, v1, a4, asr #16 rsb v2, v2, v2, lsl #14 mov a4, a4, lsl #16 add v1, v1, a4, asr #16 ldr a4, [a1, #(16*4)] rsb v1, v1, v1, lsl #14 #endif smulbb lr, ip, a4 smulbt a3, ip, a4 sub v3, v1, lr sub v5, v1, lr add v7, v1, lr add v1, v1, lr sub v4, v2, a3 sub v6, v2, a3 add fp, v2, a3 ldr ip, w26 ldr a4, [a1, #(16*2)] add v2, v2, a3 smulbb lr, ip, a4 smultb a3, ip, a4 add v1, v1, lr sub v7, v7, lr add v3, v3, a3 sub v5, v5, a3 smulbt lr, ip, a4 smultt a3, ip, a4 add v2, v2, lr sub fp, fp, lr add v4, v4, a3 ldr a4, [a1, #(16*6)] sub v6, v6, a3 smultb lr, ip, a4 smulbb a3, ip, a4 add v1, v1, lr sub v7, v7, lr sub v3, v3, a3 add v5, v5, a3 smultt lr, ip, a4 smulbt a3, ip, a4 add v2, v2, lr sub fp, fp, lr sub v4, v4, a3 add v6, v6, a3 stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp} ldr ip, w13 ldr a4, [a1, #(16*1)] ldr lr, w57 smulbb v1, ip, a4 smultb v3, ip, a4 smulbb v5, lr, a4 smultb v7, lr, a4 smulbt v2, ip, a4 smultt v4, ip, a4 smulbt v6, lr, a4 smultt fp, lr, a4 rsb v4, v4, #0 ldr a4, [a1, #(16*3)] rsb v3, v3, #0 smlatb v1, ip, a4, v1 smlatb v3, lr, a4, v3 smulbb a3, ip, a4 smulbb a2, lr, a4 sub v5, v5, a3 sub v7, v7, a2 smlatt v2, ip, a4, v2 smlatt v4, lr, a4, v4 smulbt a3, ip, a4 smulbt a2, lr, a4 sub v6, v6, a3 ldr a4, [a1, #(16*5)] sub fp, fp, a2 smlabb v1, lr, a4, v1 smlabb v3, ip, a4, v3 smlatb v5, lr, a4, v5 smlatb v7, ip, a4, v7 smlabt v2, lr, a4, v2 smlabt v4, ip, a4, v4 smlatt v6, lr, a4, v6 ldr a3, [a1, #(16*7)] smlatt fp, ip, a4, fp smlatb v1, lr, a3, v1 smlabb v3, lr, a3, v3 smlatb v5, ip, a3, v5 smulbb a4, ip, a3 smlatt v2, lr, 
a3, v2 sub v7, v7, a4 smlabt v4, lr, a3, v4 smulbt a4, ip, a3 smlatt v6, ip, a3, v6 sub fp, fp, a4 .endm function idct_col_armv5te str lr, [sp, #-4]! idct_col ldmfd sp!, {a3, a4} adds a2, a3, v1 mov a2, a2, lsr #20 orrmi a2, a2, #0xf000 add ip, a4, v2 mov ip, ip, asr #20 orr a2, a2, ip, lsl #16 str a2, [a1] subs a3, a3, v1 mov a2, a3, lsr #20 orrmi a2, a2, #0xf000 sub a4, a4, v2 mov a4, a4, asr #20 orr a2, a2, a4, lsl #16 ldmfd sp!, {a3, a4} str a2, [a1, #(16*7)] subs a2, a3, v3 mov a2, a2, lsr #20 orrmi a2, a2, #0xf000 sub ip, a4, v4 mov ip, ip, asr #20 orr a2, a2, ip, lsl #16 str a2, [a1, #(16*1)] adds a3, a3, v3 mov a2, a3, lsr #20 orrmi a2, a2, #0xf000 add a4, a4, v4 mov a4, a4, asr #20 orr a2, a2, a4, lsl #16 ldmfd sp!, {a3, a4} str a2, [a1, #(16*6)] adds a2, a3, v5 mov a2, a2, lsr #20 orrmi a2, a2, #0xf000 add ip, a4, v6 mov ip, ip, asr #20 orr a2, a2, ip, lsl #16 str a2, [a1, #(16*2)] subs a3, a3, v5 mov a2, a3, lsr #20 orrmi a2, a2, #0xf000 sub a4, a4, v6 mov a4, a4, asr #20 orr a2, a2, a4, lsl #16 ldmfd sp!, {a3, a4} str a2, [a1, #(16*5)] adds a2, a3, v7 mov a2, a2, lsr #20 orrmi a2, a2, #0xf000 add ip, a4, fp mov ip, ip, asr #20 orr a2, a2, ip, lsl #16 str a2, [a1, #(16*3)] subs a3, a3, v7 mov a2, a3, lsr #20 orrmi a2, a2, #0xf000 sub a4, a4, fp mov a4, a4, asr #20 orr a2, a2, a4, lsl #16 str a2, [a1, #(16*4)] ldr pc, [sp], #4 endfunc function idct_col_put_armv5te str lr, [sp, #-4]! idct_col ldmfd sp!, {a3, a4} ldr lr, [sp, #32] add a2, a3, v1 movs a2, a2, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 add ip, a4, v2 movs ip, ip, asr #20 movmi ip, #0 cmp ip, #255 movgt ip, #255 orr a2, a2, ip, lsl #8 sub a3, a3, v1 movs a3, a3, asr #20 movmi a3, #0 cmp a3, #255 movgt a3, #255 sub a4, a4, v2 movs a4, a4, asr #20 movmi a4, #0 cmp a4, #255 ldr v1, [sp, #28] movgt a4, #255 strh a2, [v1] add a2, v1, #2 str a2, [sp, #28] orr a2, a3, a4, lsl #8 rsb v2, lr, lr, lsl #3 ldmfd sp!, {a3, a4} strh a2, [v2, v1]! 
sub a2, a3, v3 movs a2, a2, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 sub ip, a4, v4 movs ip, ip, asr #20 movmi ip, #0 cmp ip, #255 movgt ip, #255 orr a2, a2, ip, lsl #8 strh a2, [v1, lr]! add a3, a3, v3 movs a2, a3, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 add a4, a4, v4 movs a4, a4, asr #20 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a2, a4, lsl #8 ldmfd sp!, {a3, a4} strh a2, [v2, -lr]! add a2, a3, v5 movs a2, a2, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 add ip, a4, v6 movs ip, ip, asr #20 movmi ip, #0 cmp ip, #255 movgt ip, #255 orr a2, a2, ip, lsl #8 strh a2, [v1, lr]! sub a3, a3, v5 movs a2, a3, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 sub a4, a4, v6 movs a4, a4, asr #20 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a2, a4, lsl #8 ldmfd sp!, {a3, a4} strh a2, [v2, -lr]! add a2, a3, v7 movs a2, a2, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 add ip, a4, fp movs ip, ip, asr #20 movmi ip, #0 cmp ip, #255 movgt ip, #255 orr a2, a2, ip, lsl #8 strh a2, [v1, lr] sub a3, a3, v7 movs a2, a3, asr #20 movmi a2, #0 cmp a2, #255 movgt a2, #255 sub a4, a4, fp movs a4, a4, asr #20 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a2, a4, lsl #8 strh a2, [v2, -lr] ldr pc, [sp], #4 endfunc function idct_col_add_armv5te str lr, [sp, #-4]! idct_col ldr lr, [sp, #36] ldmfd sp!, {a3, a4} ldrh ip, [lr] add a2, a3, v1 mov a2, a2, asr #20 sub a3, a3, v1 and v1, ip, #255 adds a2, a2, v1 movmi a2, #0 cmp a2, #255 movgt a2, #255 add v1, a4, v2 mov v1, v1, asr #20 adds v1, v1, ip, lsr #8 movmi v1, #0 cmp v1, #255 movgt v1, #255 orr a2, a2, v1, lsl #8 ldr v1, [sp, #32] sub a4, a4, v2 rsb v2, v1, v1, lsl #3 ldrh ip, [v2, lr]! strh a2, [lr] mov a3, a3, asr #20 and a2, ip, #255 adds a3, a3, a2 movmi a3, #0 cmp a3, #255 movgt a3, #255 mov a4, a4, asr #20 adds a4, a4, ip, lsr #8 movmi a4, #0 cmp a4, #255 movgt a4, #255 add a2, lr, #2 str a2, [sp, #28] orr a2, a3, a4, lsl #8 strh a2, [v2] ldmfd sp!, {a3, a4} ldrh ip, [lr, v1]! 
sub a2, a3, v3 mov a2, a2, asr #20 add a3, a3, v3 and v3, ip, #255 adds a2, a2, v3 movmi a2, #0 cmp a2, #255 movgt a2, #255 sub v3, a4, v4 mov v3, v3, asr #20 adds v3, v3, ip, lsr #8 movmi v3, #0 cmp v3, #255 movgt v3, #255 orr a2, a2, v3, lsl #8 add a4, a4, v4 ldrh ip, [v2, -v1]! strh a2, [lr] mov a3, a3, asr #20 and a2, ip, #255 adds a3, a3, a2 movmi a3, #0 cmp a3, #255 movgt a3, #255 mov a4, a4, asr #20 adds a4, a4, ip, lsr #8 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a3, a4, lsl #8 strh a2, [v2] ldmfd sp!, {a3, a4} ldrh ip, [lr, v1]! add a2, a3, v5 mov a2, a2, asr #20 sub a3, a3, v5 and v3, ip, #255 adds a2, a2, v3 movmi a2, #0 cmp a2, #255 movgt a2, #255 add v3, a4, v6 mov v3, v3, asr #20 adds v3, v3, ip, lsr #8 movmi v3, #0 cmp v3, #255 movgt v3, #255 orr a2, a2, v3, lsl #8 sub a4, a4, v6 ldrh ip, [v2, -v1]! strh a2, [lr] mov a3, a3, asr #20 and a2, ip, #255 adds a3, a3, a2 movmi a3, #0 cmp a3, #255 movgt a3, #255 mov a4, a4, asr #20 adds a4, a4, ip, lsr #8 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a3, a4, lsl #8 strh a2, [v2] ldmfd sp!, {a3, a4} ldrh ip, [lr, v1]! add a2, a3, v7 mov a2, a2, asr #20 sub a3, a3, v7 and v3, ip, #255 adds a2, a2, v3 movmi a2, #0 cmp a2, #255 movgt a2, #255 add v3, a4, fp mov v3, v3, asr #20 adds v3, v3, ip, lsr #8 movmi v3, #0 cmp v3, #255 movgt v3, #255 orr a2, a2, v3, lsl #8 sub a4, a4, fp ldrh ip, [v2, -v1]! 
strh a2, [lr] mov a3, a3, asr #20 and a2, ip, #255 adds a3, a3, a2 movmi a3, #0 cmp a3, #255 movgt a3, #255 mov a4, a4, asr #20 adds a4, a4, ip, lsr #8 movmi a4, #0 cmp a4, #255 movgt a4, #255 orr a2, a3, a4, lsl #8 strh a2, [v2] ldr pc, [sp], #4 endfunc function ff_simple_idct_armv5te, export=1 stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr} bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te sub a1, a1, #(16*7) bl idct_col_armv5te add a1, a1, #4 bl idct_col_armv5te add a1, a1, #4 bl idct_col_armv5te add a1, a1, #4 bl idct_col_armv5te ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc} endfunc function ff_simple_idct_add_armv5te, export=1 stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr} mov a1, a3 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te sub a1, a1, #(16*7) bl idct_col_add_armv5te add a1, a1, #4 bl idct_col_add_armv5te add a1, a1, #4 bl idct_col_add_armv5te add a1, a1, #4 bl idct_col_add_armv5te add sp, sp, #8 ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc} endfunc function ff_simple_idct_put_armv5te, export=1 stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr} mov a1, a3 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te add a1, a1, #16 bl idct_row_armv5te sub a1, a1, #(16*7) bl idct_col_put_armv5te add a1, a1, #4 bl idct_col_put_armv5te add a1, a1, #4 bl idct_col_put_armv5te add a1, a1, #4 bl idct_col_put_armv5te add sp, sp, #8 
ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc} endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/simple_idct_armv5te.S
Unix Assembly
asf20
19,254
/*
 * Optimization of some functions from mpegvideo.c for armv5te
 * Copyright (c) 2007 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "asm.S"

/*
 * Special optimized version of dct_unquantize_h263_helper_c, it
 * requires the block to be at least 8 bytes aligned, and may process
 * more elements than requested. But it is guaranteed to never
 * process more than 64 elements provided that count argument is <= 64,
 * so it is safe. This function is optimized for a common distribution
 * of values for nCoeffs (they are mostly multiple of 8 plus one or
 * two extra elements). So this function processes data as 8 elements
 * per loop iteration and contains optional 2 elements processing in
 * the end.
 *
 * Inner loop should take 6 cycles per element on arm926ej-s (Nokia 770)
 *
 * Register usage (presumably matching the C helper's arguments —
 * confirm against the caller):
 *   r0 = block pointer (int16_t *), r1 = qmul, r2 = qadd, r3 = count
 *   ip is kept at 0 and used as the zero reference for the rsbs tests.
 * Each coefficient c becomes c*qmul + sign(c)*qadd, with c == 0 left
 * unchanged (the "ne"-conditional smla is skipped when rsbs sets Z).
 */
function ff_dct_unquantize_h263_armv5te, export=1
        push            {r4-r9,lr}
        mov             ip, #0                  @ constant 0 for sign tests
        subs            r3, r3, #2
        ble             2f                      @ <= 2 coeffs: tail path only
        ldrd            r4, [r0, #0]            @ r4/r5 = first 4 coefficients
1:
        ldrd            r6, [r0, #8]            @ r6/r7 = next 4 coefficients
        @ high halfword of r4: set flags from -coeff, pick +/-qadd, then MAC
        rsbs            r9, ip, r4, asr #16
        addgt           r9, r2, #0              @ coeff > 0: start with +qadd
        rsblt           r9, r2, #0              @ coeff < 0: start with -qadd
        smlatbne        r9, r4, r1, r9          @ coeff != 0: += coeff*qmul
        rsbs            lr, ip, r5, asr #16
        addgt           lr, r2, #0
        rsblt           lr, r2, #0
        smlatbne        lr, r5, r1, lr
        @ low halfwords (shifted left so the sign test sees only 16 bits)
        rsbs            r8, ip, r4, asl #16
        addgt           r8, r2, #0
        rsblt           r8, r2, #0
        smlabbne        r4, r4, r1, r8
        rsbs            r8, ip, r5, asl #16
        addgt           r8, r2, #0
        rsblt           r8, r2, #0
        smlabbne        r5, r5, r1, r8
        strh            r4, [r0], #2            @ store 4 dequantized coeffs
        strh            r9, [r0], #2
        strh            r5, [r0], #2
        strh            lr, [r0], #2
        @ same treatment for the second group of 4 coefficients (r6/r7)
        rsbs            r9, ip, r6, asr #16
        addgt           r9, r2, #0
        rsblt           r9, r2, #0
        smlatbne        r9, r6, r1, r9
        rsbs            lr, ip, r7, asr #16
        addgt           lr, r2, #0
        rsblt           lr, r2, #0
        smlatbne        lr, r7, r1, lr
        rsbs            r8, ip, r6, asl #16
        addgt           r8, r2, #0
        rsblt           r8, r2, #0
        smlabbne        r6, r6, r1, r8
        rsbs            r8, ip, r7, asl #16
        addgt           r8, r2, #0
        rsblt           r8, r2, #0
        smlabbne        r7, r7, r1, r8
        strh            r6, [r0], #2
        strh            r9, [r0], #2
        strh            r7, [r0], #2
        strh            lr, [r0], #2
        subs            r3, r3, #8              @ 8 coefficients done
        ldrgtd          r4, [r0, #0]            /* load data early to avoid load/use pipeline stall */
        bgt             1b
        adds            r3, r3, #2
        pople           {r4-r9,pc}              @ no 2-element tail: return
2:
        @ tail: dequantize the final pair of coefficients one by one
        ldrsh           r9, [r0, #0]
        ldrsh           lr, [r0, #2]
        mov             r8, r2                  @ default +qadd
        cmp             r9, #0
        rsblt           r8, r2, #0              @ negative coeff: -qadd
        smlabbne        r9, r9, r1, r8          @ nonzero: coeff*qmul +/- qadd
        mov             r8, r2
        cmp             lr, #0
        rsblt           r8, r2, #0
        smlabbne        lr, lr, r1, r8
        strh            r9, [r0], #2
        strh            lr, [r0], #2
        pop             {r4-r9,pc}
endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/mpegvideo_armv5te_s.S
Unix Assembly
asf20
4,058
/* * simple_idct_arm.S * Copyright (C) 2002 Frederic 'dilb' Boulay * * Author: Frederic Boulay <dilb@handhelds.org> * * The function defined in this file is derived from the simple_idct function * from the libavcodec library part of the FFmpeg project. * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" /* useful constants for the algorithm, they are save in __constant_ptr__ at */ /* the end of the source code.*/ #define W1 22725 #define W2 21407 #define W3 19266 #define W4 16383 #define W5 12873 #define W6 8867 #define W7 4520 #define MASK_MSHW 0xFFFF0000 /* offsets of the constants in the vector */ #define offW1 0 #define offW2 4 #define offW3 8 #define offW4 12 #define offW5 16 #define offW6 20 #define offW7 24 #define offMASK_MSHW 28 #define ROW_SHIFT 11 #define ROW_SHIFT2MSHW (16-11) #define COL_SHIFT 20 #define ROW_SHIFTED_1 1024 /* 1<< (ROW_SHIFT-1) */ #define COL_SHIFTED_1 524288 /* 1<< (COL_SHIFT-1) */ .text function ff_simple_idct_arm, export=1 @@ void simple_idct_arm(int16_t *block) @@ save stack for reg needed (take all of them), @@ R0-R3 are scratch regs, so no need to save them, but R0 contains the pointer to block @@ so it must not be overwritten, if it is not saved!! 
@@ R12 is another scratch register, so it should not be saved too @@ save all registers stmfd sp!, {r4-r11, r14} @ R14 is also called LR @@ at this point, R0=block, other registers are free. add r14, r0, #112 @ R14=&block[8*7], better start from the last row, and decrease the value until row=0, i.e. R12=block. adr r12, __constant_ptr__ @ R12=__constant_ptr__, the vector containing the constants, probably not necessary to reserve a register for it @@ add 2 temporary variables in the stack: R0 and R14 sub sp, sp, #8 @ allow 2 local variables str r0, [sp, #0] @ save block in sp[0] @@ stack status @@ sp+4 free @@ sp+0 R0 (block) @@ at this point, R0=block, R14=&block[56], R12=__const_ptr_, R1-R11 free __row_loop: @@ read the row and check if it is null, almost null, or not, according to strongarm specs, it is not necessary to optimize ldr accesses (i.e. split 32bits in 2 16bits words), at least it gives more usable registers :) ldr r1, [r14, #0] @ R1=(int32)(R12)[0]=ROWr32[0] (relative row cast to a 32b pointer) ldr r2, [r14, #4] @ R2=(int32)(R12)[1]=ROWr32[1] ldr r3, [r14, #8] @ R3=ROWr32[2] ldr r4, [r14, #12] @ R4=ROWr32[3] @@ check if the words are null, if all of them are null, then proceed with next row (branch __end_row_loop), @@ if ROWr16[0] is the only one not null, then proceed with this special case (branch __almost_empty_row) @@ else follow the complete algorithm. 
@@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1], @@ R3=ROWr32[2], R4=ROWr32[3], R5-R11 free orr r5, r4, r3 @ R5=R4 | R3 orr r5, r5, r2 @ R5=R4 | R3 | R2 orrs r6, r5, r1 @ Test R5 | R1 (the aim is to check if everything is null) beq __end_row_loop mov r7, r1, asr #16 @ R7=R1>>16=ROWr16[1] (evaluate it now, as it could be useful later) ldrsh r6, [r14, #0] @ R6=ROWr16[0] orrs r5, r5, r7 @ R5=R4 | R3 | R2 | R7 beq __almost_empty_row __b_evaluation: @@ at this point, R0=block (temp), R1(free), R2=ROWr32[1], R3=ROWr32[2], R4=ROWr32[3], @@ R5=(temp), R6=ROWr16[0], R7=ROWr16[1], R8-R11 free, @@ R12=__const_ptr_, R14=&block[n] @@ to save some registers/calls, proceed with b0-b3 first, followed by a0-a3 @@ MUL16(b0, W1, row[1]); @@ MUL16(b1, W3, row[1]); @@ MUL16(b2, W5, row[1]); @@ MUL16(b3, W7, row[1]); @@ MAC16(b0, W3, row[3]); @@ MAC16(b1, -W7, row[3]); @@ MAC16(b2, -W1, row[3]); @@ MAC16(b3, -W5, row[3]); ldr r8, [r12, #offW1] @ R8=W1 mov r2, r2, asr #16 @ R2=ROWr16[3] mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r9, [r12, #offW3] @ R9=W3 ldr r10, [r12, #offW5] @ R10=W5 mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r11, [r12, #offW7] @ R11=W7 mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) teq r2, #0 @ if null avoid muls mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r7, 
r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3], @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; @@ if (temp != 0) {} orrs r2, r3, r4 @ R2=ROWr32[2] | ROWr32[3] beq __end_b_evaluation @@ at this point, R0=b0, R1=b1, R2 (free), R3=ROWr32[2], R4=ROWr32[3], @@ R5=b2, R6=ROWr16[0], R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ MAC16(b0, W5, row[5]); @@ MAC16(b2, W7, row[5]); @@ MAC16(b3, W3, row[5]); @@ MAC16(b1, -W1, row[5]); @@ MAC16(b0, W7, row[7]); @@ MAC16(b2, W3, row[7]); @@ MAC16(b3, -W1, row[7]); @@ MAC16(b1, -W5, row[7]); mov r3, r3, asr #16 @ R3=ROWr16[5] teq r3, #0 @ if null avoid muls mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5]=b0 mov r4, r4, asr #16 @ R4=ROWr16[7] mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5] mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5]=b1 @@ R3 is free now teq r4, #0 @ if null avoid muls mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7]=b0 mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7]=b3 mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7]=b1 @@ R4 is free now __end_b_evaluation: @@ at this point, R0=b0, R1=b1, R2=ROWr32[2] | ROWr32[3] (tmp), R3 (free), R4 (free), @@ R5=b2, R6=ROWr16[0], R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] __a_evaluation: @@ a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); @@ a1 = a0 + W6 * row[2]; @@ a2 = a0 - W6 * row[2]; @@ a3 = a0 - W2 * row[2]; @@ a0 = a0 + W2 * row[2]; ldr r9, [r12, #offW4] @ R9=W4 mul r6, r9, r6 @ R6=W4*ROWr16[0] ldr r10, [r12, #offW6] @ R10=W6 ldrsh r4, [r14, #4] @ R4=ROWr16[2] (a3 not defined yet) add r6, r6, #ROW_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(ROW_SHIFT-1) (a0) mul r11, r10, 
r4 @ R11=W6*ROWr16[2] ldr r8, [r12, #offW2] @ R8=W2 sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) @@ temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; @@ if (temp != 0) {} teq r2, #0 beq __end_bef_a_evaluation add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ a0 += W4*row[4] @@ a1 -= W4*row[4] @@ a2 -= W4*row[4] @@ a3 += W4*row[4] ldrsh r11, [r14, #8] @ R11=ROWr16[4] teq r11, #0 @ if null avoid muls mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is free now ldrsh r9, [r14, #12] @ R9=ROWr16[6] addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead teq r9, #0 @ if null avoid muls mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ a0 += W6*row[6]; @@ a3 -= W6*row[6]; @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) __end_a_evaluation: @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ row[0] = (a0 + b0) >> ROW_SHIFT; @@ row[1] = (a1 + b1) >> ROW_SHIFT; @@ row[2] = (a2 + b2) >> ROW_SHIFT; @@ row[3] = (a3 + b3) >> ROW_SHIFT; @@ row[4] = (a3 - b3) >> ROW_SHIFT; @@ row[5] = (a2 - b2) >> ROW_SHIFT; @@ row[6] = (a1 - b1) >> ROW_SHIFT; @@ row[7] = (a0 - b0) >> ROW_SHIFT; add r8, r6, r0 @ R8=a0+b0 add r9, r2, r1 @ R9=a1+b1 @@ put 2 16 bits half-words in a 32bits word @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!) 
ldr r10, [r12, #offMASK_MSHW] @ R10=0xFFFF0000 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5) mvn r11, r10 @ R11= NOT R10= 0x0000FFFF and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a0+b0)>>11) orr r8, r8, r9 str r8, [r14, #0] add r8, r3, r5 @ R8=a2+b2 add r9, r4, r7 @ R9=a3+b3 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a3+b3)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a2+b2)>>11) orr r8, r8, r9 str r8, [r14, #4] sub r8, r4, r7 @ R8=a3-b3 sub r9, r3, r5 @ R9=a2-b2 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a2-b2)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a3-b3)>>11) orr r8, r8, r9 str r8, [r14, #8] sub r8, r2, r1 @ R8=a1-b1 sub r9, r6, r0 @ R9=a0-b0 and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a0-b0)<<5) and r8, r11, r8, asr #ROW_SHIFT @ R8=0x0000FFFF & ((a1-b1)>>11) orr r8, r8, r9 str r8, [r14, #12] bal __end_row_loop __almost_empty_row: @@ the row was empty, except ROWr16[0], now, management of this special case @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1], @@ R3=ROWr32[2], R4=ROWr32[3], R5=(temp), R6=ROWr16[0], R7=ROWr16[1], @@ R8=0xFFFF (temp), R9-R11 free mov r8, #0x10000 @ R8=0xFFFF (2 steps needed!) it saves a ldr call (because of delay run). sub r8, r8, #1 @ R8 is now ready. and r5, r8, r6, lsl #3 @ R5=R8 & (R6<<3)= (ROWr16[0]<<3) & 0xFFFF orr r5, r5, r5, lsl #16 @ R5=R5 | (R5<<16) str r5, [r14, #0] @ R14[0]=ROWr32[0]=R5 str r5, [r14, #4] @ R14[4]=ROWr32[1]=R5 str r5, [r14, #8] @ R14[8]=ROWr32[2]=R5 str r5, [r14, #12] @ R14[12]=ROWr32[3]=R5 __end_row_loop: @@ at this point, R0-R11 (free) @@ R12=__const_ptr_, R14=&block[n] ldr r0, [sp, #0] @ R0=block teq r0, r14 @ compare current &block[8*n] to block, when block is reached, the loop is finished. 
sub r14, r14, #16 bne __row_loop @@ at this point, R0=block, R1-R11 (free) @@ R12=__const_ptr_, R14=&block[n] add r14, r0, #14 @ R14=&block[7], better start from the last col, and decrease the value until col=0, i.e. R14=block. __col_loop: __b_evaluation2: @@ at this point, R0=block (temp), R1-R11 (free) @@ R12=__const_ptr_, R14=&block[n] @@ proceed with b0-b3 first, followed by a0-a3 @@ MUL16(b0, W1, col[8x1]); @@ MUL16(b1, W3, col[8x1]); @@ MUL16(b2, W5, col[8x1]); @@ MUL16(b3, W7, col[8x1]); @@ MAC16(b0, W3, col[8x3]); @@ MAC16(b1, -W7, col[8x3]); @@ MAC16(b2, -W1, col[8x3]); @@ MAC16(b3, -W5, col[8x3]); ldr r8, [r12, #offW1] @ R8=W1 ldrsh r7, [r14, #16] mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r9, [r12, #offW3] @ R9=W3 ldr r10, [r12, #offW5] @ R10=W5 mul r1, r9, r7 @ R1=W3*ROWr16[1]=b1 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldr r11, [r12, #offW7] @ R11=W7 mul r5, r10, r7 @ R5=W5*ROWr16[1]=b2 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) ldrsh r2, [r14, #48] mul r7, r11, r7 @ R7=W7*ROWr16[1]=b3 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle) teq r2, #0 @ if 0, then avoid muls mlane r0, r9, r2, r0 @ R0+=W3*ROWr16[3]=b0 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) rsbne r2, r2, #0 @ R2=-ROWr16[3] mlane r1, r11, r2, r1 @ R1-=W7*ROWr16[3]=b1 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r5, r8, r2, r5 @ R5-=W1*ROWr16[3]=b2 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) mlane r7, r10, r2, r7 @ R7-=W5*ROWr16[3]=b3 (ROWr16[3] must be the second arg, to have the possibility to save 1 cycle) @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free), @@ R5=b2, R6 (free), R7=b3, R8=W1, R9=W3, R10=W5, R11=W7, @@ R12=__const_ptr_, R14=&block[n] @@ MAC16(b0, W5, col[5x8]); @@ MAC16(b2, W7, col[5x8]); @@ 
MAC16(b3, W3, col[5x8]); @@ MAC16(b1, -W1, col[5x8]); @@ MAC16(b0, W7, col[7x8]); @@ MAC16(b2, W3, col[7x8]); @@ MAC16(b3, -W1, col[7x8]); @@ MAC16(b1, -W5, col[7x8]); ldrsh r3, [r14, #80] @ R3=COLr16[5x8] teq r3, #0 @ if 0 then avoid muls mlane r0, r10, r3, r0 @ R0+=W5*ROWr16[5x8]=b0 mlane r5, r11, r3, r5 @ R5+=W7*ROWr16[5x8]=b2 mlane r7, r9, r3, r7 @ R7+=W3*ROWr16[5x8]=b3 rsbne r3, r3, #0 @ R3=-ROWr16[5x8] ldrsh r4, [r14, #112] @ R4=COLr16[7x8] mlane r1, r8, r3, r1 @ R7-=W1*ROWr16[5x8]=b1 @@ R3 is free now teq r4, #0 @ if 0 then avoid muls mlane r0, r11, r4, r0 @ R0+=W7*ROWr16[7x8]=b0 mlane r5, r9, r4, r5 @ R5+=W3*ROWr16[7x8]=b2 rsbne r4, r4, #0 @ R4=-ROWr16[7x8] mlane r7, r8, r4, r7 @ R7-=W1*ROWr16[7x8]=b3 mlane r1, r10, r4, r1 @ R1-=W5*ROWr16[7x8]=b1 @@ R4 is free now __end_b_evaluation2: @@ at this point, R0=b0, R1=b1, R2 (free), R3 (free), R4 (free), @@ R5=b2, R6 (free), R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] __a_evaluation2: @@ a0 = (W4 * col[8x0]) + (1 << (COL_SHIFT - 1)); @@ a1 = a0 + W6 * row[2]; @@ a2 = a0 - W6 * row[2]; @@ a3 = a0 - W2 * row[2]; @@ a0 = a0 + W2 * row[2]; ldrsh r6, [r14, #0] ldr r9, [r12, #offW4] @ R9=W4 mul r6, r9, r6 @ R6=W4*ROWr16[0] ldr r10, [r12, #offW6] @ R10=W6 ldrsh r4, [r14, #32] @ R4=ROWr16[2] (a3 not defined yet) add r6, r6, #COL_SHIFTED_1 @ R6=W4*ROWr16[0] + 1<<(COL_SHIFT-1) (a0) mul r11, r10, r4 @ R11=W6*ROWr16[2] ldr r8, [r12, #offW2] @ R8=W2 add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) sub r3, r6, r11 @ R3=a0-W6*ROWr16[2] (a2) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8=W2, R9=W4, R10=W6, R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ a0 += W4*row[4] @@ a1 -= W4*row[4] @@ a2 -= W4*row[4] @@ a3 += W4*row[4] ldrsh r11, [r14, #64] @ R11=ROWr16[4] teq r11, #0 @ if null avoid muls mulne r11, r9, r11 @ R11=W4*ROWr16[4] @@ R9 is 
free now addne r6, r6, r11 @ R6+=W4*ROWr16[4] (a0) subne r2, r2, r11 @ R2-=W4*ROWr16[4] (a1) subne r3, r3, r11 @ R3-=W4*ROWr16[4] (a2) ldrsh r9, [r14, #96] @ R9=ROWr16[6] addne r4, r4, r11 @ R4+=W4*ROWr16[4] (a3) @@ W6 alone is no more useful, save W2*ROWr16[6] in it instead teq r9, #0 @ if null avoid muls mulne r11, r10, r9 @ R11=W6*ROWr16[6] addne r6, r6, r11 @ R6+=W6*ROWr16[6] (a0) mulne r10, r8, r9 @ R10=W2*ROWr16[6] @@ a0 += W6*row[6]; @@ a3 -= W6*row[6]; @@ a1 -= W2*row[6]; @@ a2 += W2*row[6]; subne r4, r4, r11 @ R4-=W6*ROWr16[6] (a3) subne r2, r2, r10 @ R2-=W2*ROWr16[6] (a1) addne r3, r3, r10 @ R3+=W2*ROWr16[6] (a2) __end_a_evaluation2: @@ at this point, R0=b0, R1=b1, R2=a1, R3=a2, R4=a3, @@ R5=b2, R6=a0, R7=b3, R8 (free), R9 (free), R10 (free), R11 (free), @@ R12=__const_ptr_, R14=&block[n] @@ col[0 ] = ((a0 + b0) >> COL_SHIFT); @@ col[8 ] = ((a1 + b1) >> COL_SHIFT); @@ col[16] = ((a2 + b2) >> COL_SHIFT); @@ col[24] = ((a3 + b3) >> COL_SHIFT); @@ col[32] = ((a3 - b3) >> COL_SHIFT); @@ col[40] = ((a2 - b2) >> COL_SHIFT); @@ col[48] = ((a1 - b1) >> COL_SHIFT); @@ col[56] = ((a0 - b0) >> COL_SHIFT); @@@@@ no optimization here @@@@@ add r8, r6, r0 @ R8=a0+b0 add r9, r2, r1 @ R9=a1+b1 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #0] strh r9, [r14, #16] add r8, r3, r5 @ R8=a2+b2 add r9, r4, r7 @ R9=a3+b3 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #32] strh r9, [r14, #48] sub r8, r4, r7 @ R8=a3-b3 sub r9, r3, r5 @ R9=a2-b2 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #64] strh r9, [r14, #80] sub r8, r2, r1 @ R8=a1-b1 sub r9, r6, r0 @ R9=a0-b0 mov r8, r8, asr #COL_SHIFT mov r9, r9, asr #COL_SHIFT strh r8, [r14, #96] strh r9, [r14, #112] __end_col_loop: @@ at this point, R0-R11 (free) @@ R12=__const_ptr_, R14=&block[n] ldr r0, [sp, #0] @ R0=block teq r0, r14 @ compare current &block[n] to block, when block is reached, the loop is finished. 
sub r14, r14, #2 bne __col_loop __end_simple_idct_arm: @@ restore registers to previous status! add sp, sp, #8 @@ the local variables! ldmfd sp!, {r4-r11, r15} @@ update PC with LR content. @@ kind of sub-function, here not to overload the common case. __end_bef_a_evaluation: add r2, r6, r11 @ R2=a0+W6*ROWr16[2] (a1) mul r11, r8, r4 @ R11=W2*ROWr16[2] sub r4, r6, r11 @ R4=a0-W2*ROWr16[2] (a3) add r6, r6, r11 @ R6=a0+W2*ROWr16[2] (a0) bal __end_a_evaluation __constant_ptr__: @@ see #defines at the beginning of the source code for values. .align .word W1 .word W2 .word W3 .word W4 .word W5 .word W6 .word W7 .word MASK_MSHW
123linslouis-android-video-cutter
jni/libavcodec/arm/simple_idct_arm.S
Unix Assembly
asf20
22,063
/* * Copyright (c) 2002 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "mpegvideo_arm.h" void MPV_common_init_arm(MpegEncContext *s) { /* IWMMXT support is a superset of armv5te, so * allow optimized functions for armv5te unless * a better iwmmxt function exists */ #if HAVE_ARMV5TE MPV_common_init_armv5te(s); #endif #if HAVE_IWMMXT MPV_common_init_iwmmxt(s); #endif }
123linslouis-android-video-cutter
jni/libavcodec/arm/mpegvideo_arm.c
C
asf20
1,243
/* * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" .macro vp6_edge_filter vdup.16 q3, r2 @ t vmov.i16 q13, #1 vsubl.u8 q0, d20, d18 @ p[ 0] - p[-s] vsubl.u8 q1, d16, d22 @ p[-2*s] - p[ s] vsubl.u8 q14, d21, d19 vsubl.u8 q15, d17, d23 vadd.i16 q2, q0, q0 @ 2*(p[0]-p[-s]) vadd.i16 d29, d28, d28 vadd.i16 q0, q0, q1 @ p[0]-p[-s] + p[-2*s]-p[s] vadd.i16 d28, d28, d30 vadd.i16 q0, q0, q2 @ 3*(p[0]-p[-s]) + p[-2*s]-p[s] vadd.i16 d28, d28, d29 vrshr.s16 q0, q0, #3 @ v vrshr.s16 d28, d28, #3 vsub.i16 q8, q3, q13 @ t-1 vabs.s16 q1, q0 @ V vshr.s16 q2, q0, #15 @ s vabs.s16 d30, d28 vshr.s16 d29, d28, #15 vsub.i16 q12, q1, q3 @ V-t vsub.i16 d31, d30, d6 vsub.i16 q12, q12, q13 @ V-t-1 vsub.i16 d31, d31, d26 vcge.u16 q12, q12, q8 @ V-t-1 >= t-1 vcge.u16 d31, d31, d16 vadd.i16 q13, q3, q3 @ 2*t vadd.i16 d16, d6, d6 vsub.i16 q13, q13, q1 @ 2*t - V vsub.i16 d16, d16, d30 vadd.i16 q13, q13, q2 @ += s vadd.i16 d16, d16, d29 veor q13, q13, q2 @ ^= s veor d16, d16, d29 vbif q0, q13, q12 vbif d28, d16, d31 vmovl.u8 q1, d20 vmovl.u8 q15, d21 vaddw.u8 q2, q0, d18 vaddw.u8 q3, q14, d19 vsub.i16 q1, q1, q0 vsub.i16 d30, d30, d28 vqmovun.s16 d18, q2 vqmovun.s16 d19, q3 vqmovun.s16 d20, q1 vqmovun.s16 d21, q15 .endm function 
ff_vp6_edge_filter_ver_neon, export=1 sub r0, r0, r1, lsl #1 vld1.8 {q8}, [r0], r1 @ p[-2*s] vld1.8 {q9}, [r0], r1 @ p[-s] vld1.8 {q10}, [r0], r1 @ p[0] vld1.8 {q11}, [r0] @ p[s] vp6_edge_filter sub r0, r0, r1, lsl #1 sub r1, r1, #8 vst1.8 {d18}, [r0]! vst1.32 {d19[0]}, [r0], r1 vst1.8 {d20}, [r0]! vst1.32 {d21[0]}, [r0] bx lr endfunc function ff_vp6_edge_filter_hor_neon, export=1 sub r3, r0, #1 sub r0, r0, #2 vld1.32 {d16[0]}, [r0], r1 vld1.32 {d18[0]}, [r0], r1 vld1.32 {d20[0]}, [r0], r1 vld1.32 {d22[0]}, [r0], r1 vld1.32 {d16[1]}, [r0], r1 vld1.32 {d18[1]}, [r0], r1 vld1.32 {d20[1]}, [r0], r1 vld1.32 {d22[1]}, [r0], r1 vld1.32 {d17[0]}, [r0], r1 vld1.32 {d19[0]}, [r0], r1 vld1.32 {d21[0]}, [r0], r1 vld1.32 {d23[0]}, [r0], r1 vtrn.8 q8, q9 vtrn.8 q10, q11 vtrn.16 q8, q10 vtrn.16 q9, q11 vp6_edge_filter vtrn.8 q9, q10 vst1.16 {d18[0]}, [r3], r1 vst1.16 {d20[0]}, [r3], r1 vst1.16 {d18[1]}, [r3], r1 vst1.16 {d20[1]}, [r3], r1 vst1.16 {d18[2]}, [r3], r1 vst1.16 {d20[2]}, [r3], r1 vst1.16 {d18[3]}, [r3], r1 vst1.16 {d20[3]}, [r3], r1 vst1.16 {d19[0]}, [r3], r1 vst1.16 {d21[0]}, [r3], r1 vst1.16 {d19[1]}, [r3], r1 vst1.16 {d21[1]}, [r3], r1 bx lr endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/vp56dsp_neon.S
Unix Assembly
asf20
4,897
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdint.h> #include "libavcodec/h264pred.h" void ff_pred16x16_vert_neon(uint8_t *src, int stride); void ff_pred16x16_hor_neon(uint8_t *src, int stride); void ff_pred16x16_plane_neon(uint8_t *src, int stride); void ff_pred16x16_dc_neon(uint8_t *src, int stride); void ff_pred16x16_128_dc_neon(uint8_t *src, int stride); void ff_pred16x16_left_dc_neon(uint8_t *src, int stride); void ff_pred16x16_top_dc_neon(uint8_t *src, int stride); void ff_pred8x8_vert_neon(uint8_t *src, int stride); void ff_pred8x8_hor_neon(uint8_t *src, int stride); void ff_pred8x8_plane_neon(uint8_t *src, int stride); void ff_pred8x8_dc_neon(uint8_t *src, int stride); void ff_pred8x8_128_dc_neon(uint8_t *src, int stride); void ff_pred8x8_left_dc_neon(uint8_t *src, int stride); void ff_pred8x8_top_dc_neon(uint8_t *src, int stride); void ff_pred8x8_l0t_dc_neon(uint8_t *src, int stride); void ff_pred8x8_0lt_dc_neon(uint8_t *src, int stride); void ff_pred8x8_l00_dc_neon(uint8_t *src, int stride); void ff_pred8x8_0l0_dc_neon(uint8_t *src, int stride); #if HAVE_NEON static void ff_h264_pred_init_neon(H264PredContext *h, int codec_id) { h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon; h->pred8x8[HOR_PRED8x8 ] 
= ff_pred8x8_hor_neon; h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_neon; h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon; if (codec_id != CODEC_ID_RV40) { h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon; h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon; h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon; h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon; h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon; h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon; h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon; } h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon; h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon; h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon; h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon; h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon; h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon; if (codec_id != CODEC_ID_SVQ3 && codec_id != CODEC_ID_RV40) h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon; } #endif void ff_h264_pred_init_arm(H264PredContext *h, int codec_id) { if (HAVE_NEON) ff_h264_pred_init_neon(h, codec_id); }
123linslouis-android-video-cutter
jni/libavcodec/arm/h264pred_init_arm.c
C
asf20
3,320
/* * Copyright (c) 2008 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" preserve8 .text function ff_h264_idct_add_neon, export=1 vld1.64 {d0-d3}, [r1,:128] vswp d1, d2 vadd.i16 d4, d0, d1 vshr.s16 q8, q1, #1 vsub.i16 d5, d0, d1 vadd.i16 d6, d2, d17 vsub.i16 d7, d16, d3 vadd.i16 q0, q2, q3 vsub.i16 q1, q2, q3 vtrn.16 d0, d1 vtrn.16 d3, d2 vtrn.32 d0, d3 vtrn.32 d1, d2 vadd.i16 d4, d0, d3 vld1.32 {d18[0]}, [r0,:32], r2 vswp d1, d3 vshr.s16 q8, q1, #1 vld1.32 {d19[1]}, [r0,:32], r2 vsub.i16 d5, d0, d1 vld1.32 {d18[1]}, [r0,:32], r2 vadd.i16 d6, d16, d3 vld1.32 {d19[0]}, [r0,:32], r2 vsub.i16 d7, d2, d17 sub r0, r0, r2, lsl #2 vadd.i16 q0, q2, q3 vsub.i16 q1, q2, q3 vrshr.s16 q0, q0, #6 vrshr.s16 q1, q1, #6 vaddw.u8 q0, q0, d18 vaddw.u8 q1, q1, d19 vqmovun.s16 d0, q0 vqmovun.s16 d1, q1 vst1.32 {d0[0]}, [r0,:32], r2 vst1.32 {d1[1]}, [r0,:32], r2 vst1.32 {d0[1]}, [r0,:32], r2 vst1.32 {d1[0]}, [r0,:32], r2 bx lr endfunc function ff_h264_idct_dc_add_neon, export=1 vld1.16 {d2[],d3[]}, [r1,:16] vrshr.s16 q1, q1, #6 vld1.32 {d0[0]}, [r0,:32], r2 vld1.32 {d0[1]}, [r0,:32], r2 vaddw.u8 q2, q1, d0 vld1.32 {d1[0]}, [r0,:32], r2 vld1.32 {d1[1]}, [r0,:32], r2 vaddw.u8 q1, q1, d1 vqmovun.s16 d0, q2 vqmovun.s16 d1, q1 sub r0, r0, r2, lsl #2 vst1.32 
{d0[0]}, [r0,:32], r2 vst1.32 {d0[1]}, [r0,:32], r2 vst1.32 {d1[0]}, [r0,:32], r2 vst1.32 {d1[1]}, [r0,:32], r2 bx lr endfunc function ff_h264_idct_add16_neon, export=1 push {r4-r8,lr} mov r4, r0 mov r5, r1 mov r1, r2 mov r2, r3 ldr r6, [sp, #24] movrel r7, scan8 mov ip, #16 1: ldrb r8, [r7], #1 ldr r0, [r5], #4 ldrb r8, [r6, r8] subs r8, r8, #1 blt 2f ldrsh lr, [r1] add r0, r0, r4 movne lr, #0 cmp lr, #0 adrne lr, ff_h264_idct_dc_add_neon adreq lr, ff_h264_idct_add_neon blx lr 2: subs ip, ip, #1 add r1, r1, #32 bne 1b pop {r4-r8,pc} endfunc function ff_h264_idct_add16intra_neon, export=1 push {r4-r8,lr} mov r4, r0 mov r5, r1 mov r1, r2 mov r2, r3 ldr r6, [sp, #24] movrel r7, scan8 mov ip, #16 1: ldrb r8, [r7], #1 ldr r0, [r5], #4 ldrb r8, [r6, r8] add r0, r0, r4 cmp r8, #0 ldrsh r8, [r1] adrne lr, ff_h264_idct_add_neon adreq lr, ff_h264_idct_dc_add_neon cmpeq r8, #0 blxne lr subs ip, ip, #1 add r1, r1, #32 bne 1b pop {r4-r8,pc} endfunc function ff_h264_idct_add8_neon, export=1 push {r4-r10,lr} ldm r0, {r4,r9} add r5, r1, #16*4 add r1, r2, #16*32 mov r2, r3 ldr r6, [sp, #32] movrel r7, scan8+16 mov ip, #8 1: ldrb r8, [r7], #1 ldr r0, [r5], #4 ldrb r8, [r6, r8] tst ip, #4 addeq r0, r0, r4 addne r0, r0, r9 cmp r8, #0 ldrsh r8, [r1] adrne lr, ff_h264_idct_add_neon adreq lr, ff_h264_idct_dc_add_neon cmpeq r8, #0 blxne lr subs ip, ip, #1 add r1, r1, #32 bne 1b pop {r4-r10,pc} endfunc .section .rodata scan8: .byte 4+1*8, 5+1*8, 4+2*8, 5+2*8 .byte 6+1*8, 7+1*8, 6+2*8, 7+2*8 .byte 4+3*8, 5+3*8, 4+4*8, 5+4*8 .byte 6+3*8, 7+3*8, 6+4*8, 7+4*8 .byte 1+1*8, 2+1*8 .byte 1+2*8, 2+2*8 .byte 1+4*8, 2+4*8 .byte 1+5*8, 2+5*8
123linslouis-android-video-cutter
jni/libavcodec/arm/h264idct_neon.S
Unix Assembly
asf20
6,129
/* * Optimization of some functions from mpegvideo.c for armv5te * Copyright (c) 2007 Siarhei Siamashka <ssvb@users.sourceforge.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" #include "mpegvideo_arm.h" void ff_dct_unquantize_h263_armv5te(DCTELEM *block, int qmul, int qadd, int count); #ifdef ENABLE_ARM_TESTS /** * h263 dequantizer supplementary function, it is performance critical and needs to * have optimized implementations for each architecture. 
Is also used as a reference * implementation in regression tests */ static inline void dct_unquantize_h263_helper_c(DCTELEM *block, int qmul, int qadd, int count) { int i, level; for (i = 0; i < count; i++) { level = block[i]; if (level) { if (level < 0) { level = level * qmul - qadd; } else { level = level * qmul + qadd; } block[i] = level; } } } #endif static void dct_unquantize_h263_intra_armv5te(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int level, qmul, qadd; int nCoeffs; assert(s->block_last_index[n]>=0); qmul = qscale << 1; if (!s->h263_aic) { if (n < 4) level = block[0] * s->y_dc_scale; else level = block[0] * s->c_dc_scale; qadd = (qscale - 1) | 1; }else{ qadd = 0; level = block[0]; } if(s->ac_pred) nCoeffs=63; else nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1); block[0] = level; } static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int qmul, qadd; int nCoeffs; assert(s->block_last_index[n]>=0); qadd = (qscale - 1) | 1; qmul = qscale << 1; nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1); } void MPV_common_init_armv5te(MpegEncContext *s) { s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te; }
123linslouis-android-video-cutter
jni/libavcodec/arm/mpegvideo_armv5te.c
C
asf20
3,061
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" preserve8 .text .macro call_2x_pixels type, subp function ff_\type\()_pixels16\subp\()_armv6, export=1 push {r0-r3, lr} bl ff_\type\()_pixels8\subp\()_armv6 pop {r0-r3, lr} add r0, r0, #8 add r1, r1, #8 b ff_\type\()_pixels8\subp\()_armv6 endfunc .endm call_2x_pixels avg call_2x_pixels put, _x2 call_2x_pixels put, _y2 call_2x_pixels put, _x2_no_rnd call_2x_pixels put, _y2_no_rnd function ff_put_pixels16_armv6, export=1 push {r4-r11} 1: ldr r5, [r1, #4] ldr r6, [r1, #8] ldr r7, [r1, #12] ldr r4, [r1], r2 strd r6, r7, [r0, #8] ldr r9, [r1, #4] strd r4, r5, [r0], r2 ldr r10, [r1, #8] ldr r11, [r1, #12] ldr r8, [r1], r2 strd r10, r11, [r0, #8] subs r3, r3, #2 strd r8, r9, [r0], r2 bne 1b pop {r4-r11} bx lr endfunc function ff_put_pixels8_armv6, export=1 push {r4-r7} 1: ldr r5, [r1, #4] ldr r4, [r1], r2 ldr r7, [r1, #4] strd r4, r5, [r0], r2 ldr r6, [r1], r2 subs r3, r3, #2 strd r6, r7, [r0], r2 bne 1b pop {r4-r7} bx lr endfunc function ff_put_pixels8_x2_armv6, export=1 push {r4-r11, lr} mov r12, #1 orr r12, r12, r12, lsl #8 orr r12, r12, r12, lsl #16 1: ldr r4, [r1] subs r3, r3, #2 ldr r5, [r1, #4] ldr r7, [r1, #5] lsr r6, r4, #8 ldr r8, [r1, r2]! 
orr r6, r6, r5, lsl #24 ldr r9, [r1, #4] ldr r11, [r1, #5] lsr r10, r8, #8 add r1, r1, r2 orr r10, r10, r9, lsl #24 eor r14, r4, r6 uhadd8 r4, r4, r6 eor r6, r5, r7 uhadd8 r5, r5, r7 and r14, r14, r12 and r6, r6, r12 uadd8 r4, r4, r14 eor r14, r8, r10 uadd8 r5, r5, r6 eor r6, r9, r11 uhadd8 r8, r8, r10 and r14, r14, r12 uhadd8 r9, r9, r11 and r6, r6, r12 uadd8 r8, r8, r14 strd r4, r5, [r0], r2 uadd8 r9, r9, r6 strd r8, r9, [r0], r2 bne 1b pop {r4-r11, pc} endfunc function ff_put_pixels8_y2_armv6, export=1 push {r4-r11} mov r12, #1 orr r12, r12, r12, lsl #8 orr r12, r12, r12, lsl #16 ldr r4, [r1] ldr r5, [r1, #4] ldr r6, [r1, r2]! ldr r7, [r1, #4] 1: subs r3, r3, #2 uhadd8 r8, r4, r6 eor r10, r4, r6 uhadd8 r9, r5, r7 eor r11, r5, r7 and r10, r10, r12 ldr r4, [r1, r2]! uadd8 r8, r8, r10 and r11, r11, r12 uadd8 r9, r9, r11 ldr r5, [r1, #4] uhadd8 r10, r4, r6 eor r6, r4, r6 uhadd8 r11, r5, r7 and r6, r6, r12 eor r7, r5, r7 uadd8 r10, r10, r6 and r7, r7, r12 ldr r6, [r1, r2]! uadd8 r11, r11, r7 strd r8, r9, [r0], r2 ldr r7, [r1, #4] strd r10, r11, [r0], r2 bne 1b pop {r4-r11} bx lr endfunc function ff_put_pixels8_x2_no_rnd_armv6, export=1 push {r4-r9, lr} 1: subs r3, r3, #2 ldr r4, [r1] ldr r5, [r1, #4] ldr r7, [r1, #5] ldr r8, [r1, r2]! ldr r9, [r1, #4] ldr r14, [r1, #5] add r1, r1, r2 lsr r6, r4, #8 orr r6, r6, r5, lsl #24 lsr r12, r8, #8 orr r12, r12, r9, lsl #24 uhadd8 r4, r4, r6 uhadd8 r5, r5, r7 uhadd8 r8, r8, r12 uhadd8 r9, r9, r14 stm r0, {r4,r5} add r0, r0, r2 stm r0, {r8,r9} add r0, r0, r2 bne 1b pop {r4-r9, pc} endfunc function ff_put_pixels8_y2_no_rnd_armv6, export=1 push {r4-r9, lr} ldr r4, [r1] ldr r5, [r1, #4] ldr r6, [r1, r2]! ldr r7, [r1, #4] 1: subs r3, r3, #2 uhadd8 r8, r4, r6 ldr r4, [r1, r2]! uhadd8 r9, r5, r7 ldr r5, [r1, #4] uhadd8 r12, r4, r6 ldr r6, [r1, r2]! 
uhadd8 r14, r5, r7 ldr r7, [r1, #4] stm r0, {r8,r9} add r0, r0, r2 stm r0, {r12,r14} add r0, r0, r2 bne 1b pop {r4-r9, pc} endfunc function ff_avg_pixels8_armv6, export=1 pld [r1, r2] push {r4-r10, lr} mov lr, #1 orr lr, lr, lr, lsl #8 orr lr, lr, lr, lsl #16 ldrd r4, r5, [r0] ldr r10, [r1, #4] ldr r9, [r1], r2 subs r3, r3, #2 1: pld [r1, r2] eor r8, r4, r9 uhadd8 r4, r4, r9 eor r12, r5, r10 ldrd r6, r7, [r0, r2] uhadd8 r5, r5, r10 and r8, r8, lr ldr r10, [r1, #4] and r12, r12, lr uadd8 r4, r4, r8 ldr r9, [r1], r2 eor r8, r6, r9 uadd8 r5, r5, r12 pld [r1, r2, lsl #1] eor r12, r7, r10 uhadd8 r6, r6, r9 strd r4, r5, [r0], r2 uhadd8 r7, r7, r10 beq 2f and r8, r8, lr ldrd r4, r5, [r0, r2] uadd8 r6, r6, r8 ldr r10, [r1, #4] and r12, r12, lr subs r3, r3, #2 uadd8 r7, r7, r12 ldr r9, [r1], r2 strd r6, r7, [r0], r2 b 1b 2: and r8, r8, lr and r12, r12, lr uadd8 r6, r6, r8 uadd8 r7, r7, r12 strd r6, r7, [r0], r2 pop {r4-r10, pc} endfunc function ff_add_pixels_clamped_armv6, export=1 push {r4-r8,lr} mov r3, #8 1: ldm r0!, {r4,r5,r12,lr} ldrd r6, r7, [r1] pkhbt r8, r4, r5, lsl #16 pkhtb r5, r5, r4, asr #16 pkhbt r4, r12, lr, lsl #16 pkhtb lr, lr, r12, asr #16 pld [r1, r2] uxtab16 r8, r8, r6 uxtab16 r5, r5, r6, ror #8 uxtab16 r4, r4, r7 uxtab16 lr, lr, r7, ror #8 usat16 r8, #8, r8 usat16 r5, #8, r5 usat16 r4, #8, r4 usat16 lr, #8, lr orr r6, r8, r5, lsl #8 orr r7, r4, lr, lsl #8 subs r3, r3, #1 strd r6, r7, [r1], r2 bgt 1b pop {r4-r8,pc} endfunc function ff_get_pixels_armv6, export=1 pld [r1, r2] push {r4-r8, lr} mov lr, #8 1: ldrd r4, r5, [r1], r2 subs lr, lr, #1 uxtb16 r6, r4 uxtb16 r4, r4, ror #8 uxtb16 r12, r5 uxtb16 r8, r5, ror #8 pld [r1, r2] pkhbt r5, r6, r4, lsl #16 pkhtb r6, r4, r6, asr #16 pkhbt r7, r12, r8, lsl #16 pkhtb r12, r8, r12, asr #16 stm r0!, {r5,r6,r7,r12} bgt 1b pop {r4-r8, pc} endfunc function ff_diff_pixels_armv6, export=1 pld [r1, r3] pld [r2, r3] push {r4-r9, lr} mov lr, #8 1: ldrd r4, r5, [r1], r3 ldrd r6, r7, [r2], r3 uxtb16 r8, r4 uxtb16 r4, r4, ror 
#8 uxtb16 r9, r6 uxtb16 r6, r6, ror #8 pld [r1, r3] ssub16 r9, r8, r9 ssub16 r6, r4, r6 uxtb16 r8, r5 uxtb16 r5, r5, ror #8 pld [r2, r3] pkhbt r4, r9, r6, lsl #16 pkhtb r6, r6, r9, asr #16 uxtb16 r9, r7 uxtb16 r7, r7, ror #8 ssub16 r9, r8, r9 ssub16 r5, r5, r7 subs lr, lr, #1 pkhbt r8, r9, r5, lsl #16 pkhtb r9, r5, r9, asr #16 stm r0!, {r4,r6,r8,r9} bgt 1b pop {r4-r9, pc} endfunc function ff_pix_abs16_armv6, export=1 ldr r0, [sp] push {r4-r9, lr} mov r12, #0 mov lr, #0 ldm r1, {r4-r7} ldr r8, [r2] 1: ldr r9, [r2, #4] pld [r1, r3] usada8 r12, r4, r8, r12 ldr r8, [r2, #8] pld [r2, r3] usada8 lr, r5, r9, lr ldr r9, [r2, #12] usada8 r12, r6, r8, r12 subs r0, r0, #1 usada8 lr, r7, r9, lr beq 2f add r1, r1, r3 ldm r1, {r4-r7} add r2, r2, r3 ldr r8, [r2] b 1b 2: add r0, r12, lr pop {r4-r9, pc} endfunc function ff_pix_abs16_x2_armv6, export=1 ldr r12, [sp] push {r4-r11, lr} mov r0, #0 mov lr, #1 orr lr, lr, lr, lsl #8 orr lr, lr, lr, lsl #16 1: ldr r8, [r2] ldr r9, [r2, #4] lsr r10, r8, #8 ldr r4, [r1] lsr r6, r9, #8 orr r10, r10, r9, lsl #24 ldr r5, [r2, #8] eor r11, r8, r10 uhadd8 r7, r8, r10 orr r6, r6, r5, lsl #24 and r11, r11, lr uadd8 r7, r7, r11 ldr r8, [r1, #4] usada8 r0, r4, r7, r0 eor r7, r9, r6 lsr r10, r5, #8 and r7, r7, lr uhadd8 r4, r9, r6 ldr r6, [r2, #12] uadd8 r4, r4, r7 pld [r1, r3] orr r10, r10, r6, lsl #24 usada8 r0, r8, r4, r0 ldr r4, [r1, #8] eor r11, r5, r10 ldrb r7, [r2, #16] and r11, r11, lr uhadd8 r8, r5, r10 ldr r5, [r1, #12] uadd8 r8, r8, r11 pld [r2, r3] lsr r10, r6, #8 usada8 r0, r4, r8, r0 orr r10, r10, r7, lsl #24 subs r12, r12, #1 eor r11, r6, r10 add r1, r1, r3 uhadd8 r9, r6, r10 and r11, r11, lr uadd8 r9, r9, r11 add r2, r2, r3 usada8 r0, r5, r9, r0 bgt 1b pop {r4-r11, pc} endfunc .macro usad_y2 p0, p1, p2, p3, n0, n1, n2, n3 ldr \n0, [r2] eor \n1, \p0, \n0 uhadd8 \p0, \p0, \n0 and \n1, \n1, lr ldr \n2, [r1] uadd8 \p0, \p0, \n1 ldr \n1, [r2, #4] usada8 r0, \p0, \n2, r0 pld [r1, r3] eor \n3, \p1, \n1 uhadd8 \p1, \p1, \n1 and \n3, \n3, lr 
ldr \p0, [r1, #4] uadd8 \p1, \p1, \n3 ldr \n2, [r2, #8] usada8 r0, \p1, \p0, r0 pld [r2, r3] eor \p0, \p2, \n2 uhadd8 \p2, \p2, \n2 and \p0, \p0, lr ldr \p1, [r1, #8] uadd8 \p2, \p2, \p0 ldr \n3, [r2, #12] usada8 r0, \p2, \p1, r0 eor \p1, \p3, \n3 uhadd8 \p3, \p3, \n3 and \p1, \p1, lr ldr \p0, [r1, #12] uadd8 \p3, \p3, \p1 add r1, r1, r3 usada8 r0, \p3, \p0, r0 add r2, r2, r3 .endm function ff_pix_abs16_y2_armv6, export=1 pld [r1] pld [r2] ldr r12, [sp] push {r4-r11, lr} mov r0, #0 mov lr, #1 orr lr, lr, lr, lsl #8 orr lr, lr, lr, lsl #16 ldr r4, [r2] ldr r5, [r2, #4] ldr r6, [r2, #8] ldr r7, [r2, #12] add r2, r2, r3 1: usad_y2 r4, r5, r6, r7, r8, r9, r10, r11 subs r12, r12, #2 usad_y2 r8, r9, r10, r11, r4, r5, r6, r7 bgt 1b pop {r4-r11, pc} endfunc function ff_pix_abs8_armv6, export=1 pld [r2, r3] ldr r12, [sp] push {r4-r9, lr} mov r0, #0 mov lr, #0 ldrd r4, r5, [r1], r3 1: subs r12, r12, #2 ldr r7, [r2, #4] ldr r6, [r2], r3 ldrd r8, r9, [r1], r3 usada8 r0, r4, r6, r0 pld [r2, r3] usada8 lr, r5, r7, lr ldr r7, [r2, #4] ldr r6, [r2], r3 beq 2f ldrd r4, r5, [r1], r3 usada8 r0, r8, r6, r0 pld [r2, r3] usada8 lr, r9, r7, lr b 1b 2: usada8 r0, r8, r6, r0 usada8 lr, r9, r7, lr add r0, r0, lr pop {r4-r9, pc} endfunc function ff_sse16_armv6, export=1 ldr r12, [sp] push {r4-r9, lr} mov r0, #0 1: ldrd r4, r5, [r1] ldr r8, [r2] uxtb16 lr, r4 uxtb16 r4, r4, ror #8 uxtb16 r9, r8 uxtb16 r8, r8, ror #8 ldr r7, [r2, #4] usub16 lr, lr, r9 usub16 r4, r4, r8 smlad r0, lr, lr, r0 uxtb16 r6, r5 uxtb16 lr, r5, ror #8 uxtb16 r8, r7 uxtb16 r9, r7, ror #8 smlad r0, r4, r4, r0 ldrd r4, r5, [r1, #8] usub16 r6, r6, r8 usub16 r8, lr, r9 ldr r7, [r2, #8] smlad r0, r6, r6, r0 uxtb16 lr, r4 uxtb16 r4, r4, ror #8 uxtb16 r9, r7 uxtb16 r7, r7, ror #8 smlad r0, r8, r8, r0 ldr r8, [r2, #12] usub16 lr, lr, r9 usub16 r4, r4, r7 smlad r0, lr, lr, r0 uxtb16 r6, r5 uxtb16 r5, r5, ror #8 uxtb16 r9, r8 uxtb16 r8, r8, ror #8 smlad r0, r4, r4, r0 usub16 r6, r6, r9 usub16 r5, r5, r8 smlad r0, r6, r6, r0 add 
r1, r1, r3 add r2, r2, r3 subs r12, r12, #1 smlad r0, r5, r5, r0 bgt 1b pop {r4-r9, pc} endfunc function ff_pix_norm1_armv6, export=1 push {r4-r6, lr} mov r12, #16 mov lr, #0 1: ldm r0, {r2-r5} uxtb16 r6, r2 uxtb16 r2, r2, ror #8 smlad lr, r6, r6, lr uxtb16 r6, r3 smlad lr, r2, r2, lr uxtb16 r3, r3, ror #8 smlad lr, r6, r6, lr uxtb16 r6, r4 smlad lr, r3, r3, lr uxtb16 r4, r4, ror #8 smlad lr, r6, r6, lr uxtb16 r6, r5 smlad lr, r4, r4, lr uxtb16 r5, r5, ror #8 smlad lr, r6, r6, lr subs r12, r12, #1 add r0, r0, r1 smlad lr, r5, r5, lr bgt 1b mov r0, lr pop {r4-r6, pc} endfunc function ff_pix_sum_armv6, export=1 push {r4-r7, lr} mov r12, #16 mov r2, #0 mov r3, #0 mov lr, #0 ldr r4, [r0] 1: subs r12, r12, #1 ldr r5, [r0, #4] usada8 r2, r4, lr, r2 ldr r6, [r0, #8] usada8 r3, r5, lr, r3 ldr r7, [r0, #12] usada8 r2, r6, lr, r2 beq 2f ldr r4, [r0, r1]! usada8 r3, r7, lr, r3 bgt 1b 2: usada8 r3, r7, lr, r3 add r0, r2, r3 pop {r4-r7, pc} endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/dsputil_armv6.S
Unix Assembly
asf20
20,998
/* * Simple IDCT * * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2007 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" #define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */ #define ROW_SHIFT 11 #define COL_SHIFT 20 #define W13 (W1 | (W3 << 16)) #define W26 (W2 | (W6 << 16)) #define W42 (W4 | (W2 << 16)) #define W42n (-W4&0xffff | (-W2 << 16)) #define W46 (W4 | (W6 << 16)) #define W57 (W5 | (W7 << 16)) .text .align w13: .long W13 w26: .long W26 w42: .long W42 w42n: .long W42n w46: .long W46 w57: .long W57 /* Compute partial IDCT of single row. 
shift = left-shift amount r0 = source address r2 = row[2,0] <= 2 cycles r3 = row[3,1] ip = w42 <= 2 cycles Output in registers r4--r11 */ .macro idct_row shift ldr lr, w46 /* lr = W4 | (W6 << 16) */ mov r1, #(1<<(\shift-1)) smlad r4, r2, ip, r1 smlsd r7, r2, ip, r1 ldr ip, w13 /* ip = W1 | (W3 << 16) */ ldr r10,w57 /* r10 = W5 | (W7 << 16) */ smlad r5, r2, lr, r1 smlsd r6, r2, lr, r1 smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */ smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */ ldr lr, [r0, #12] /* lr = row[7,5] */ pkhtb r2, ip, r10,asr #16 /* r3 = W7 | (W3 << 16) */ pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */ smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */ smlad r8, lr, r10,r8 /* B0 += W5*row[5] + W7*row[7] */ smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */ ldr r3, w42n /* r3 = -W4 | (-W2 << 16) */ smlad r10,lr, r2, r10 /* B2 += W7*row[5] + W3*row[7] */ ldr r2, [r0, #4] /* r2 = row[6,4] */ smlsdx r11,lr, ip, r11 /* B3 += W3*row[5] - W1*row[7] */ ldr ip, w46 /* ip = W4 | (W6 << 16) */ smlad r9, lr, r1, r9 /* B1 -= W1*row[5] + W5*row[7] */ smlad r5, r2, r3, r5 /* A1 += -W4*row[4] - W2*row[6] */ smlsd r6, r2, r3, r6 /* A2 += -W4*row[4] + W2*row[6] */ smlad r4, r2, ip, r4 /* A0 += W4*row[4] + W6*row[6] */ smlsd r7, r2, ip, r7 /* A3 += W4*row[4] - W6*row[6] */ .endm /* Compute partial IDCT of half row. 
shift = left-shift amount r2 = row[2,0] r3 = row[3,1] ip = w42 Output in registers r4--r11 */ .macro idct_row4 shift ldr lr, w46 /* lr = W4 | (W6 << 16) */ ldr r10,w57 /* r10 = W5 | (W7 << 16) */ mov r1, #(1<<(\shift-1)) smlad r4, r2, ip, r1 smlsd r7, r2, ip, r1 ldr ip, w13 /* ip = W1 | (W3 << 16) */ smlad r5, r2, lr, r1 smlsd r6, r2, lr, r1 smusdx r11,r3, r10 /* r11 = B3 = W7*row[1] - W5*row[3] */ smuad r8, r3, ip /* r8 = B0 = W1*row[1] + W3*row[3] */ pkhtb r2, ip, r10,asr #16 /* r3 = W7 | (W3 << 16) */ pkhbt r1, ip, r10,lsl #16 /* r1 = W1 | (W5 << 16) */ smusdx r9, r2, r3 /* r9 = -B1 = W7*row[3] - W3*row[1] */ smusdx r10,r3, r1 /* r10 = B2 = W5*row[1] - W1*row[3] */ .endm /* Compute final part of IDCT single row without shift. Input in registers r4--r11 Output in registers ip, r4--r6, lr, r8--r10 */ .macro idct_finish add ip, r4, r8 /* r1 = A0 + B0 */ sub lr, r4, r8 /* r2 = A0 - B0 */ sub r4, r5, r9 /* r2 = A1 + B1 */ add r8, r5, r9 /* r2 = A1 - B1 */ add r5, r6, r10 /* r1 = A2 + B2 */ sub r9, r6, r10 /* r1 = A2 - B2 */ add r6, r7, r11 /* r2 = A3 + B3 */ sub r10,r7, r11 /* r2 = A3 - B3 */ .endm /* Compute final part of IDCT single row. shift = right-shift amount Input/output in registers r4--r11 */ .macro idct_finish_shift shift add r3, r4, r8 /* r3 = A0 + B0 */ sub r2, r4, r8 /* r2 = A0 - B0 */ mov r4, r3, asr #\shift mov r8, r2, asr #\shift sub r3, r5, r9 /* r3 = A1 + B1 */ add r2, r5, r9 /* r2 = A1 - B1 */ mov r5, r3, asr #\shift mov r9, r2, asr #\shift add r3, r6, r10 /* r3 = A2 + B2 */ sub r2, r6, r10 /* r2 = A2 - B2 */ mov r6, r3, asr #\shift mov r10,r2, asr #\shift add r3, r7, r11 /* r3 = A3 + B3 */ sub r2, r7, r11 /* r2 = A3 - B3 */ mov r7, r3, asr #\shift mov r11,r2, asr #\shift .endm /* Compute final part of IDCT single row, saturating results at 8 bits. 
shift = right-shift amount Input/output in registers r4--r11 */ .macro idct_finish_shift_sat shift add r3, r4, r8 /* r3 = A0 + B0 */ sub ip, r4, r8 /* ip = A0 - B0 */ usat r4, #8, r3, asr #\shift usat r8, #8, ip, asr #\shift sub r3, r5, r9 /* r3 = A1 + B1 */ add ip, r5, r9 /* ip = A1 - B1 */ usat r5, #8, r3, asr #\shift usat r9, #8, ip, asr #\shift add r3, r6, r10 /* r3 = A2 + B2 */ sub ip, r6, r10 /* ip = A2 - B2 */ usat r6, #8, r3, asr #\shift usat r10,#8, ip, asr #\shift add r3, r7, r11 /* r3 = A3 + B3 */ sub ip, r7, r11 /* ip = A3 - B3 */ usat r7, #8, r3, asr #\shift usat r11,#8, ip, asr #\shift .endm /* Compute IDCT of single row, storing as column. r0 = source r1 = dest */ function idct_row_armv6 push {lr} ldr lr, [r0, #12] /* lr = row[7,5] */ ldr ip, [r0, #4] /* ip = row[6,4] */ ldr r3, [r0, #8] /* r3 = row[3,1] */ ldr r2, [r0] /* r2 = row[2,0] */ orrs lr, lr, ip cmpeq lr, r3 cmpeq lr, r2, lsr #16 beq 1f push {r1} ldr ip, w42 /* ip = W4 | (W2 << 16) */ cmp lr, #0 beq 2f idct_row ROW_SHIFT b 3f 2: idct_row4 ROW_SHIFT 3: pop {r1} idct_finish_shift ROW_SHIFT strh r4, [r1] strh r5, [r1, #(16*2)] strh r6, [r1, #(16*4)] strh r7, [r1, #(16*6)] strh r11,[r1, #(16*1)] strh r10,[r1, #(16*3)] strh r9, [r1, #(16*5)] strh r8, [r1, #(16*7)] pop {pc} 1: mov r2, r2, lsl #3 strh r2, [r1] strh r2, [r1, #(16*2)] strh r2, [r1, #(16*4)] strh r2, [r1, #(16*6)] strh r2, [r1, #(16*1)] strh r2, [r1, #(16*3)] strh r2, [r1, #(16*5)] strh r2, [r1, #(16*7)] pop {pc} endfunc /* Compute IDCT of single column, read as row. 
r0 = source r1 = dest */ function idct_col_armv6 push {r1, lr} ldr r2, [r0] /* r2 = row[2,0] */ ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr r3, [r0, #8] /* r3 = row[3,1] */ idct_row COL_SHIFT pop {r1} idct_finish_shift COL_SHIFT strh r4, [r1] strh r5, [r1, #(16*1)] strh r6, [r1, #(16*2)] strh r7, [r1, #(16*3)] strh r11,[r1, #(16*4)] strh r10,[r1, #(16*5)] strh r9, [r1, #(16*6)] strh r8, [r1, #(16*7)] pop {pc} endfunc /* Compute IDCT of single column, read as row, store saturated 8-bit. r0 = source r1 = dest r2 = line size */ function idct_col_put_armv6 push {r1, r2, lr} ldr r2, [r0] /* r2 = row[2,0] */ ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr r3, [r0, #8] /* r3 = row[3,1] */ idct_row COL_SHIFT pop {r1, r2} idct_finish_shift_sat COL_SHIFT strb r4, [r1], r2 strb r5, [r1], r2 strb r6, [r1], r2 strb r7, [r1], r2 strb r11,[r1], r2 strb r10,[r1], r2 strb r9, [r1], r2 strb r8, [r1], r2 sub r1, r1, r2, lsl #3 pop {pc} endfunc /* Compute IDCT of single column, read as row, add/store saturated 8-bit. 
r0 = source r1 = dest r2 = line size */ function idct_col_add_armv6 push {r1, r2, lr} ldr r2, [r0] /* r2 = row[2,0] */ ldr ip, w42 /* ip = W4 | (W2 << 16) */ ldr r3, [r0, #8] /* r3 = row[3,1] */ idct_row COL_SHIFT pop {r1, r2} idct_finish ldrb r3, [r1] ldrb r7, [r1, r2] ldrb r11,[r1, r2, lsl #2] add ip, r3, ip, asr #COL_SHIFT usat ip, #8, ip add r4, r7, r4, asr #COL_SHIFT strb ip, [r1], r2 ldrb ip, [r1, r2] usat r4, #8, r4 ldrb r11,[r1, r2, lsl #2] add r5, ip, r5, asr #COL_SHIFT usat r5, #8, r5 strb r4, [r1], r2 ldrb r3, [r1, r2] ldrb ip, [r1, r2, lsl #2] strb r5, [r1], r2 ldrb r7, [r1, r2] ldrb r4, [r1, r2, lsl #2] add r6, r3, r6, asr #COL_SHIFT usat r6, #8, r6 add r10,r7, r10,asr #COL_SHIFT usat r10,#8, r10 add r9, r11,r9, asr #COL_SHIFT usat r9, #8, r9 add r8, ip, r8, asr #COL_SHIFT usat r8, #8, r8 add lr, r4, lr, asr #COL_SHIFT usat lr, #8, lr strb r6, [r1], r2 strb r10,[r1], r2 strb r9, [r1], r2 strb r8, [r1], r2 strb lr, [r1], r2 sub r1, r1, r2, lsl #3 pop {pc} endfunc /* Compute 8 IDCT row transforms. 
func = IDCT row->col function width = width of columns in bytes */ .macro idct_rows func width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func sub r0, r0, #(16*5) add r1, r1, #\width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func add r0, r0, #(16*2) add r1, r1, #\width bl \func sub r0, r0, #(16*7) .endm /* void ff_simple_idct_armv6(DCTELEM *data); */ function ff_simple_idct_armv6, export=1 push {r4-r11, lr} sub sp, sp, #128 mov r1, sp idct_rows idct_row_armv6, 2 mov r1, r0 mov r0, sp idct_rows idct_col_armv6, 2 add sp, sp, #128 pop {r4-r11, pc} endfunc /* ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data); */ function ff_simple_idct_add_armv6, export=1 push {r0, r1, r4-r11, lr} sub sp, sp, #128 mov r0, r2 mov r1, sp idct_rows idct_row_armv6, 2 mov r0, sp ldr r1, [sp, #128] ldr r2, [sp, #(128+4)] idct_rows idct_col_add_armv6, 1 add sp, sp, #(128+8) pop {r4-r11, pc} endfunc /* ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data); */ function ff_simple_idct_put_armv6, export=1 push {r0, r1, r4-r11, lr} sub sp, sp, #128 mov r0, r2 mov r1, sp idct_rows idct_row_armv6, 2 mov r0, sp ldr r1, [sp, #128] ldr r2, [sp, #(128+4)] idct_rows idct_col_put_armv6, 1 add sp, sp, #(128+8) pop {r4-r11, pc} endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/simple_idct_armv6.S
Unix Assembly
asf20
13,383
/* * ARM NEON optimised FFT * * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * Copyright (c) 2009 Naotoshi Nojiri * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" #define M_SQRT1_2 0.70710678118654752440 .text function fft4_neon vld1.32 {d0-d3}, [r0,:128] vext.32 q8, q1, q1, #1 @ i2,r3 d3=i3,r2 vsub.f32 d6, d0, d1 @ r0-r1,i0-i1 vsub.f32 d7, d16, d17 @ r3-r2,i2-i3 vadd.f32 d4, d0, d1 @ r0+r1,i0+i1 vadd.f32 d5, d2, d3 @ i2+i3,r2+r3 vadd.f32 d1, d6, d7 vsub.f32 d3, d6, d7 vadd.f32 d0, d4, d5 vsub.f32 d2, d4, d5 vst1.32 {d0-d3}, [r0,:128] bx lr endfunc function fft8_neon mov r1, r0 vld1.32 {d0-d3}, [r1,:128]! 
vld1.32 {d16-d19}, [r1,:128] movw r2, #0x04f3 @ sqrt(1/2) movt r2, #0x3f35 eor r3, r2, #1<<31 vdup.32 d31, r2 vext.32 q11, q1, q1, #1 @ i2,r3,i3,r2 vadd.f32 d4, d16, d17 @ r4+r5,i4+i5 vmov d28, r3, r2 vadd.f32 d5, d18, d19 @ r6+r7,i6+i7 vsub.f32 d17, d16, d17 @ r4-r5,i4-i5 vsub.f32 d19, d18, d19 @ r6-r7,i6-i7 vrev64.32 d29, d28 vadd.f32 d20, d0, d1 @ r0+r1,i0+i1 vadd.f32 d21, d2, d3 @ r2+r3,i2+i3 vmul.f32 d26, d17, d28 @ -a2r*w,a2i*w vext.32 q3, q2, q2, #1 vmul.f32 d27, d19, d29 @ a3r*w,-a3i*w vsub.f32 d23, d22, d23 @ i2-i3,r3-r2 vsub.f32 d22, d0, d1 @ r0-r1,i0-i1 vmul.f32 d24, d17, d31 @ a2r*w,a2i*w vmul.f32 d25, d19, d31 @ a3r*w,a3i*w vadd.f32 d0, d20, d21 vsub.f32 d2, d20, d21 vadd.f32 d1, d22, d23 vrev64.32 q13, q13 vsub.f32 d3, d22, d23 vsub.f32 d6, d6, d7 vadd.f32 d24, d24, d26 @ a2r+a2i,a2i-a2r t1,t2 vadd.f32 d25, d25, d27 @ a3r-a3i,a3i+a3r t5,t6 vadd.f32 d7, d4, d5 vsub.f32 d18, d2, d6 vext.32 q13, q12, q12, #1 vadd.f32 d2, d2, d6 vsub.f32 d16, d0, d7 vadd.f32 d5, d25, d24 vsub.f32 d4, d26, d27 vadd.f32 d0, d0, d7 vsub.f32 d17, d1, d5 vsub.f32 d19, d3, d4 vadd.f32 d3, d3, d4 vadd.f32 d1, d1, d5 vst1.32 {d16-d19}, [r1,:128] vst1.32 {d0-d3}, [r0,:128] bx lr endfunc function fft16_neon movrel r1, mppm vld1.32 {d16-d19}, [r0,:128]! @ q8{r0,i0,r1,i1} q9{r2,i2,r3,i3} pld [r0, #32] vld1.32 {d2-d3}, [r1,:128] vext.32 q13, q9, q9, #1 vld1.32 {d22-d25}, [r0,:128]! @ q11{r4,i4,r5,i5} q12{r6,i5,r7,i7} vadd.f32 d4, d16, d17 vsub.f32 d5, d16, d17 vadd.f32 d18, d18, d19 vsub.f32 d19, d26, d27 vadd.f32 d20, d22, d23 vsub.f32 d22, d22, d23 vsub.f32 d23, d24, d25 vadd.f32 q8, q2, q9 @ {r0,i0,r1,i1} vadd.f32 d21, d24, d25 vmul.f32 d24, d22, d2 vsub.f32 q9, q2, q9 @ {r2,i2,r3,i3} vmul.f32 d25, d23, d3 vuzp.32 d16, d17 @ {r0,r1,i0,i1} vmul.f32 q1, q11, d2[1] vuzp.32 d18, d19 @ {r2,r3,i2,i3} vrev64.32 q12, q12 vadd.f32 q11, q12, q1 @ {t1a,t2a,t5,t6} vld1.32 {d24-d27}, [r0,:128]! 
@ q12{r8,i8,r9,i9} q13{r10,i10,r11,i11} vzip.32 q10, q11 vld1.32 {d28-d31}, [r0,:128] @ q14{r12,i12,r13,i13} q15{r14,i14,r15,i15} vadd.f32 d0, d22, d20 vadd.f32 d1, d21, d23 vsub.f32 d2, d21, d23 vsub.f32 d3, d22, d20 sub r0, r0, #96 vext.32 q13, q13, q13, #1 vsub.f32 q10, q8, q0 @ {r4,r5,i4,i5} vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1} vext.32 q15, q15, q15, #1 vsub.f32 q11, q9, q1 @ {r6,r7,i6,i7} vswp d25, d26 @ q12{r8,i8,i10,r11} q13{r9,i9,i11,r10} vadd.f32 q9, q9, q1 @ {r2,r3,i2,i3} vswp d29, d30 @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14} vadd.f32 q0, q12, q13 @ {t1,t2,t5,t6} vadd.f32 q1, q14, q15 @ {t1a,t2a,t5a,t6a} movrel r2, X(ff_cos_16) vsub.f32 q13, q12, q13 @ {t3,t4,t7,t8} vrev64.32 d1, d1 vsub.f32 q15, q14, q15 @ {t3a,t4a,t7a,t8a} vrev64.32 d3, d3 movrel r3, pmmp vswp d1, d26 @ q0{t1,t2,t3,t4} q13{t6,t5,t7,t8} vswp d3, d30 @ q1{t1a,t2a,t3a,t4a} q15{t6a,t5a,t7a,t8a} vadd.f32 q12, q0, q13 @ {r8,i8,r9,i9} vadd.f32 q14, q1, q15 @ {r12,i12,r13,i13} vld1.32 {d4-d5}, [r2,:64] vsub.f32 q13, q0, q13 @ {r10,i10,r11,i11} vsub.f32 q15, q1, q15 @ {r14,i14,r15,i15} vswp d25, d28 @ q12{r8,i8,r12,i12} q14{r9,i9,r13,i13} vld1.32 {d6-d7}, [r3,:128] vrev64.32 q1, q14 vmul.f32 q14, q14, d4[1] vmul.f32 q1, q1, q3 vmla.f32 q14, q1, d5[1] @ {t1a,t2a,t5a,t6a} vswp d27, d30 @ q13{r10,i10,r14,i14} q15{r11,i11,r15,i15} vzip.32 q12, q14 vadd.f32 d0, d28, d24 vadd.f32 d1, d25, d29 vsub.f32 d2, d25, d29 vsub.f32 d3, d28, d24 vsub.f32 q12, q8, q0 @ {r8,r9,i8,i9} vadd.f32 q8, q8, q0 @ {r0,r1,i0,i1} vsub.f32 q14, q10, q1 @ {r12,r13,i12,i13} mov r1, #32 vadd.f32 q10, q10, q1 @ {r4,r5,i4,i5} vrev64.32 q0, q13 vmul.f32 q13, q13, d5[0] vrev64.32 q1, q15 vmul.f32 q15, q15, d5[1] vst2.32 {d16-d17},[r0,:128], r1 vmul.f32 q0, q0, q3 vst2.32 {d20-d21},[r0,:128], r1 vmul.f32 q1, q1, q3 vmla.f32 q13, q0, d5[0] @ {t1,t2,t5,t6} vmla.f32 q15, q1, d4[1] @ {t1a,t2a,t5a,t6a} vst2.32 {d24-d25},[r0,:128], r1 vst2.32 {d28-d29},[r0,:128] vzip.32 q13, q15 sub r0, r0, #80 vadd.f32 d0, d30, d26 vadd.f32 d1, d27, 
d31 vsub.f32 d2, d27, d31 vsub.f32 d3, d30, d26 vsub.f32 q13, q9, q0 @ {r10,r11,i10,i11} vadd.f32 q9, q9, q0 @ {r2,r3,i2,i3} vsub.f32 q15, q11, q1 @ {r14,r15,i14,i15} vadd.f32 q11, q11, q1 @ {r6,r7,i6,i7} vst2.32 {d18-d19},[r0,:128], r1 vst2.32 {d22-d23},[r0,:128], r1 vst2.32 {d26-d27},[r0,:128], r1 vst2.32 {d30-d31},[r0,:128] bx lr endfunc function fft_pass_neon push {r4-r6,lr} mov r6, r2 @ n lsl r5, r2, #3 @ 2 * n * sizeof FFTSample lsl r4, r2, #4 @ 2 * n * sizeof FFTComplex lsl r2, r2, #5 @ 4 * n * sizeof FFTComplex add r3, r2, r4 add r4, r4, r0 @ &z[o1] add r2, r2, r0 @ &z[o2] add r3, r3, r0 @ &z[o3] vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]} movrel r12, pmmp vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]} add r5, r5, r1 @ wim vld1.32 {d6-d7}, [r12,:128] @ pmmp vswp d21, d22 vld1.32 {d4}, [r1,:64]! @ {wre[0],wre[1]} sub r5, r5, #4 @ wim-- vrev64.32 q1, q11 vmul.f32 q11, q11, d4[1] vmul.f32 q1, q1, q3 vld1.32 {d5[0]}, [r5,:32] @ d5[0] = wim[-1] vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a} vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]} sub r6, r6, #1 @ n-- vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]} vzip.32 q10, q11 vadd.f32 d0, d22, d20 vadd.f32 d1, d21, d23 vsub.f32 d2, d21, d23 vsub.f32 d3, d22, d20 vsub.f32 q10, q8, q0 vadd.f32 q8, q8, q0 vsub.f32 q11, q9, q1 vadd.f32 q9, q9, q1 vst2.32 {d20-d21},[r2,:128]! @ {z[o2],z[o2+1]} vst2.32 {d16-d17},[r0,:128]! @ {z[0],z[1]} vst2.32 {d22-d23},[r3,:128]! @ {z[o3],z[o3+1]} vst2.32 {d18-d19},[r4,:128]! @ {z[o1],z[o1+1]} sub r5, r5, #8 @ wim -= 2 1: vld1.32 {d20-d21},[r2,:128] @ {z[o2],z[o2+1]} vld1.32 {d22-d23},[r3,:128] @ {z[o3],z[o3+1]} vswp d21, d22 vld1.32 {d4}, [r1]! 
@ {wre[0],wre[1]} vrev64.32 q0, q10 vmul.f32 q10, q10, d4[0] vrev64.32 q1, q11 vmul.f32 q11, q11, d4[1] vld1.32 {d5}, [r5] @ {wim[-1],wim[0]} vmul.f32 q0, q0, q3 sub r5, r5, #8 @ wim -= 2 vmul.f32 q1, q1, q3 vmla.f32 q10, q0, d5[1] @ {t1,t2,t5,t6} vmla.f32 q11, q1, d5[0] @ {t1a,t2a,t5a,t6a} vld2.32 {d16-d17},[r0,:128] @ {z[0],z[1]} subs r6, r6, #1 @ n-- vld2.32 {d18-d19},[r4,:128] @ {z[o1],z[o1+1]} vzip.32 q10, q11 vadd.f32 d0, d22, d20 vadd.f32 d1, d21, d23 vsub.f32 d2, d21, d23 vsub.f32 d3, d22, d20 vsub.f32 q10, q8, q0 vadd.f32 q8, q8, q0 vsub.f32 q11, q9, q1 vadd.f32 q9, q9, q1 vst2.32 {d20-d21}, [r2,:128]! @ {z[o2],z[o2+1]} vst2.32 {d16-d17}, [r0,:128]! @ {z[0],z[1]} vst2.32 {d22-d23}, [r3,:128]! @ {z[o3],z[o3+1]} vst2.32 {d18-d19}, [r4,:128]! @ {z[o1],z[o1+1]} bne 1b pop {r4-r6,pc} endfunc .macro def_fft n, n2, n4 .align 6 function fft\n\()_neon push {r4, lr} mov r4, r0 bl fft\n2\()_neon add r0, r4, #\n4*2*8 bl fft\n4\()_neon add r0, r4, #\n4*3*8 bl fft\n4\()_neon mov r0, r4 pop {r4, lr} movrel r1, X(ff_cos_\n) mov r2, #\n4/2 b fft_pass_neon endfunc .endm def_fft 32, 16, 8 def_fft 64, 32, 16 def_fft 128, 64, 32 def_fft 256, 128, 64 def_fft 512, 256, 128 def_fft 1024, 512, 256 def_fft 2048, 1024, 512 def_fft 4096, 2048, 1024 def_fft 8192, 4096, 2048 def_fft 16384, 8192, 4096 def_fft 32768, 16384, 8192 def_fft 65536, 32768, 16384 function ff_fft_calc_neon, export=1 ldr r2, [r0] sub r2, r2, #2 movrel r3, fft_tab_neon ldr r3, [r3, r2, lsl #2] mov r0, r1 bx r3 endfunc function ff_fft_permute_neon, export=1 push {r4,lr} mov r12, #1 ldr r2, [r0] @ nbits ldr r3, [r0, #20] @ tmp_buf ldr r0, [r0, #8] @ revtab lsl r12, r12, r2 mov r2, r12 1: vld1.32 {d0-d1}, [r1,:128]! ldr r4, [r0], #4 uxth lr, r4 uxth r4, r4, ror #16 add lr, r3, lr, lsl #3 add r4, r3, r4, lsl #3 vst1.32 {d0}, [lr,:64] vst1.32 {d1}, [r4,:64] subs r12, r12, #2 bgt 1b sub r1, r1, r2, lsl #3 1: vld1.32 {d0-d3}, [r3,:128]! vst1.32 {d0-d3}, [r1,:128]! 
subs r2, r2, #4 bgt 1b pop {r4,pc} endfunc .section .rodata .align 4 fft_tab_neon: .word fft4_neon .word fft8_neon .word fft16_neon .word fft32_neon .word fft64_neon .word fft128_neon .word fft256_neon .word fft512_neon .word fft1024_neon .word fft2048_neon .word fft4096_neon .word fft8192_neon .word fft16384_neon .word fft32768_neon .word fft65536_neon .size fft_tab_neon, . - fft_tab_neon .align 4 pmmp: .float +1.0, -1.0, -1.0, +1.0 mppm: .float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
123linslouis-android-video-cutter
jni/libavcodec/arm/fft_neon.S
Unix Assembly
asf20
15,210
/* * Copyright (c) 2009 David Conrad * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "asm.S" .section .rodata .align 4 vp3_idct_constants: .short 64277, 60547, 54491, 46341, 36410, 25080, 12785 #define xC1S7 d0[0] #define xC2S6 d0[1] #define xC3S5 d0[2] #define xC4S4 d0[3] #define xC5S3 d1[0] #define xC6S2 d1[1] #define xC7S1 d1[2] .text .macro vp3_loop_filter vsubl.u8 q3, d18, d17 vsubl.u8 q2, d16, d19 vadd.i16 q1, q3, q3 vadd.i16 q2, q2, q3 vadd.i16 q0, q1, q2 vrshr.s16 q0, q0, #3 vmovl.u8 q9, d18 vdup.u16 q15, r2 vabs.s16 q1, q0 vshr.s16 q0, q0, #15 vqsub.u16 q2, q15, q1 vqsub.u16 q3, q2, q1 vsub.i16 q1, q2, q3 veor q1, q1, q0 vsub.i16 q0, q1, q0 vaddw.u8 q2, q0, d17 vsub.i16 q3, q9, q0 vqmovun.s16 d0, q2 vqmovun.s16 d1, q3 .endm function ff_vp3_v_loop_filter_neon, export=1 sub ip, r0, r1 sub r0, r0, r1, lsl #1 vld1.64 {d16}, [r0,:64], r1 vld1.64 {d17}, [r0,:64], r1 vld1.64 {d18}, [r0,:64], r1 vld1.64 {d19}, [r0,:64], r1 ldrb r2, [r2, #129*4] vp3_loop_filter vst1.64 {d0}, [ip,:64], r1 vst1.64 {d1}, [ip,:64], r1 bx lr endfunc function ff_vp3_h_loop_filter_neon, export=1 sub ip, r0, #1 sub r0, r0, #2 vld1.32 {d16[]}, [r0], r1 vld1.32 {d17[]}, [r0], r1 vld1.32 {d18[]}, [r0], r1 vld1.32 {d19[]}, [r0], r1 vld1.32 {d16[1]}, [r0], r1 vld1.32 {d17[1]}, [r0], r1 vld1.32 
{d18[1]}, [r0], r1 vld1.32 {d19[1]}, [r0], r1 ldrb r2, [r2, #129*4] vtrn.8 d16, d17 vtrn.8 d18, d19 vtrn.16 d16, d18 vtrn.16 d17, d19 vp3_loop_filter vtrn.8 d0, d1 vst1.16 {d0[0]}, [ip], r1 vst1.16 {d1[0]}, [ip], r1 vst1.16 {d0[1]}, [ip], r1 vst1.16 {d1[1]}, [ip], r1 vst1.16 {d0[2]}, [ip], r1 vst1.16 {d1[2]}, [ip], r1 vst1.16 {d0[3]}, [ip], r1 vst1.16 {d1[3]}, [ip], r1 bx lr endfunc function vp3_idct_start_neon vpush {d8-d15} movrel r3, vp3_idct_constants vld1.64 {d0-d1}, [r3,:128] vld1.64 {d16-d19}, [r2,:128]! vld1.64 {d20-d23}, [r2,:128]! vld1.64 {d24-d27}, [r2,:128]! vadd.s16 q1, q8, q12 vsub.s16 q8, q8, q12 vld1.64 {d28-d31}, [r2,:128]! endfunc function vp3_idct_core_neon vmull.s16 q2, d18, xC1S7 // (ip[1] * C1) << 16 vmull.s16 q3, d19, xC1S7 vmull.s16 q4, d2, xC4S4 // ((ip[0] + ip[4]) * C4) << 16 vmull.s16 q5, d3, xC4S4 vmull.s16 q6, d16, xC4S4 // ((ip[0] - ip[4]) * C4) << 16 vmull.s16 q7, d17, xC4S4 vshrn.s32 d4, q2, #16 vshrn.s32 d5, q3, #16 vshrn.s32 d6, q4, #16 vshrn.s32 d7, q5, #16 vshrn.s32 d8, q6, #16 vshrn.s32 d9, q7, #16 vadd.s16 q12, q1, q3 // E = (ip[0] + ip[4]) * C4 vadd.s16 q8, q8, q4 // F = (ip[0] - ip[4]) * C4 vadd.s16 q1, q2, q9 // ip[1] * C1 vmull.s16 q2, d30, xC1S7 // (ip[7] * C1) << 16 vmull.s16 q3, d31, xC1S7 vmull.s16 q4, d30, xC7S1 // (ip[7] * C7) << 16 vmull.s16 q5, d31, xC7S1 vmull.s16 q6, d18, xC7S1 // (ip[1] * C7) << 16 vmull.s16 q7, d19, xC7S1 vshrn.s32 d4, q2, #16 vshrn.s32 d5, q3, #16 vshrn.s32 d6, q4, #16 // ip[7] * C7 vshrn.s32 d7, q5, #16 vshrn.s32 d8, q6, #16 // ip[1] * C7 vshrn.s32 d9, q7, #16 vadd.s16 q2, q2, q15 // ip[7] * C1 vadd.s16 q9, q1, q3 // A = ip[1] * C1 + ip[7] * C7 vsub.s16 q15, q4, q2 // B = ip[1] * C7 - ip[7] * C1 vmull.s16 q2, d22, xC5S3 // (ip[3] * C5) << 16 vmull.s16 q3, d23, xC5S3 vmull.s16 q4, d22, xC3S5 // (ip[3] * C3) << 16 vmull.s16 q5, d23, xC3S5 vmull.s16 q6, d26, xC5S3 // (ip[5] * C5) << 16 vmull.s16 q7, d27, xC5S3 vshrn.s32 d4, q2, #16 vshrn.s32 d5, q3, #16 vshrn.s32 d6, q4, #16 vshrn.s32 d7, q5, #16 
vshrn.s32 d8, q6, #16 vshrn.s32 d9, q7, #16 vadd.s16 q3, q3, q11 // ip[3] * C3 vadd.s16 q4, q4, q13 // ip[5] * C5 vadd.s16 q1, q2, q11 // ip[3] * C5 vadd.s16 q11, q3, q4 // C = ip[3] * C3 + ip[5] * C5 vmull.s16 q2, d26, xC3S5 // (ip[5] * C3) << 16 vmull.s16 q3, d27, xC3S5 vmull.s16 q4, d20, xC2S6 // (ip[2] * C2) << 16 vmull.s16 q5, d21, xC2S6 vmull.s16 q6, d28, xC6S2 // (ip[6] * C6) << 16 vmull.s16 q7, d29, xC6S2 vshrn.s32 d4, q2, #16 vshrn.s32 d5, q3, #16 vshrn.s32 d6, q4, #16 vshrn.s32 d7, q5, #16 vshrn.s32 d8, q6, #16 // ip[6] * C6 vshrn.s32 d9, q7, #16 vadd.s16 q2, q2, q13 // ip[5] * C3 vadd.s16 q3, q3, q10 // ip[2] * C2 vsub.s16 q13, q2, q1 // D = ip[5] * C3 - ip[3] * C5 vsub.s16 q1, q9, q11 // (A - C) vadd.s16 q11, q9, q11 // Cd = A + C vsub.s16 q9, q15, q13 // (B - D) vadd.s16 q13, q15, q13 // Dd = B + D vadd.s16 q15, q3, q4 // G = ip[2] * C2 + ip[6] * C6 vmull.s16 q2, d2, xC4S4 // ((A - C) * C4) << 16 vmull.s16 q3, d3, xC4S4 vmull.s16 q4, d28, xC2S6 // (ip[6] * C2) << 16 vmull.s16 q5, d29, xC2S6 vmull.s16 q6, d20, xC6S2 // (ip[2] * C6) << 16 vmull.s16 q7, d21, xC6S2 vshrn.s32 d4, q2, #16 vshrn.s32 d5, q3, #16 vshrn.s32 d6, q4, #16 vshrn.s32 d7, q5, #16 vshrn.s32 d8, q6, #16 // ip[2] * C6 vmull.s16 q5, d18, xC4S4 // ((B - D) * C4) << 16 vmull.s16 q6, d19, xC4S4 vshrn.s32 d9, q7, #16 vadd.s16 q3, q3, q14 // ip[6] * C2 vadd.s16 q10, q1, q2 // Ad = (A - C) * C4 vsub.s16 q14, q4, q3 // H = ip[2] * C6 - ip[6] * C2 bx lr endfunc .macro VP3_IDCT_END type function vp3_idct_end_\type\()_neon .ifc \type, col vdup.16 q0, r3 vadd.s16 q12, q12, q0 vadd.s16 q8, q8, q0 .endif vshrn.s32 d2, q5, #16 vshrn.s32 d3, q6, #16 vadd.s16 q2, q12, q15 // Gd = E + G vadd.s16 q9, q1, q9 // (B - D) * C4 vsub.s16 q12, q12, q15 // Ed = E - G vsub.s16 q3, q8, q10 // Fd = F - Ad vadd.s16 q10, q8, q10 // Add = F + Ad vadd.s16 q4, q9, q14 // Hd = Bd + H vsub.s16 q14, q9, q14 // Bdd = Bd - H vadd.s16 q8, q2, q11 // [0] = Gd + Cd vsub.s16 q15, q2, q11 // [7] = Gd - Cd vadd.s16 q9, q10, q4 // 
[1] = Add + Hd vsub.s16 q10, q10, q4 // [2] = Add - Hd vadd.s16 q11, q12, q13 // [3] = Ed + Dd vsub.s16 q12, q12, q13 // [4] = Ed - Dd .ifc \type, row vtrn.16 q8, q9 .endif vadd.s16 q13, q3, q14 // [5] = Fd + Bdd vsub.s16 q14, q3, q14 // [6] = Fd - Bdd .ifc \type, row // 8x8 transpose vtrn.16 q10, q11 vtrn.16 q12, q13 vtrn.16 q14, q15 vtrn.32 q8, q10 vtrn.32 q9, q11 vtrn.32 q12, q14 vtrn.32 q13, q15 vswp d17, d24 vswp d19, d26 vadd.s16 q1, q8, q12 vswp d21, d28 vsub.s16 q8, q8, q12 vswp d23, d30 .endif bx lr endfunc .endm VP3_IDCT_END row VP3_IDCT_END col function ff_vp3_idct_neon, export=1 mov ip, lr mov r2, r0 bl vp3_idct_start_neon bl vp3_idct_end_row_neon mov r3, #8 bl vp3_idct_core_neon bl vp3_idct_end_col_neon mov lr, ip vpop {d8-d15} vshr.s16 q8, q8, #4 vshr.s16 q9, q9, #4 vshr.s16 q10, q10, #4 vshr.s16 q11, q11, #4 vshr.s16 q12, q12, #4 vst1.64 {d16-d19}, [r0,:128]! vshr.s16 q13, q13, #4 vshr.s16 q14, q14, #4 vst1.64 {d20-d23}, [r0,:128]! vshr.s16 q15, q15, #4 vst1.64 {d24-d27}, [r0,:128]! vst1.64 {d28-d31}, [r0,:128]! 
bx lr endfunc function ff_vp3_idct_put_neon, export=1 mov ip, lr bl vp3_idct_start_neon bl vp3_idct_end_row_neon mov r3, #8 add r3, r3, #2048 // convert signed pixel to unsigned bl vp3_idct_core_neon bl vp3_idct_end_col_neon mov lr, ip vpop {d8-d15} vqshrun.s16 d0, q8, #4 vqshrun.s16 d1, q9, #4 vqshrun.s16 d2, q10, #4 vqshrun.s16 d3, q11, #4 vst1.64 {d0}, [r0,:64], r1 vqshrun.s16 d4, q12, #4 vst1.64 {d1}, [r0,:64], r1 vqshrun.s16 d5, q13, #4 vst1.64 {d2}, [r0,:64], r1 vqshrun.s16 d6, q14, #4 vst1.64 {d3}, [r0,:64], r1 vqshrun.s16 d7, q15, #4 vst1.64 {d4}, [r0,:64], r1 vst1.64 {d5}, [r0,:64], r1 vst1.64 {d6}, [r0,:64], r1 vst1.64 {d7}, [r0,:64], r1 bx lr endfunc function ff_vp3_idct_add_neon, export=1 mov ip, lr bl vp3_idct_start_neon bl vp3_idct_end_row_neon mov r3, #8 bl vp3_idct_core_neon bl vp3_idct_end_col_neon mov lr, ip vpop {d8-d15} mov r2, r0 vld1.64 {d0}, [r0,:64], r1 vshr.s16 q8, q8, #4 vld1.64 {d1}, [r0,:64], r1 vshr.s16 q9, q9, #4 vld1.64 {d2}, [r0,:64], r1 vaddw.u8 q8, q8, d0 vld1.64 {d3}, [r0,:64], r1 vaddw.u8 q9, q9, d1 vld1.64 {d4}, [r0,:64], r1 vshr.s16 q10, q10, #4 vld1.64 {d5}, [r0,:64], r1 vshr.s16 q11, q11, #4 vld1.64 {d6}, [r0,:64], r1 vqmovun.s16 d0, q8 vld1.64 {d7}, [r0,:64], r1 vqmovun.s16 d1, q9 vaddw.u8 q10, q10, d2 vaddw.u8 q11, q11, d3 vshr.s16 q12, q12, #4 vshr.s16 q13, q13, #4 vqmovun.s16 d2, q10 vqmovun.s16 d3, q11 vaddw.u8 q12, q12, d4 vaddw.u8 q13, q13, d5 vshr.s16 q14, q14, #4 vshr.s16 q15, q15, #4 vst1.64 {d0}, [r2,:64], r1 vqmovun.s16 d4, q12 vst1.64 {d1}, [r2,:64], r1 vqmovun.s16 d5, q13 vst1.64 {d2}, [r2,:64], r1 vaddw.u8 q14, q14, d6 vst1.64 {d3}, [r2,:64], r1 vaddw.u8 q15, q15, d7 vst1.64 {d4}, [r2,:64], r1 vqmovun.s16 d6, q14 vst1.64 {d5}, [r2,:64], r1 vqmovun.s16 d7, q15 vst1.64 {d6}, [r2,:64], r1 vst1.64 {d7}, [r2,:64], r1 bx lr endfunc function ff_vp3_idct_dc_add_neon, export=1 ldrsh r2, [r2] movw r3, #46341 mul r2, r3, r2 smulwt r2, r3, r2 mov r3, r0 vdup.16 q15, r2 vrshr.s16 q15, q15, #4 vld1.8 {d0}, [r0,:64], r1 
vld1.8 {d1}, [r0,:64], r1 vld1.8 {d2}, [r0,:64], r1 vaddw.u8 q8, q15, d0 vld1.8 {d3}, [r0,:64], r1 vaddw.u8 q9, q15, d1 vld1.8 {d4}, [r0,:64], r1 vaddw.u8 q10, q15, d2 vld1.8 {d5}, [r0,:64], r1 vaddw.u8 q11, q15, d3 vld1.8 {d6}, [r0,:64], r1 vaddw.u8 q12, q15, d4 vld1.8 {d7}, [r0,:64], r1 vaddw.u8 q13, q15, d5 vqmovun.s16 d0, q8 vaddw.u8 q14, q15, d6 vqmovun.s16 d1, q9 vaddw.u8 q15, q15, d7 vqmovun.s16 d2, q10 vst1.8 {d0}, [r3,:64], r1 vqmovun.s16 d3, q11 vst1.8 {d1}, [r3,:64], r1 vqmovun.s16 d4, q12 vst1.8 {d2}, [r3,:64], r1 vqmovun.s16 d5, q13 vst1.8 {d3}, [r3,:64], r1 vqmovun.s16 d6, q14 vst1.8 {d4}, [r3,:64], r1 vqmovun.s16 d7, q15 vst1.8 {d5}, [r3,:64], r1 vst1.8 {d6}, [r3,:64], r1 vst1.8 {d7}, [r3,:64], r1 bx lr endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/vp3dsp_neon.S
Unix Assembly
asf20
14,171
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdint.h> #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "dsputil_arm.h" void ff_simple_idct_armv6(DCTELEM *data); void ff_simple_idct_put_armv6(uint8_t *dest, int line_size, DCTELEM *data); void ff_simple_idct_add_armv6(uint8_t *dest, int line_size, DCTELEM *data); void ff_put_pixels16_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels16_x2_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels16_y2_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels16_x2_no_rnd_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels16_y2_no_rnd_armv6(uint8_t *, const uint8_t *, int, int); void ff_avg_pixels16_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels8_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels8_x2_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels8_y2_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels8_x2_no_rnd_armv6(uint8_t *, const uint8_t *, int, int); void ff_put_pixels8_y2_no_rnd_armv6(uint8_t *, const uint8_t *, int, int); void ff_avg_pixels8_armv6(uint8_t *, const uint8_t *, int, int); void ff_add_pixels_clamped_armv6(const DCTELEM *block, 
uint8_t *restrict pixels, int line_size); void ff_get_pixels_armv6(DCTELEM *block, const uint8_t *pixels, int stride); void ff_diff_pixels_armv6(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride); int ff_pix_abs16_armv6(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h); int ff_pix_abs16_x2_armv6(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h); int ff_pix_abs16_y2_armv6(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h); int ff_pix_abs8_armv6(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h); int ff_sse16_armv6(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h); int ff_pix_norm1_armv6(uint8_t *pix, int line_size); int ff_pix_sum_armv6(uint8_t *pix, int line_size); void av_cold ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx) { if (!avctx->lowres && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) { c->idct_put = ff_simple_idct_put_armv6; c->idct_add = ff_simple_idct_add_armv6; c->idct = ff_simple_idct_armv6; c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM; } c->put_pixels_tab[0][0] = ff_put_pixels16_armv6; c->put_pixels_tab[0][1] = ff_put_pixels16_x2_armv6; c->put_pixels_tab[0][2] = ff_put_pixels16_y2_armv6; /* c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_armv6; */ c->put_pixels_tab[1][0] = ff_put_pixels8_armv6; c->put_pixels_tab[1][1] = ff_put_pixels8_x2_armv6; c->put_pixels_tab[1][2] = ff_put_pixels8_y2_armv6; /* c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_armv6; */ c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_armv6; c->put_no_rnd_pixels_tab[0][1] = ff_put_pixels16_x2_no_rnd_armv6; c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_armv6; /* c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_armv6; */ c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_armv6; c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_armv6; c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_armv6; /* c->put_no_rnd_pixels_tab[1][3] = 
ff_put_pixels8_xy2_no_rnd_armv6; */ c->avg_pixels_tab[0][0] = ff_avg_pixels16_armv6; c->avg_pixels_tab[1][0] = ff_avg_pixels8_armv6; c->add_pixels_clamped = ff_add_pixels_clamped_armv6; c->get_pixels = ff_get_pixels_armv6; c->diff_pixels = ff_diff_pixels_armv6; c->pix_abs[0][0] = ff_pix_abs16_armv6; c->pix_abs[0][1] = ff_pix_abs16_x2_armv6; c->pix_abs[0][2] = ff_pix_abs16_y2_armv6; c->pix_abs[1][0] = ff_pix_abs8_armv6; c->sad[0] = ff_pix_abs16_armv6; c->sad[1] = ff_pix_abs8_armv6; c->sse[0] = ff_sse16_armv6; c->pix_norm1 = ff_pix_norm1_armv6; c->pix_sum = ff_pix_sum_armv6; }
123linslouis-android-video-cutter
jni/libavcodec/arm/dsputil_init_armv6.c
C
asf20
5,049
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_ARM_MPEGVIDEO_H #define AVCODEC_ARM_MPEGVIDEO_H #include "libavcodec/mpegvideo.h" void MPV_common_init_iwmmxt(MpegEncContext *s); void MPV_common_init_armv5te(MpegEncContext *s); #endif
123linslouis-android-video-cutter
jni/libavcodec/arm/mpegvideo_arm.h
C
asf20
969
@ @ ARMv4 optimized DSP utils @ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp> @ @ This file is part of FFmpeg. @ @ FFmpeg is free software; you can redistribute it and/or @ modify it under the terms of the GNU Lesser General Public @ License as published by the Free Software Foundation; either @ version 2.1 of the License, or (at your option) any later version. @ @ FFmpeg is distributed in the hope that it will be useful, @ but WITHOUT ANY WARRANTY; without even the implied warranty of @ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU @ Lesser General Public License for more details. @ @ You should have received a copy of the GNU Lesser General Public @ License along with FFmpeg; if not, write to the Free Software @ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA @ #include "config.h" #include "asm.S" preserve8 #if !HAVE_PLD .macro pld reg .endm #endif #if HAVE_ARMV5TE function ff_prefetch_arm, export=1 subs r2, r2, #1 pld [r0] add r0, r0, r1 bne ff_prefetch_arm bx lr endfunc #endif .macro ALIGN_QWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4 mov \Rd0, \Rn0, lsr #(\shift * 8) mov \Rd1, \Rn1, lsr #(\shift * 8) mov \Rd2, \Rn2, lsr #(\shift * 8) mov \Rd3, \Rn3, lsr #(\shift * 8) orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8) orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8) orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8) orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8) .endm .macro ALIGN_DWORD shift, R0, R1, R2 mov \R0, \R0, lsr #(\shift * 8) orr \R0, \R0, \R1, lsl #(32 - \shift * 8) mov \R1, \R1, lsr #(\shift * 8) orr \R1, \R1, \R2, lsl #(32 - \shift * 8) .endm .macro ALIGN_DWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2 mov \Rdst0, \Rsrc0, lsr #(\shift * 8) mov \Rdst1, \Rsrc1, lsr #(\shift * 8) orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8)) orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8)) .endm .macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1) @ Rmask 
= 0xFEFEFEFE @ Rn = destroy eor \Rd0, \Rn0, \Rm0 eor \Rd1, \Rn1, \Rm1 orr \Rn0, \Rn0, \Rm0 orr \Rn1, \Rn1, \Rm1 and \Rd0, \Rd0, \Rmask and \Rd1, \Rd1, \Rmask sub \Rd0, \Rn0, \Rd0, lsr #1 sub \Rd1, \Rn1, \Rd1, lsr #1 .endm .macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask @ Rd = (Rn & Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1) @ Rmask = 0xFEFEFEFE @ Rn = destroy eor \Rd0, \Rn0, \Rm0 eor \Rd1, \Rn1, \Rm1 and \Rn0, \Rn0, \Rm0 and \Rn1, \Rn1, \Rm1 and \Rd0, \Rd0, \Rmask and \Rd1, \Rd1, \Rmask add \Rd0, \Rn0, \Rd0, lsr #1 add \Rd1, \Rn1, \Rd1, lsr #1 .endm .macro JMP_ALIGN tmp, reg ands \tmp, \reg, #3 bic \reg, \reg, #3 beq 1f subs \tmp, \tmp, #1 beq 2f subs \tmp, \tmp, #1 beq 3f b 4f .endm @ ---------------------------------------------------------------- .align 5 function ff_put_pixels16_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11, lr} JMP_ALIGN r5, r1 1: ldm r1, {r4-r7} add r1, r1, r2 stm r0, {r4-r7} pld [r1] subs r3, r3, #1 add r0, r0, r2 bne 1b pop {r4-r11, pc} .align 5 2: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 2b pop {r4-r11, pc} .align 5 3: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 3b pop {r4-r11, pc} .align 5 4: ldm r1, {r4-r8} add r1, r1, r2 ALIGN_QWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8 pld [r1] subs r3, r3, #1 stm r0, {r9-r12} add r0, r0, r2 bne 4b pop {r4-r11,pc} endfunc @ ---------------------------------------------------------------- .align 5 function ff_put_pixels8_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r5,lr} JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 subs r3, r3, #1 pld [r1] stm r0, {r4-r5} add r0, r0, r2 bne 1b 
pop {r4-r5,pc} .align 5 2: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 1, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r5,pc} .align 5 3: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 2, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r5,pc} .align 5 4: ldm r1, {r4-r5, r12} add r1, r1, r2 ALIGN_DWORD 3, r4, r5, r12 pld [r1] subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 4b pop {r4-r5,pc} endfunc @ ---------------------------------------------------------------- .align 5 function ff_put_pixels8_x2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r10,lr} ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 pld [r1] RND_AVG32 r8, r9, r4, r5, r6, r7, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 1b pop {r4-r10,pc} .align 5 2: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10 pld [r1] RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r10,pc} .align 5 3: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10 pld [r1] RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r10,pc} .align 5 4: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10 pld [r1] RND_AVG32 r8, r9, r6, r7, r5, r10, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 4b pop {r4-r10,pc} endfunc .align 5 function ff_put_no_rnd_pixels8_x2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r10,lr} ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 pld [r1] NO_RND_AVG32 r8, 
r9, r4, r5, r6, r7, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 1b pop {r4-r10,pc} .align 5 2: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 1, r6, r7, r4, r5, r10 ALIGN_DWORD_D 2, r8, r9, r4, r5, r10 pld [r1] NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 2b pop {r4-r10,pc} .align 5 3: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 2, r6, r7, r4, r5, r10 ALIGN_DWORD_D 3, r8, r9, r4, r5, r10 pld [r1] NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12 subs r3, r3, #1 stm r0, {r4-r5} add r0, r0, r2 bne 3b pop {r4-r10,pc} .align 5 4: ldm r1, {r4-r5, r10} add r1, r1, r2 ALIGN_DWORD_D 3, r6, r7, r4, r5, r10 pld [r1] NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 4b pop {r4-r10,pc} endfunc @ ---------------------------------------------------------------- .align 5 function ff_put_pixels8_y2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} mov r3, r3, lsr #1 ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 6: ldm r1, {r6-r7} add r1, r1, r2 pld [r1] RND_AVG32 r8, r9, r4, r5, r6, r7, r12 ldm r1, {r4-r5} add r1, r1, r2 stm r0, {r8-r9} add r0, r0, r2 pld [r1] RND_AVG32 r8, r9, r6, r7, r4, r5, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 2: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 3: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, 
{r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 4: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r7, r8, r9 RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 subs r3, r3, #1 RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} endfunc .align 5 function ff_put_no_rnd_pixels8_y2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} mov r3, r3, lsr #1 ldr r12, =0xfefefefe JMP_ALIGN r5, r1 1: ldm r1, {r4-r5} add r1, r1, r2 6: ldm r1, {r6-r7} add r1, r1, r2 pld [r1] NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12 ldm r1, {r4-r5} add r1, r1, r2 stm r0, {r8-r9} add r0, r0, r2 pld [r1] NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12 subs r3, r3, #1 stm r0, {r8-r9} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 2: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 1, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 3: ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 2, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} .align 5 4: ldm r1, 
{r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 6: ldm r1, {r7-r9} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r7, r8, r9 NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12 stm r0, {r10-r11} add r0, r0, r2 ldm r1, {r4-r6} add r1, r1, r2 pld [r1] ALIGN_DWORD 3, r4, r5, r6 subs r3, r3, #1 NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12 stm r0, {r10-r11} add r0, r0, r2 bne 6b pop {r4-r11,pc} endfunc .ltorg @ ---------------------------------------------------------------- .macro RND_XY2_IT align, rnd @ l1= (a & 0x03030303) + (b & 0x03030303) ?(+ 0x02020202) @ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2) .if \align == 0 ldm r1, {r6-r8} .elseif \align == 3 ldm r1, {r5-r7} .else ldm r1, {r8-r10} .endif add r1, r1, r2 pld [r1] .if \align == 0 ALIGN_DWORD_D 1, r4, r5, r6, r7, r8 .elseif \align == 1 ALIGN_DWORD_D 1, r4, r5, r8, r9, r10 ALIGN_DWORD_D 2, r6, r7, r8, r9, r10 .elseif \align == 2 ALIGN_DWORD_D 2, r4, r5, r8, r9, r10 ALIGN_DWORD_D 3, r6, r7, r8, r9, r10 .elseif \align == 3 ALIGN_DWORD_D 3, r4, r5, r5, r6, r7 .endif ldr r14, =0x03030303 tst r3, #1 and r8, r4, r14 and r9, r5, r14 and r10, r6, r14 and r11, r7, r14 andeq r14, r14, r14, \rnd #1 add r8, r8, r10 add r9, r9, r11 ldr r12, =0xfcfcfcfc >> 2 addeq r8, r8, r14 addeq r9, r9, r14 and r4, r12, r4, lsr #2 and r5, r12, r5, lsr #2 and r6, r12, r6, lsr #2 and r7, r12, r7, lsr #2 add r10, r4, r6 add r11, r5, r7 subs r3, r3, #1 .endm .macro RND_XY2_EXPAND align, rnd RND_XY2_IT \align, \rnd 6: push {r8-r11} RND_XY2_IT \align, \rnd pop {r4-r7} add r4, r4, r8 add r5, r5, r9 ldr r14, =0x0f0f0f0f add r6, r6, r10 add r7, r7, r11 and r4, r14, r4, lsr #2 and r5, r14, r5, lsr #2 add r4, r4, r6 add r5, r5, r7 stm r0, {r4-r5} add r0, r0, r2 bge 6b pop {r4-r11,pc} .endm .align 5 function ff_put_pixels8_xy2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} @ R14 is also called LR JMP_ALIGN r5, r1 1: RND_XY2_EXPAND 0, lsl 
.align 5 2: RND_XY2_EXPAND 1, lsl .align 5 3: RND_XY2_EXPAND 2, lsl .align 5 4: RND_XY2_EXPAND 3, lsl endfunc .align 5 function ff_put_no_rnd_pixels8_xy2_arm, export=1 @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h) @ block = word aligned, pixles = unaligned pld [r1] push {r4-r11,lr} JMP_ALIGN r5, r1 1: RND_XY2_EXPAND 0, lsr .align 5 2: RND_XY2_EXPAND 1, lsr .align 5 3: RND_XY2_EXPAND 2, lsr .align 5 4: RND_XY2_EXPAND 3, lsr endfunc .align 5 @ void ff_add_pixels_clamped_arm(int16_t *block, uint8_t *dest, int stride) function ff_add_pixels_clamped_arm, export=1 push {r4-r10} mov r10, #8 1: ldr r4, [r1] /* load dest */ /* block[0] and block[1]*/ ldrsh r5, [r0] ldrsh r7, [r0, #2] and r6, r4, #0xFF and r8, r4, #0xFF00 add r6, r5, r6 add r8, r7, r8, lsr #8 mvn r5, r5 mvn r7, r7 tst r6, #0x100 movne r6, r5, lsr #24 tst r8, #0x100 movne r8, r7, lsr #24 mov r9, r6 ldrsh r5, [r0, #4] /* moved form [A] */ orr r9, r9, r8, lsl #8 /* block[2] and block[3] */ /* [A] */ ldrsh r7, [r0, #6] and r6, r4, #0xFF0000 and r8, r4, #0xFF000000 add r6, r5, r6, lsr #16 add r8, r7, r8, lsr #24 mvn r5, r5 mvn r7, r7 tst r6, #0x100 movne r6, r5, lsr #24 tst r8, #0x100 movne r8, r7, lsr #24 orr r9, r9, r6, lsl #16 ldr r4, [r1, #4] /* moved form [B] */ orr r9, r9, r8, lsl #24 /* store dest */ ldrsh r5, [r0, #8] /* moved form [C] */ str r9, [r1] /* load dest */ /* [B] */ /* block[4] and block[5] */ /* [C] */ ldrsh r7, [r0, #10] and r6, r4, #0xFF and r8, r4, #0xFF00 add r6, r5, r6 add r8, r7, r8, lsr #8 mvn r5, r5 mvn r7, r7 tst r6, #0x100 movne r6, r5, lsr #24 tst r8, #0x100 movne r8, r7, lsr #24 mov r9, r6 ldrsh r5, [r0, #12] /* moved from [D] */ orr r9, r9, r8, lsl #8 /* block[6] and block[7] */ /* [D] */ ldrsh r7, [r0, #14] and r6, r4, #0xFF0000 and r8, r4, #0xFF000000 add r6, r5, r6, lsr #16 add r8, r7, r8, lsr #24 mvn r5, r5 mvn r7, r7 tst r6, #0x100 movne r6, r5, lsr #24 tst r8, #0x100 movne r8, r7, lsr #24 orr r9, r9, r6, lsl #16 add r0, r0, #16 /* moved from [E] */ 
orr r9, r9, r8, lsl #24 subs r10, r10, #1 /* moved from [F] */ /* store dest */ str r9, [r1, #4] /* [E] */ /* [F] */ add r1, r1, r2 bne 1b pop {r4-r10} bx lr endfunc
123linslouis-android-video-cutter
jni/libavcodec/arm/dsputil_arm.S
Unix Assembly
asf20
24,998
/* * Copyright (c) 2009 Mans Rullgard <mans@mansr.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_ARM_DSPUTIL_H #define AVCODEC_ARM_DSPUTIL_H #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" void ff_dsputil_init_armv5te(DSPContext* c, AVCodecContext *avctx); void ff_dsputil_init_armv6(DSPContext* c, AVCodecContext *avctx); void ff_dsputil_init_vfp(DSPContext* c, AVCodecContext *avctx); void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx); void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx); #endif
123linslouis-android-video-cutter
jni/libavcodec/arm/dsputil_arm.h
C
asf20
1,284
/* C-like prototype : void j_rev_dct_arm(DCTBLOCK data) With DCTBLOCK being a pointer to an array of 64 'signed shorts' Copyright (c) 2001 Lionel Ulmer (lionel.ulmer@free.fr / bbrox@bbrox.org) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "asm.S" #define FIX_0_298631336 2446 #define FIX_0_541196100 4433 #define FIX_0_765366865 6270 #define FIX_1_175875602 9633 #define FIX_1_501321110 12299 #define FIX_2_053119869 16819 #define FIX_3_072711026 25172 #define FIX_M_0_390180644 -3196 #define FIX_M_0_899976223 -7373 #define FIX_M_1_847759065 -15137 #define FIX_M_1_961570560 -16069 #define FIX_M_2_562915447 -20995 #define FIX_0xFFFF 0xFFFF #define FIX_0_298631336_ID 0 #define FIX_0_541196100_ID 4 #define FIX_0_765366865_ID 8 #define FIX_1_175875602_ID 12 #define FIX_1_501321110_ID 16 #define FIX_2_053119869_ID 20 #define FIX_3_072711026_ID 24 #define FIX_M_0_390180644_ID 28 #define FIX_M_0_899976223_ID 32 #define FIX_M_1_847759065_ID 36 #define FIX_M_1_961570560_ID 40 #define FIX_M_2_562915447_ID 44 #define FIX_0xFFFF_ID 48 .text .align function ff_j_rev_dct_arm, export=1 stmdb sp!, { r4 - r12, lr } @ all callee saved regs sub sp, sp, #4 @ reserve some space on the stack str r0, [ sp ] @ save the DCT pointer to the stack mov lr, r0 @ lr = pointer to the current row mov r12, #8 @ r12 = row-counter adr r11, const_array @ r11 = base pointer to the constants array row_loop: ldrsh r0, [lr, # 0] @ r0 = 'd0' ldrsh r2, [lr, # 2] @ r2 = 'd2' @ Optimization for row that have all items except the first set to 0 @ (this works as the DCTELEMS are always 4-byte aligned) ldr r5, [lr, # 0] ldr r6, [lr, # 4] ldr r3, [lr, # 8] ldr r4, [lr, #12] orr r3, r3, r4 orr r3, r3, r6 orrs r5, r3, r5 beq end_of_row_loop @ nothing to be done as ALL of them are '0' orrs r3, r3, r2 beq empty_row ldrsh r1, [lr, # 8] @ r1 = 'd1' ldrsh r4, [lr, # 4] @ r4 = 'd4' ldrsh r6, [lr, # 6] @ r6 = 'd6' ldr r3, [r11, #FIX_0_541196100_ID] add r7, r2, r6 ldr r5, [r11, #FIX_M_1_847759065_ID] mul r7, r3, r7 @ r7 = z1 ldr r3, [r11, #FIX_0_765366865_ID] mla r6, r5, r6, r7 @ r6 = tmp2 add r5, r0, r4 @ r5 = tmp0 mla r2, r3, r2, r7 @ r2 = tmp3 sub r3, r0, r4 @ r3 = tmp1 add r0, r2, r5, lsl #13 @ r0 = tmp10 rsb r2, r2, r5, lsl #13 @ r2 = tmp13 add 
r4, r6, r3, lsl #13 @ r4 = tmp11 rsb r3, r6, r3, lsl #13 @ r3 = tmp12 stmdb sp!, { r0, r2, r3, r4 } @ save on the stack tmp10, tmp13, tmp12, tmp11 ldrsh r3, [lr, #10] @ r3 = 'd3' ldrsh r5, [lr, #12] @ r5 = 'd5' ldrsh r7, [lr, #14] @ r7 = 'd7' add r0, r3, r5 @ r0 = 'z2' add r2, r1, r7 @ r2 = 'z1' add r4, r3, r7 @ r4 = 'z3' add r6, r1, r5 @ r6 = 'z4' ldr r9, [r11, #FIX_1_175875602_ID] add r8, r4, r6 @ r8 = z3 + z4 ldr r10, [r11, #FIX_M_0_899976223_ID] mul r8, r9, r8 @ r8 = 'z5' ldr r9, [r11, #FIX_M_2_562915447_ID] mul r2, r10, r2 @ r2 = 'z1' ldr r10, [r11, #FIX_M_1_961570560_ID] mul r0, r9, r0 @ r0 = 'z2' ldr r9, [r11, #FIX_M_0_390180644_ID] mla r4, r10, r4, r8 @ r4 = 'z3' ldr r10, [r11, #FIX_0_298631336_ID] mla r6, r9, r6, r8 @ r6 = 'z4' ldr r9, [r11, #FIX_2_053119869_ID] mla r7, r10, r7, r2 @ r7 = tmp0 + z1 ldr r10, [r11, #FIX_3_072711026_ID] mla r5, r9, r5, r0 @ r5 = tmp1 + z2 ldr r9, [r11, #FIX_1_501321110_ID] mla r3, r10, r3, r0 @ r3 = tmp2 + z2 add r7, r7, r4 @ r7 = tmp0 mla r1, r9, r1, r2 @ r1 = tmp3 + z1 add r5, r5, r6 @ r5 = tmp1 add r3, r3, r4 @ r3 = tmp2 add r1, r1, r6 @ r1 = tmp3 ldmia sp!, { r0, r2, r4, r6 } @ r0 = tmp10 / r2 = tmp13 / r4 = tmp12 / r6 = tmp11 @ r1 = tmp3 / r3 = tmp2 / r5 = tmp1 / r7 = tmp0 @ Compute DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) add r8, r0, r1 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, # 0] @ Compute DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) sub r8, r0, r1 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, #14] @ Compute DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) add r8, r6, r3 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, # 2] @ Compute DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) sub r8, r6, r3 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, #12] @ Compute DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) add r8, r4, r5 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, # 4] @ Compute DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) sub r8, r4, r5 add r8, r8, #(1<<10) mov r8, r8, asr #11 
strh r8, [lr, #10] @ Compute DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) add r8, r2, r7 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, # 6] @ Compute DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) sub r8, r2, r7 add r8, r8, #(1<<10) mov r8, r8, asr #11 strh r8, [lr, # 8] @ End of row loop add lr, lr, #16 subs r12, r12, #1 bne row_loop beq start_column_loop empty_row: ldr r1, [r11, #FIX_0xFFFF_ID] mov r0, r0, lsl #2 and r0, r0, r1 add r0, r0, r0, lsl #16 str r0, [lr, # 0] str r0, [lr, # 4] str r0, [lr, # 8] str r0, [lr, #12] end_of_row_loop: @ End of loop add lr, lr, #16 subs r12, r12, #1 bne row_loop start_column_loop: @ Start of column loop ldr lr, [ sp ] mov r12, #8 column_loop: ldrsh r0, [lr, #( 0*8)] @ r0 = 'd0' ldrsh r2, [lr, #( 4*8)] @ r2 = 'd2' ldrsh r4, [lr, #( 8*8)] @ r4 = 'd4' ldrsh r6, [lr, #(12*8)] @ r6 = 'd6' ldr r3, [r11, #FIX_0_541196100_ID] add r1, r2, r6 ldr r5, [r11, #FIX_M_1_847759065_ID] mul r1, r3, r1 @ r1 = z1 ldr r3, [r11, #FIX_0_765366865_ID] mla r6, r5, r6, r1 @ r6 = tmp2 add r5, r0, r4 @ r5 = tmp0 mla r2, r3, r2, r1 @ r2 = tmp3 sub r3, r0, r4 @ r3 = tmp1 add r0, r2, r5, lsl #13 @ r0 = tmp10 rsb r2, r2, r5, lsl #13 @ r2 = tmp13 add r4, r6, r3, lsl #13 @ r4 = tmp11 rsb r6, r6, r3, lsl #13 @ r6 = tmp12 ldrsh r1, [lr, #( 2*8)] @ r1 = 'd1' ldrsh r3, [lr, #( 6*8)] @ r3 = 'd3' ldrsh r5, [lr, #(10*8)] @ r5 = 'd5' ldrsh r7, [lr, #(14*8)] @ r7 = 'd7' @ Check for empty odd column (happens about 20 to 25 % of the time according to my stats) orr r9, r1, r3 orr r10, r5, r7 orrs r10, r9, r10 beq empty_odd_column stmdb sp!, { r0, r2, r4, r6 } @ save on the stack tmp10, tmp13, tmp12, tmp11 add r0, r3, r5 @ r0 = 'z2' add r2, r1, r7 @ r2 = 'z1' add r4, r3, r7 @ r4 = 'z3' add r6, r1, r5 @ r6 = 'z4' ldr r9, [r11, #FIX_1_175875602_ID] add r8, r4, r6 ldr r10, [r11, #FIX_M_0_899976223_ID] mul r8, r9, r8 @ r8 = 'z5' ldr r9, [r11, #FIX_M_2_562915447_ID] mul r2, r10, r2 @ r2 = 'z1' ldr r10, [r11, #FIX_M_1_961570560_ID] mul r0, r9, r0 @ r0 = 'z2' ldr r9, [r11, 
#FIX_M_0_390180644_ID] mla r4, r10, r4, r8 @ r4 = 'z3' ldr r10, [r11, #FIX_0_298631336_ID] mla r6, r9, r6, r8 @ r6 = 'z4' ldr r9, [r11, #FIX_2_053119869_ID] mla r7, r10, r7, r2 @ r7 = tmp0 + z1 ldr r10, [r11, #FIX_3_072711026_ID] mla r5, r9, r5, r0 @ r5 = tmp1 + z2 ldr r9, [r11, #FIX_1_501321110_ID] mla r3, r10, r3, r0 @ r3 = tmp2 + z2 add r7, r7, r4 @ r7 = tmp0 mla r1, r9, r1, r2 @ r1 = tmp3 + z1 add r5, r5, r6 @ r5 = tmp1 add r3, r3, r4 @ r3 = tmp2 add r1, r1, r6 @ r1 = tmp3 ldmia sp!, { r0, r2, r4, r6 } @ r0 = tmp10 / r2 = tmp13 / r4 = tmp11 / r6 = tmp12 @ r1 = tmp3 / r3 = tmp2 / r5 = tmp1 / r7 = tmp0 @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) add r8, r0, r1 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #( 0*8)] @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) sub r8, r0, r1 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #(14*8)] @ Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) add r8, r4, r3 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #( 2*8)] @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) sub r8, r4, r3 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #(12*8)] @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) add r8, r6, r5 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #( 4*8)] @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) sub r8, r6, r5 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #(10*8)] @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) add r8, r2, r7 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #( 6*8)] @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) sub r8, r2, r7 add r8, r8, #(1<<17) mov r8, r8, asr #18 strh r8, [lr, #( 8*8)] @ End of row loop add lr, lr, #2 subs r12, r12, #1 bne column_loop beq the_end empty_odd_column: @ Compute DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) @ Compute DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) add r0, r0, #(1<<17) mov r0, r0, asr #18 strh r0, [lr, #( 0*8)] strh r0, [lr, #(14*8)] @ 
Compute DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) @ Compute DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) add r4, r4, #(1<<17) mov r4, r4, asr #18 strh r4, [lr, #( 2*8)] strh r4, [lr, #(12*8)] @ Compute DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) @ Compute DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) add r6, r6, #(1<<17) mov r6, r6, asr #18 strh r6, [lr, #( 4*8)] strh r6, [lr, #(10*8)] @ Compute DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) @ Compute DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) add r2, r2, #(1<<17) mov r2, r2, asr #18 strh r2, [lr, #( 6*8)] strh r2, [lr, #( 8*8)] @ End of row loop add lr, lr, #2 subs r12, r12, #1 bne column_loop the_end: @ The end.... add sp, sp, #4 ldmia sp!, { r4 - r12, pc } @ restore callee saved regs and return const_array: .align .word FIX_0_298631336 .word FIX_0_541196100 .word FIX_0_765366865 .word FIX_1_175875602 .word FIX_1_501321110 .word FIX_2_053119869 .word FIX_3_072711026 .word FIX_M_0_390180644 .word FIX_M_0_899976223 .word FIX_M_1_847759065 .word FIX_M_1_961570560 .word FIX_M_2_562915447 .word FIX_0xFFFF
123linslouis-android-video-cutter
jni/libavcodec/arm/jrevdct_arm.S
Unix Assembly
asf20
13,882
/** * @file * VP6 DSP-oriented functions * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "dsputil.h" void ff_vp6_filter_diag4_c(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights, const int16_t *v_weights) { int x, y; int tmp[8*11]; int *t = tmp; src -= stride; for (y=0; y<11; y++) { for (x=0; x<8; x++) { t[x] = av_clip_uint8(( src[x-1] * h_weights[0] + src[x ] * h_weights[1] + src[x+1] * h_weights[2] + src[x+2] * h_weights[3] + 64) >> 7); } src += stride; t += 8; } t = tmp + 8; for (y=0; y<8; y++) { for (x=0; x<8; x++) { dst[x] = av_clip_uint8(( t[x-8 ] * v_weights[0] + t[x ] * v_weights[1] + t[x+8 ] * v_weights[2] + t[x+16] * v_weights[3] + 64) >> 7); } dst += stride; t += 8; } }
123linslouis-android-video-cutter
jni/libavcodec/vp6dsp.c
C
asf20
1,876
/* * Electronic Arts Madcow Video Decoder * Copyright (c) 2007-2009 Peter Ross * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Electronic Arts Madcow Video Decoder * by Peter Ross <pross@xvid.org> * * Technical details here: * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_MAD */ #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "aandcttab.h" #include "mpeg12.h" #include "mpeg12data.h" #define EA_PREAMBLE_SIZE 8 #define MADk_TAG MKTAG('M', 'A', 'D', 'k') /* MAD i-frame */ #define MADm_TAG MKTAG('M', 'A', 'D', 'm') /* MAD p-frame */ #define MADe_TAG MKTAG('M', 'A', 'D', 'e') /* MAD lqp-frame */ typedef struct MadContext { MpegEncContext s; AVFrame frame; AVFrame last_frame; void *bitstream_buf; unsigned int bitstream_buf_size; DECLARE_ALIGNED(16, DCTELEM, block)[64]; } MadContext; static void bswap16_buf(uint16_t *dst, const uint16_t *src, int count) { int i; for (i=0; i<count; i++) dst[i] = bswap_16(src[i]); } static av_cold int decode_init(AVCodecContext *avctx) { MadContext *t = avctx->priv_data; MpegEncContext *s = &t->s; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_YUV420P; if (avctx->idct_algo == FF_IDCT_AUTO) avctx->idct_algo = FF_IDCT_EA; dsputil_init(&s->dsp, avctx); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, 
ff_zigzag_direct); ff_mpeg12_init_vlcs(); return 0; } static inline void comp(unsigned char *dst, int dst_stride, unsigned char *src, int src_stride, int add) { int j, i; for (j=0; j<8; j++) for (i=0; i<8; i++) dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add); } static inline void comp_block(MadContext *t, int mb_x, int mb_y, int j, int mv_x, int mv_y, int add) { MpegEncContext *s = &t->s; if (j < 4) { comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3), t->frame.linesize[0], t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x, t->last_frame.linesize[0], add); } else if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { int index = j - 3; comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8, t->frame.linesize[index], t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2), t->last_frame.linesize[index], add); } } static inline void idct_put(MadContext *t, DCTELEM *block, int mb_x, int mb_y, int j) { MpegEncContext *s = &t->s; if (j < 4) { s->dsp.idct_put( t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3), t->frame.linesize[0], block); } else if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { int index = j - 3; s->dsp.idct_put( t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x*8, t->frame.linesize[index], block); } } static inline void decode_block_intra(MadContext * t, DCTELEM * block) { MpegEncContext *s = &t->s; int level, i, j, run; RLTable *rl = &ff_rl_mpeg1; const uint8_t *scantable = s->intra_scantable.permutated; int16_t *quant_matrix = s->intra_matrix; block[0] = (128 + get_sbits(&s->gb, 8)) * quant_matrix[0]; /* The RL decoder is derived from mpeg1_decode_block_intra; Escaped level and run values a decoded differently */ i = 0; { OPEN_READER(re, &s->gb); /* now quantify & encode AC coefficients */ for (;;) { UPDATE_CACHE(re, &s->gb); 
GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level == 127) { break; } else if (level != 0) { i += run; j = scantable[i]; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } else { /* escape */ UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 10); SKIP_BITS(re, &s->gb, 10); UPDATE_CACHE(re, &s->gb); run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); i += run; j = scantable[i]; if (level < 0) { level = -level; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = -level; } else { level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; } } if (i > 63) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return; } block[j] = level; } CLOSE_READER(re, &s->gb); } } static int decode_motion(GetBitContext *gb) { int value = 0; if (get_bits1(gb)) { if (get_bits1(gb)) value = -17; value += get_bits(gb, 4) + 1; } return value; } static void decode_mb(MadContext *t, int inter) { MpegEncContext *s = &t->s; int mv_map = 0; int mv_x, mv_y; int j; if (inter) { int v = decode210(&s->gb); if (v < 2) { mv_map = v ? 
get_bits(&s->gb, 6) : 63; mv_x = decode_motion(&s->gb); mv_y = decode_motion(&s->gb); } else { mv_map = 0; } } for (j=0; j<6; j++) { if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map int add = 2*decode_motion(&s->gb); comp_block(t, s->mb_x, s->mb_y, j, mv_x, mv_y, add); } else { s->dsp.clear_block(t->block); decode_block_intra(t, t->block); idct_put(t, t->block, s->mb_x, s->mb_y, j); } } } static void calc_intra_matrix(MadContext *t, int qscale) { MpegEncContext *s = &t->s; int i; if (s->avctx->idct_algo == FF_IDCT_EA) { s->intra_matrix[0] = (ff_inv_aanscales[0]*ff_mpeg1_default_intra_matrix[0]) >> 11; for (i=1; i<64; i++) s->intra_matrix[i] = (ff_inv_aanscales[i]*ff_mpeg1_default_intra_matrix[i]*qscale + 32) >> 10; } else { s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; for (i=1; i<64; i++) s->intra_matrix[i] = (ff_mpeg1_default_intra_matrix[i]*qscale) << 1; } } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; MadContext *t = avctx->priv_data; MpegEncContext *s = &t->s; int chunk_type; int inter; if (buf_size < 17) { av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n"); *data_size = 0; return -1; } chunk_type = AV_RL32(&buf[0]); inter = (chunk_type == MADm_TAG || chunk_type == MADe_TAG); buf += 8; av_reduce(&avctx->time_base.num, &avctx->time_base.den, AV_RL16(&buf[6]), 1000, 1<<30); s->width = AV_RL16(&buf[8]); s->height = AV_RL16(&buf[10]); calc_intra_matrix(t, buf[13]); buf += 16; if (avctx->width != s->width || avctx->height != s->height) { if (avcodec_check_dimensions(avctx, s->width, s->height) < 0) return -1; avcodec_set_dimensions(avctx, s->width, s->height); if (t->frame.data[0]) avctx->release_buffer(avctx, &t->frame); } t->frame.reference = 1; if (!t->frame.data[0]) { if (avctx->get_buffer(avctx, &t->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } } 
av_fast_malloc(&t->bitstream_buf, &t->bitstream_buf_size, (buf_end-buf) + FF_INPUT_BUFFER_PADDING_SIZE); if (!t->bitstream_buf) return AVERROR(ENOMEM); bswap16_buf(t->bitstream_buf, (const uint16_t*)buf, (buf_end-buf)/2); init_get_bits(&s->gb, t->bitstream_buf, 8*(buf_end-buf)); for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++) for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++) decode_mb(t, inter); *data_size = sizeof(AVFrame); *(AVFrame*)data = t->frame; if (chunk_type != MADe_TAG) FFSWAP(AVFrame, t->frame, t->last_frame); return buf_size; } static av_cold int decode_end(AVCodecContext *avctx) { MadContext *t = avctx->priv_data; if (t->frame.data[0]) avctx->release_buffer(avctx, &t->frame); if (t->last_frame.data[0]) avctx->release_buffer(avctx, &t->last_frame); av_free(t->bitstream_buf); return 0; } AVCodec eamad_decoder = { "eamad", AVMEDIA_TYPE_VIDEO, CODEC_ID_MAD, sizeof(MadContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts Madcow Video") };
123linslouis-android-video-cutter
jni/libavcodec/eamad.c
C
asf20
10,056
/* * Misc image conversion routines * Copyright (c) 2001, 2002, 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * misc image conversion routines */ /* TODO: * - write 'ffimg' program to test all the image related stuff * - move all api to slice based system * - integrate deinterlacing, postprocessing and scaling in the conversion process */ #include "avcodec.h" #include "dsputil.h" #include "colorspace.h" #include "internal.h" #include "imgconvert.h" #include "libavutil/pixdesc.h" #if HAVE_MMX #include "x86/mmx.h" #include "x86/dsputil_mmx.h" #endif #define xglue(x, y) x ## y #define glue(x, y) xglue(x, y) #define FF_COLOR_RGB 0 /**< RGB color space */ #define FF_COLOR_GRAY 1 /**< gray color space */ #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */ #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 
0 <= Y <= 255, 0 <= U, V <= 255 */ #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */ #define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */ #define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */ typedef struct PixFmtInfo { uint8_t nb_channels; /**< number of channels (including alpha) */ uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */ uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */ uint8_t is_alpha : 1; /**< true if alpha can be specified */ uint8_t depth; /**< bit depth of the color components */ } PixFmtInfo; /* this table gives more information about formats */ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { /* YUV formats */ [PIX_FMT_YUV420P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUV422P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUV444P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUYV422] = { .nb_channels = 1, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_UYVY422] = { .nb_channels = 1, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_YUV410P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUV411P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUV440P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUV420P16LE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_YUV422P16LE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_YUV444P16LE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, 
.pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_YUV420P16BE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_YUV422P16BE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_YUV444P16BE] = { .nb_channels = 3, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, /* YUV formats with alpha plane */ [PIX_FMT_YUVA420P] = { .nb_channels = 4, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, /* JPEG YUV */ [PIX_FMT_YUVJ420P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUVJ422P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUVJ444P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_YUVJ440P] = { .nb_channels = 3, .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, /* RGB formats */ [PIX_FMT_RGB24] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_BGR24] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_ARGB] = { .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_RGB48BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 16, }, [PIX_FMT_RGB48LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 16, }, [PIX_FMT_RGB565BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_RGB565LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_RGB555BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, 
[PIX_FMT_RGB555LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_RGB444BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 4, }, [PIX_FMT_RGB444LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 4, }, /* gray / mono formats */ [PIX_FMT_GRAY16BE] = { .nb_channels = 1, .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_GRAY16LE] = { .nb_channels = 1, .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR, .depth = 16, }, [PIX_FMT_GRAY8] = { .nb_channels = 1, .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_MONOWHITE] = { .nb_channels = 1, .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR, .depth = 1, }, [PIX_FMT_MONOBLACK] = { .nb_channels = 1, .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR, .depth = 1, }, /* paletted formats */ [PIX_FMT_PAL8] = { .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PALETTE, .depth = 8, }, [PIX_FMT_UYYVYY411] = { .nb_channels = 1, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_ABGR] = { .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_BGR565BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_BGR565LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_BGR555BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_BGR555LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, }, [PIX_FMT_BGR444BE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 4, }, [PIX_FMT_BGR444LE] = { .nb_channels = 3, .color_type = FF_COLOR_RGB, .pixel_type 
= FF_PIXEL_PACKED, .depth = 4, }, [PIX_FMT_RGB8] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_RGB4] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 4, }, [PIX_FMT_RGB4_BYTE] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_BGR8] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_BGR4] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 4, }, [PIX_FMT_BGR4_BYTE] = { .nb_channels = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_NV12] = { .nb_channels = 2, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_NV21] = { .nb_channels = 2, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR, .depth = 8, }, [PIX_FMT_BGRA] = { .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, [PIX_FMT_RGBA] = { .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, }, }; void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift) { *h_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_w; *v_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_h; } const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt) { if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB) return NULL; else return av_pix_fmt_descriptors[pix_fmt].name; } #if LIBAVCODEC_VERSION_MAJOR < 53 enum PixelFormat avcodec_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); } #endif void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt) { /* print header */ if (pix_fmt < 0) snprintf (buf, buf_size, "name " " nb_channels" " depth" " is_alpha" ); else{ PixFmtInfo info= pix_fmt_info[pix_fmt]; char is_alpha_char= info.is_alpha ? 
'y' : 'n'; snprintf (buf, buf_size, "%-11s %5d %9d %6c", av_pix_fmt_descriptors[pix_fmt].name, info.nb_channels, info.depth, is_alpha_char ); } } int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt) { return av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL; } int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){ int i; for(i=0; i<256; i++){ int r,g,b; switch(pix_fmt) { case PIX_FMT_RGB8: r= (i>>5 )*36; g= ((i>>2)&7)*36; b= (i&3 )*85; break; case PIX_FMT_BGR8: b= (i>>6 )*85; g= ((i>>3)&7)*36; r= (i&7 )*36; break; case PIX_FMT_RGB4_BYTE: r= (i>>3 )*255; g= ((i>>1)&3)*85; b= (i&1 )*255; break; case PIX_FMT_BGR4_BYTE: b= (i>>3 )*255; g= ((i>>1)&3)*85; r= (i&1 )*255; break; case PIX_FMT_GRAY8: r=b=g= i; break; default: return -1; } pal[i] = b + (g<<8) + (r<<16); } return 0; } int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width) { int i; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; int max_plane_step [4]; int max_plane_step_comp[4]; memset(picture->linesize, 0, sizeof(picture->linesize)); if (desc->flags & PIX_FMT_HWACCEL) return -1; if (desc->flags & PIX_FMT_BITSTREAM) { picture->linesize[0] = (width * (desc->comp[0].step_minus1+1) + 7) >> 3; return 0; } memset(max_plane_step , 0, sizeof(max_plane_step )); memset(max_plane_step_comp, 0, sizeof(max_plane_step_comp)); for (i = 0; i < 4; i++) { const AVComponentDescriptor *comp = &(desc->comp[i]); if ((comp->step_minus1+1) > max_plane_step[comp->plane]) { max_plane_step [comp->plane] = comp->step_minus1+1; max_plane_step_comp[comp->plane] = i; } } for (i = 0; i < 4; i++) { int s = (max_plane_step_comp[i] == 1 || max_plane_step_comp[i] == 2) ? 
desc->log2_chroma_w : 0; picture->linesize[i] = max_plane_step[i] * (((width + (1 << s) - 1)) >> s); } return 0; } int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt, int height) { int size, h2, size2; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; size = picture->linesize[0] * height; switch(pix_fmt) { case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: case PIX_FMT_YUV440P: case PIX_FMT_YUVJ420P: case PIX_FMT_YUVJ422P: case PIX_FMT_YUVJ444P: case PIX_FMT_YUVJ440P: case PIX_FMT_YUV420P16LE: case PIX_FMT_YUV422P16LE: case PIX_FMT_YUV444P16LE: case PIX_FMT_YUV420P16BE: case PIX_FMT_YUV422P16BE: case PIX_FMT_YUV444P16BE: h2 = (height + (1 << desc->log2_chroma_h) - 1) >> desc->log2_chroma_h; size2 = picture->linesize[1] * h2; picture->data[0] = ptr; picture->data[1] = picture->data[0] + size; picture->data[2] = picture->data[1] + size2; picture->data[3] = NULL; return size + 2 * size2; case PIX_FMT_YUVA420P: h2 = (height + (1 << desc->log2_chroma_h) - 1) >> desc->log2_chroma_h; size2 = picture->linesize[1] * h2; picture->data[0] = ptr; picture->data[1] = picture->data[0] + size; picture->data[2] = picture->data[1] + size2; picture->data[3] = picture->data[1] + size2 + size2; return 2 * size + 2 * size2; case PIX_FMT_NV12: case PIX_FMT_NV21: h2 = (height + (1 << desc->log2_chroma_h) - 1) >> desc->log2_chroma_h; size2 = picture->linesize[1] * h2; picture->data[0] = ptr; picture->data[1] = picture->data[0] + size; picture->data[2] = NULL; picture->data[3] = NULL; return size + size2; case PIX_FMT_RGB24: case PIX_FMT_BGR24: case PIX_FMT_ARGB: case PIX_FMT_ABGR: case PIX_FMT_RGBA: case PIX_FMT_BGRA: case PIX_FMT_RGB48BE: case PIX_FMT_RGB48LE: case PIX_FMT_GRAY16BE: case PIX_FMT_GRAY16LE: case PIX_FMT_BGR444BE: case PIX_FMT_BGR444LE: case PIX_FMT_BGR555BE: case PIX_FMT_BGR555LE: case PIX_FMT_BGR565BE: case PIX_FMT_BGR565LE: case PIX_FMT_RGB444BE: case PIX_FMT_RGB444LE: 
case PIX_FMT_RGB555BE: case PIX_FMT_RGB555LE: case PIX_FMT_RGB565BE: case PIX_FMT_RGB565LE: case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_UYYVYY411: case PIX_FMT_RGB4: case PIX_FMT_BGR4: case PIX_FMT_MONOWHITE: case PIX_FMT_MONOBLACK: case PIX_FMT_Y400A: picture->data[0] = ptr; picture->data[1] = NULL; picture->data[2] = NULL; picture->data[3] = NULL; return size; case PIX_FMT_PAL8: case PIX_FMT_RGB8: case PIX_FMT_BGR8: case PIX_FMT_RGB4_BYTE: case PIX_FMT_BGR4_BYTE: case PIX_FMT_GRAY8: size2 = (size + 3) & ~3; picture->data[0] = ptr; picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */ picture->data[2] = NULL; picture->data[3] = NULL; return size2 + 256 * 4; default: picture->data[0] = NULL; picture->data[1] = NULL; picture->data[2] = NULL; picture->data[3] = NULL; return -1; } } int avpicture_fill(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt, int width, int height) { if(avcodec_check_dimensions(NULL, width, height)) return -1; if (ff_fill_linesize(picture, pix_fmt, width)) return -1; return ff_fill_pointer(picture, ptr, pix_fmt, height); } int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height, unsigned char *dest, int dest_size) { const PixFmtInfo* pf = &pix_fmt_info[pix_fmt]; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; int i, j, w, ow, h, oh, data_planes; const unsigned char* s; int size = avpicture_get_size(pix_fmt, width, height); if (size > dest_size || size < 0) return -1; if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) { if (pix_fmt == PIX_FMT_YUYV422 || pix_fmt == PIX_FMT_UYVY422 || pix_fmt == PIX_FMT_BGR565BE || pix_fmt == PIX_FMT_BGR565LE || pix_fmt == PIX_FMT_BGR555BE || pix_fmt == PIX_FMT_BGR555LE || pix_fmt == PIX_FMT_BGR444BE || pix_fmt == PIX_FMT_BGR444LE || pix_fmt == PIX_FMT_RGB565BE || pix_fmt == PIX_FMT_RGB565LE || pix_fmt == PIX_FMT_RGB555BE || pix_fmt == PIX_FMT_RGB555LE || pix_fmt == PIX_FMT_RGB444BE || 
pix_fmt == PIX_FMT_RGB444LE) w = width * 2; else if (pix_fmt == PIX_FMT_UYYVYY411) w = width + width/2; else if (pix_fmt == PIX_FMT_PAL8) w = width; else w = width * (pf->depth * pf->nb_channels / 8); data_planes = 1; h = height; } else { data_planes = pf->nb_channels; w = (width*pf->depth + 7)/8; h = height; } ow = w; oh = h; for (i=0; i<data_planes; i++) { if (i == 1) { w = (- ((-width) >> desc->log2_chroma_w) * pf->depth + 7) / 8; h = -((-height) >> desc->log2_chroma_h); if (pix_fmt == PIX_FMT_NV12 || pix_fmt == PIX_FMT_NV21) w <<= 1; } else if (i == 3) { w = ow; h = oh; } s = src->data[i]; for(j=0; j<h; j++) { memcpy(dest, s, w); dest += w; s += src->linesize[i]; } } if (pf->pixel_type == FF_PIXEL_PALETTE) memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4); return size; } int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height) { AVPicture dummy_pict; if(avcodec_check_dimensions(NULL, width, height)) return -1; switch (pix_fmt) { case PIX_FMT_RGB8: case PIX_FMT_BGR8: case PIX_FMT_RGB4_BYTE: case PIX_FMT_BGR4_BYTE: case PIX_FMT_GRAY8: // do not include palette for these pseudo-paletted formats return width * height; } return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height); } int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt, int has_alpha) { const PixFmtInfo *pf, *ps; const AVPixFmtDescriptor *src_desc = &av_pix_fmt_descriptors[src_pix_fmt]; const AVPixFmtDescriptor *dst_desc = &av_pix_fmt_descriptors[dst_pix_fmt]; int loss; ps = &pix_fmt_info[src_pix_fmt]; /* compute loss */ loss = 0; pf = &pix_fmt_info[dst_pix_fmt]; if (pf->depth < ps->depth || ((dst_pix_fmt == PIX_FMT_RGB555BE || dst_pix_fmt == PIX_FMT_RGB555LE || dst_pix_fmt == PIX_FMT_BGR555BE || dst_pix_fmt == PIX_FMT_BGR555LE) && (src_pix_fmt == PIX_FMT_RGB565BE || src_pix_fmt == PIX_FMT_RGB565LE || src_pix_fmt == PIX_FMT_BGR565BE || src_pix_fmt == PIX_FMT_BGR565LE))) loss |= FF_LOSS_DEPTH; if (dst_desc->log2_chroma_w > 
src_desc->log2_chroma_w || dst_desc->log2_chroma_h > src_desc->log2_chroma_h) loss |= FF_LOSS_RESOLUTION; switch(pf->color_type) { case FF_COLOR_RGB: if (ps->color_type != FF_COLOR_RGB && ps->color_type != FF_COLOR_GRAY) loss |= FF_LOSS_COLORSPACE; break; case FF_COLOR_GRAY: if (ps->color_type != FF_COLOR_GRAY) loss |= FF_LOSS_COLORSPACE; break; case FF_COLOR_YUV: if (ps->color_type != FF_COLOR_YUV) loss |= FF_LOSS_COLORSPACE; break; case FF_COLOR_YUV_JPEG: if (ps->color_type != FF_COLOR_YUV_JPEG && ps->color_type != FF_COLOR_YUV && ps->color_type != FF_COLOR_GRAY) loss |= FF_LOSS_COLORSPACE; break; default: /* fail safe test */ if (ps->color_type != pf->color_type) loss |= FF_LOSS_COLORSPACE; break; } if (pf->color_type == FF_COLOR_GRAY && ps->color_type != FF_COLOR_GRAY) loss |= FF_LOSS_CHROMA; if (!pf->is_alpha && (ps->is_alpha && has_alpha)) loss |= FF_LOSS_ALPHA; if (pf->pixel_type == FF_PIXEL_PALETTE && (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY)) loss |= FF_LOSS_COLORQUANT; return loss; } static int avg_bits_per_pixel(enum PixelFormat pix_fmt) { int bits; const PixFmtInfo *pf; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; pf = &pix_fmt_info[pix_fmt]; switch(pf->pixel_type) { case FF_PIXEL_PACKED: switch(pix_fmt) { case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_RGB565BE: case PIX_FMT_RGB565LE: case PIX_FMT_RGB555BE: case PIX_FMT_RGB555LE: case PIX_FMT_RGB444BE: case PIX_FMT_RGB444LE: case PIX_FMT_BGR565BE: case PIX_FMT_BGR565LE: case PIX_FMT_BGR555BE: case PIX_FMT_BGR555LE: case PIX_FMT_BGR444BE: case PIX_FMT_BGR444LE: bits = 16; break; case PIX_FMT_UYYVYY411: bits = 12; break; default: bits = pf->depth * pf->nb_channels; break; } break; case FF_PIXEL_PLANAR: if (desc->log2_chroma_w == 0 && desc->log2_chroma_h == 0) { bits = pf->depth * pf->nb_channels; } else { bits = pf->depth + ((2 * pf->depth) >> (desc->log2_chroma_w + desc->log2_chroma_h)); } break; case FF_PIXEL_PALETTE: bits = 8; break; 
default: bits = -1; break; } return bits; } static enum PixelFormat avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt, int has_alpha, int loss_mask) { int dist, i, loss, min_dist; enum PixelFormat dst_pix_fmt; /* find exact color match with smallest size */ dst_pix_fmt = PIX_FMT_NONE; min_dist = 0x7fffffff; for(i = 0;i < PIX_FMT_NB; i++) { if (pix_fmt_mask & (1ULL << i)) { loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask; if (loss == 0) { dist = avg_bits_per_pixel(i); if (dist < min_dist) { min_dist = dist; dst_pix_fmt = i; } } } } return dst_pix_fmt; } enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr) { enum PixelFormat dst_pix_fmt; int loss_mask, i; static const int loss_mask_order[] = { ~0, /* no loss first */ ~FF_LOSS_ALPHA, ~FF_LOSS_RESOLUTION, ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION), ~FF_LOSS_COLORQUANT, ~FF_LOSS_DEPTH, 0, }; /* try with successive loss */ i = 0; for(;;) { loss_mask = loss_mask_order[i++]; dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt, has_alpha, loss_mask); if (dst_pix_fmt >= 0) goto found; if (loss_mask == 0) break; } return PIX_FMT_NONE; found: if (loss_ptr) *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha); return dst_pix_fmt; } void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height) { if((!dst) || (!src)) return; for(;height > 0; height--) { memcpy(dst, src, width); dst += dst_wrap; src += src_wrap; } } int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane) { int bits; const PixFmtInfo *pf = &pix_fmt_info[pix_fmt]; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; pf = &pix_fmt_info[pix_fmt]; switch(pf->pixel_type) { case FF_PIXEL_PACKED: switch(pix_fmt) { case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_RGB565BE: case PIX_FMT_RGB565LE: case PIX_FMT_RGB555BE: case 
PIX_FMT_RGB555LE: case PIX_FMT_RGB444BE: case PIX_FMT_RGB444LE: case PIX_FMT_BGR565BE: case PIX_FMT_BGR565LE: case PIX_FMT_BGR555BE: case PIX_FMT_BGR555LE: case PIX_FMT_BGR444BE: case PIX_FMT_BGR444LE: bits = 16; break; case PIX_FMT_UYYVYY411: bits = 12; break; default: bits = pf->depth * pf->nb_channels; break; } return (width * bits + 7) >> 3; break; case FF_PIXEL_PLANAR: if (plane == 1 || plane == 2) width= -((-width)>>desc->log2_chroma_w); return (width * pf->depth + 7) >> 3; break; case FF_PIXEL_PALETTE: if (plane == 0) return width; break; } return -1; } void av_picture_copy(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int width, int height) { int i; const PixFmtInfo *pf = &pix_fmt_info[pix_fmt]; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; switch(pf->pixel_type) { case FF_PIXEL_PACKED: case FF_PIXEL_PLANAR: for(i = 0; i < pf->nb_channels; i++) { int h; int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i); h = height; if (i == 1 || i == 2) { h= -((-height)>>desc->log2_chroma_h); } ff_img_copy_plane(dst->data[i], dst->linesize[i], src->data[i], src->linesize[i], bwidth, h); } break; case FF_PIXEL_PALETTE: ff_img_copy_plane(dst->data[0], dst->linesize[0], src->data[0], src->linesize[0], width, height); /* copy the palette */ memcpy(dst->data[1], src->data[1], 4*256); break; } } /* 2x2 -> 1x1 */ void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height) { int w; const uint8_t *s1, *s2; uint8_t *d; for(;height > 0; height--) { s1 = src; s2 = s1 + src_wrap; d = dst; for(w = width;w >= 4; w-=4) { d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2; d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2; d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2; d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2; s1 += 8; s2 += 8; d += 4; } for(;w > 0; w--) { d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2; s1 += 2; s2 += 2; d++; } src += 2 * src_wrap; dst += dst_wrap; } } /* 4x4 -> 1x1 */ void 
ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height) { int w; const uint8_t *s1, *s2, *s3, *s4; uint8_t *d; for(;height > 0; height--) { s1 = src; s2 = s1 + src_wrap; s3 = s2 + src_wrap; s4 = s3 + src_wrap; d = dst; for(w = width;w > 0; w--) { d[0] = (s1[0] + s1[1] + s1[2] + s1[3] + s2[0] + s2[1] + s2[2] + s2[3] + s3[0] + s3[1] + s3[2] + s3[3] + s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4; s1 += 4; s2 += 4; s3 += 4; s4 += 4; d++; } src += 4 * src_wrap; dst += dst_wrap; } } /* 8x8 -> 1x1 */ void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height) { int w, i; for(;height > 0; height--) { for(w = width;w > 0; w--) { int tmp=0; for(i=0; i<8; i++){ tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7]; src += src_wrap; } *(dst++) = (tmp + 32)>>6; src += 8 - 8*src_wrap; } src += 8*src_wrap - 8*width; dst += dst_wrap - width; } } int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height) { int size; void *ptr; size = avpicture_fill(picture, NULL, pix_fmt, width, height); if(size<0) goto fail; ptr = av_malloc(size); if (!ptr) goto fail; avpicture_fill(picture, ptr, pix_fmt, width, height); if(picture->data[1] && !picture->data[2]) ff_set_systematic_pal((uint32_t*)picture->data[1], pix_fmt); return 0; fail: memset(picture, 0, sizeof(AVPicture)); return -1; } void avpicture_free(AVPicture *picture) { av_free(picture->data[0]); } /* return true if yuv planar */ static inline int is_yuv_planar(const PixFmtInfo *ps) { return (ps->color_type == FF_COLOR_YUV || ps->color_type == FF_COLOR_YUV_JPEG) && ps->pixel_type == FF_PIXEL_PLANAR; } int av_picture_crop(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int top_band, int left_band) { int y_shift; int x_shift; if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1; y_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_h; x_shift = 
av_pix_fmt_descriptors[pix_fmt].log2_chroma_w; dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band; dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift); dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift); dst->linesize[0] = src->linesize[0]; dst->linesize[1] = src->linesize[1]; dst->linesize[2] = src->linesize[2]; return 0; } int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt, int padtop, int padbottom, int padleft, int padright, int *color) { uint8_t *optr; int y_shift; int x_shift; int yheight; int i, y; if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1; for (i = 0; i < 3; i++) { x_shift = i ? av_pix_fmt_descriptors[pix_fmt].log2_chroma_w : 0; y_shift = i ? av_pix_fmt_descriptors[pix_fmt].log2_chroma_h : 0; if (padtop || padleft) { memset(dst->data[i], color[i], dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift)); } if (padleft || padright) { optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (dst->linesize[i] - (padright >> x_shift)); yheight = (height - 1 - (padtop + padbottom)) >> y_shift; for (y = 0; y < yheight; y++) { memset(optr, color[i], (padleft + padright) >> x_shift); optr += dst->linesize[i]; } } if (src) { /* first line */ uint8_t *iptr = src->data[i]; optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift); memcpy(optr, iptr, (width - padleft - padright) >> x_shift); iptr += src->linesize[i]; optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (dst->linesize[i] - (padright >> x_shift)); yheight = (height - 1 - (padtop + padbottom)) >> y_shift; for (y = 0; y < yheight; y++) { memset(optr, color[i], (padleft + padright) >> x_shift); memcpy(optr + ((padleft + padright) >> x_shift), iptr, (width - padleft - padright) >> x_shift); iptr += src->linesize[i]; optr += dst->linesize[i]; } } 
if (padbottom || padright) { optr = dst->data[i] + dst->linesize[i] * ((height - padbottom) >> y_shift) - (padright >> x_shift); memset(optr, color[i],dst->linesize[i] * (padbottom >> y_shift) + (padright >> x_shift)); } } return 0; } /* NOTE: we scan all the pixels to have an exact information */ static int get_alpha_info_pal8(const AVPicture *src, int width, int height) { const unsigned char *p; int src_wrap, ret, x, y; unsigned int a; uint32_t *palette = (uint32_t *)src->data[1]; p = src->data[0]; src_wrap = src->linesize[0] - width; ret = 0; for(y=0;y<height;y++) { for(x=0;x<width;x++) { a = palette[p[0]] >> 24; if (a == 0x00) { ret |= FF_ALPHA_TRANSP; } else if (a != 0xff) { ret |= FF_ALPHA_SEMI_TRANSP; } p++; } p += src_wrap; } return ret; } int img_get_alpha_info(const AVPicture *src, enum PixelFormat pix_fmt, int width, int height) { const PixFmtInfo *pf = &pix_fmt_info[pix_fmt]; int ret; /* no alpha can be represented in format */ if (!pf->is_alpha) return 0; switch(pix_fmt) { case PIX_FMT_PAL8: ret = get_alpha_info_pal8(src, width, height); break; default: /* we do not know, so everything is indicated */ ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP; break; } return ret; } #if HAVE_MMX #define DEINT_INPLACE_LINE_LUM \ movd_m2r(lum_m4[0],mm0);\ movd_m2r(lum_m3[0],mm1);\ movd_m2r(lum_m2[0],mm2);\ movd_m2r(lum_m1[0],mm3);\ movd_m2r(lum[0],mm4);\ punpcklbw_r2r(mm7,mm0);\ movd_r2m(mm2,lum_m4[0]);\ punpcklbw_r2r(mm7,mm1);\ punpcklbw_r2r(mm7,mm2);\ punpcklbw_r2r(mm7,mm3);\ punpcklbw_r2r(mm7,mm4);\ paddw_r2r(mm3,mm1);\ psllw_i2r(1,mm2);\ paddw_r2r(mm4,mm0);\ psllw_i2r(2,mm1);\ paddw_r2r(mm6,mm2);\ paddw_r2r(mm2,mm1);\ psubusw_r2r(mm0,mm1);\ psrlw_i2r(3,mm1);\ packuswb_r2r(mm7,mm1);\ movd_r2m(mm1,lum_m2[0]); #define DEINT_LINE_LUM \ movd_m2r(lum_m4[0],mm0);\ movd_m2r(lum_m3[0],mm1);\ movd_m2r(lum_m2[0],mm2);\ movd_m2r(lum_m1[0],mm3);\ movd_m2r(lum[0],mm4);\ punpcklbw_r2r(mm7,mm0);\ punpcklbw_r2r(mm7,mm1);\ punpcklbw_r2r(mm7,mm2);\ punpcklbw_r2r(mm7,mm3);\ 
punpcklbw_r2r(mm7,mm4);\ paddw_r2r(mm3,mm1);\ psllw_i2r(1,mm2);\ paddw_r2r(mm4,mm0);\ psllw_i2r(2,mm1);\ paddw_r2r(mm6,mm2);\ paddw_r2r(mm2,mm1);\ psubusw_r2r(mm0,mm1);\ psrlw_i2r(3,mm1);\ packuswb_r2r(mm7,mm1);\ movd_r2m(mm1,dst[0]); #endif /* filter parameters: [-1 4 2 4 -1] // 8 */ static void deinterlace_line(uint8_t *dst, const uint8_t *lum_m4, const uint8_t *lum_m3, const uint8_t *lum_m2, const uint8_t *lum_m1, const uint8_t *lum, int size) { #if !HAVE_MMX uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int sum; for(;size > 0;size--) { sum = -lum_m4[0]; sum += lum_m3[0] << 2; sum += lum_m2[0] << 1; sum += lum_m1[0] << 2; sum += -lum[0]; dst[0] = cm[(sum + 4) >> 3]; lum_m4++; lum_m3++; lum_m2++; lum_m1++; lum++; dst++; } #else { pxor_r2r(mm7,mm7); movq_m2r(ff_pw_4,mm6); } for (;size > 3; size-=4) { DEINT_LINE_LUM lum_m4+=4; lum_m3+=4; lum_m2+=4; lum_m1+=4; lum+=4; dst+=4; } #endif } static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum, int size) { #if !HAVE_MMX uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int sum; for(;size > 0;size--) { sum = -lum_m4[0]; sum += lum_m3[0] << 2; sum += lum_m2[0] << 1; lum_m4[0]=lum_m2[0]; sum += lum_m1[0] << 2; sum += -lum[0]; lum_m2[0] = cm[(sum + 4) >> 3]; lum_m4++; lum_m3++; lum_m2++; lum_m1++; lum++; } #else { pxor_r2r(mm7,mm7); movq_m2r(ff_pw_4,mm6); } for (;size > 3; size-=4) { DEINT_INPLACE_LINE_LUM lum_m4+=4; lum_m3+=4; lum_m2+=4; lum_m1+=4; lum+=4; } #endif } /* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The top field is copied as is, but the bottom field is deinterlaced against the top field. 
*/ static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap, const uint8_t *src1, int src_wrap, int width, int height) { const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2; int y; src_m2 = src1; src_m1 = src1; src_0=&src_m1[src_wrap]; src_p1=&src_0[src_wrap]; src_p2=&src_p1[src_wrap]; for(y=0;y<(height-2);y+=2) { memcpy(dst,src_m1,width); dst += dst_wrap; deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width); src_m2 = src_0; src_m1 = src_p1; src_0 = src_p2; src_p1 += 2*src_wrap; src_p2 += 2*src_wrap; dst += dst_wrap; } memcpy(dst,src_m1,width); dst += dst_wrap; /* do last line */ deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width); } static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int height) { uint8_t *src_m1, *src_0, *src_p1, *src_p2; int y; uint8_t *buf; buf = (uint8_t*)av_malloc(width); src_m1 = src1; memcpy(buf,src_m1,width); src_0=&src_m1[src_wrap]; src_p1=&src_0[src_wrap]; src_p2=&src_p1[src_wrap]; for(y=0;y<(height-2);y+=2) { deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width); src_m1 = src_p1; src_0 = src_p2; src_p1 += 2*src_wrap; src_p2 += 2*src_wrap; } /* do last line */ deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width); av_free(buf); } int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int width, int height) { int i; if (pix_fmt != PIX_FMT_YUV420P && pix_fmt != PIX_FMT_YUV422P && pix_fmt != PIX_FMT_YUV444P && pix_fmt != PIX_FMT_YUV411P && pix_fmt != PIX_FMT_GRAY8) return -1; if ((width & 3) != 0 || (height & 3) != 0) return -1; for(i=0;i<3;i++) { if (i == 1) { switch(pix_fmt) { case PIX_FMT_YUV420P: width >>= 1; height >>= 1; break; case PIX_FMT_YUV422P: width >>= 1; break; case PIX_FMT_YUV411P: width >>= 2; break; default: break; } if (pix_fmt == PIX_FMT_GRAY8) { break; } } if (src == dst) { deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i], width, height); } else { 
deinterlace_bottom_field(dst->data[i],dst->linesize[i], src->data[i], src->linesize[i], width, height); } } emms_c(); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/imgconvert.c
C
asf20
43,211
/*
 * Renderware TeXture Dictionary (.txd) image decoder
 * Copyright (c) 2007 Ivo van Poorten
 *
 * See also: http://wiki.multimedia.cx/index.php?title=TXD
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "s3tc.h"

typedef struct TXDContext {
    AVFrame picture;   /* persistent frame reused across decode calls */
} TXDContext;

/** Initializes the decoder: sets up the persistent output frame. */
static av_cold int txd_init(AVCodecContext *avctx) {
    TXDContext *s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->picture);
    avctx->coded_frame = &s->picture;

    return 0;
}

/**
 * Decodes one TXD texture into an AVFrame.
 *
 * Supports 8-bit paletted, 16-bit DXT1/DXT3-compressed and 32-bit raw
 * textures. Returns the number of bytes consumed, or -1 on error.
 */
static int txd_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    TXDContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    unsigned int version, w, h, d3d_format, depth, stride, mipmap_count, flags;
    unsigned int y, v;
    uint8_t *ptr;
    const uint8_t *cur = buf;
    const uint32_t *palette = (const uint32_t *)(cur + 88);
    uint32_t *pal;

    /* Fixed-offset header fields (see multimedia.cx TXD description). */
    version      = AV_RL32(cur);
    d3d_format   = AV_RL32(cur+76);
    w            = AV_RL16(cur+80);
    h            = AV_RL16(cur+82);
    depth        = AV_RL8 (cur+84);
    mipmap_count = AV_RL8 (cur+85);
    flags        = AV_RL8 (cur+87);
    cur         += 92;

    if (version < 8 || version > 9) {
        av_log(avctx, AV_LOG_ERROR, "texture data version %i is unsupported\n",
                                                                    version);
        return -1;
    }

    if (depth == 8) {
        avctx->pix_fmt = PIX_FMT_PAL8;
        cur += 1024;            /* skip the embedded 256-entry palette */
    } else if (depth == 16 || depth == 32)
        avctx->pix_fmt = PIX_FMT_RGB32;
    else {
        av_log(avctx, AV_LOG_ERROR, "depth of %i is unsupported\n", depth);
        return -1;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    if (depth == 8) {
        /* Convert the RGBA palette to the native ARGB layout. */
        pal = (uint32_t *) p->data[1];
        for (y=0; y<256; y++) {
            v = AV_RB32(palette+y);
            pal[y] = (v>>8) + (v<<24);
        }
        for (y=0; y<h; y++) {
            memcpy(ptr, cur, w);
            ptr += stride;
            cur += w;
        }
    } else if (depth == 16) {
        switch (d3d_format) {
        case 0:
            /* Fix: "!flags&1" parsed as "(!flags)&1" — the DXT1 capability
               bit (bit 0 of flags) was never actually tested. */
            if (!(flags & 1))
                goto unsupported;
            /* fall through: d3d_format 0 with bit 0 set means DXT1 data */
        case FF_S3TC_DXT1:
            ff_decode_dxt1(cur, ptr, w, h, stride);
            break;
        case FF_S3TC_DXT3:
            ff_decode_dxt3(cur, ptr, w, h, stride);
            break;
        default:
            goto unsupported;
        }
    } else if (depth == 32) {
        switch (d3d_format) {
        case 0x15:          /* D3DFMT_A8R8G8B8 */
        case 0x16:          /* D3DFMT_X8R8G8B8 */
            for (y=0; y<h; y++) {
                memcpy(ptr, cur, w*4);
                ptr += stride;
                cur += w*4;
            }
            break;
        default:
            goto unsupported;
        }
    }

    /* Skip the remaining mipmap levels; each is a length-prefixed blob. */
    for (; mipmap_count > 1; mipmap_count--)
        cur += AV_RL32(cur) + 4;

    *picture   = s->picture;
    *data_size = sizeof(AVPicture);

    return cur - buf;

unsupported:
    av_log(avctx, AV_LOG_ERROR, "unsupported d3d format (%08x)\n", d3d_format);
    return -1;
}

/** Releases the persistent frame on decoder close. */
static av_cold int txd_end(AVCodecContext *avctx) {
    TXDContext *s = avctx->priv_data;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec txd_decoder = {
    "txd",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_TXD,
    sizeof(TXDContext),
    txd_init,
    NULL,
    txd_end,
    txd_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Renderware TXD (TeXture Dictionary) image"),
};
123linslouis-android-video-cutter
jni/libavcodec/txd.c
C
asf20
4,739
/*
 * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com >
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
* @file
* function definitions common to libschroedingerdec.c and libschroedingerenc.c
*/

#include "libdirac_libschro.h"
#include "libschroedinger.h"

/**
* Schroedinger video preset table. Ensure that this tables matches up correctly
* with the ff_dirac_schro_video_format_info table in libdirac_libschro.c.
*/
static const SchroVideoFormatEnum ff_schro_video_formats[]={
    SCHRO_VIDEO_FORMAT_CUSTOM     ,
    SCHRO_VIDEO_FORMAT_QSIF       ,
    SCHRO_VIDEO_FORMAT_QCIF       ,
    SCHRO_VIDEO_FORMAT_SIF        ,
    SCHRO_VIDEO_FORMAT_CIF        ,
    SCHRO_VIDEO_FORMAT_4SIF       ,
    SCHRO_VIDEO_FORMAT_4CIF       ,
    SCHRO_VIDEO_FORMAT_SD480I_60  ,
    SCHRO_VIDEO_FORMAT_SD576I_50  ,
    SCHRO_VIDEO_FORMAT_HD720P_60  ,
    SCHRO_VIDEO_FORMAT_HD720P_50  ,
    SCHRO_VIDEO_FORMAT_HD1080I_60 ,
    SCHRO_VIDEO_FORMAT_HD1080I_50 ,
    SCHRO_VIDEO_FORMAT_HD1080P_60 ,
    SCHRO_VIDEO_FORMAT_HD1080P_50 ,
    SCHRO_VIDEO_FORMAT_DC2K_24    ,
    SCHRO_VIDEO_FORMAT_DC4K_24    ,
};

/**
 * Maps the codec context's video format index onto a Schroedinger preset;
 * falls back to SCHRO_VIDEO_FORMAT_CUSTOM when no preset matches.
 */
SchroVideoFormatEnum ff_get_schro_video_format_preset(AVCodecContext *avccontext)
{
    unsigned int num_formats = sizeof(ff_schro_video_formats) /
                               sizeof(ff_schro_video_formats[0]);

    unsigned int idx = ff_dirac_schro_get_video_format_idx (avccontext);

    return (idx < num_formats) ? ff_schro_video_formats[idx] :
                                 SCHRO_VIDEO_FORMAT_CUSTOM;
}

/**
 * Looks up the Schroedinger frame format for a given chroma format.
 *
 * @return 0 on success (result stored in *schro_frame_fmt), -1 if the
 *         chroma format is not in ffmpeg_schro_pixel_format_map.
 */
int ff_get_schro_frame_format (SchroChromaFormat schro_pix_fmt,
                               SchroFrameFormat  *schro_frame_fmt)
{
    unsigned int num_formats = sizeof(ffmpeg_schro_pixel_format_map) /
                               sizeof(ffmpeg_schro_pixel_format_map[0]);

    int idx;

    for (idx = 0; idx < num_formats; ++idx) {
        if (ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt == schro_pix_fmt) {
            *schro_frame_fmt =
                          ffmpeg_schro_pixel_format_map[idx].schro_frame_fmt;
            return 0;
        }
    }
    return -1;
}

/** Frame-release callback handed to libschroedinger: frees the AVPicture
 *  that backs the SchroFrame's planes. */
static void FreeSchroFrame(SchroFrame *frame, void *priv)
{
    AVPicture *p_pic = priv;

    if (!p_pic)
        return;

    avpicture_free(p_pic);
    av_freep(&p_pic);
}

/**
 * Allocates a SchroFrame backed by a freshly-allocated AVPicture matching
 * the codec context's dimensions and pixel format. The picture is released
 * automatically when libschroedinger frees the frame (via FreeSchroFrame).
 *
 * @return the new frame, or NULL on allocation failure.
 *         (Fix: the original ignored av_mallocz/avpicture_alloc/
 *         schro_frame_new failures and dereferenced NULL.)
 */
SchroFrame *ff_create_schro_frame(AVCodecContext *avccontext,
                                  SchroFrameFormat schro_frame_fmt)
{
    AVPicture *p_pic;
    SchroFrame *p_frame;
    int y_width, uv_width;
    int y_height, uv_height;
    int i;

    y_width   = avccontext->width;
    y_height  = avccontext->height;
    uv_width  = y_width  >> (SCHRO_FRAME_FORMAT_H_SHIFT(schro_frame_fmt));
    uv_height = y_height >> (SCHRO_FRAME_FORMAT_V_SHIFT(schro_frame_fmt));

    p_pic = av_mallocz(sizeof(AVPicture));
    if (!p_pic)
        return NULL;
    if (avpicture_alloc(p_pic, avccontext->pix_fmt, y_width, y_height) < 0) {
        av_free(p_pic);
        return NULL;
    }

    p_frame         = schro_frame_new();
    if (!p_frame) {
        avpicture_free(p_pic);
        av_free(p_pic);
        return NULL;
    }
    p_frame->format = schro_frame_fmt;
    p_frame->width  = y_width;
    p_frame->height = y_height;
    schro_frame_set_free_callback(p_frame, FreeSchroFrame, (void *)p_pic);

    for (i = 0; i < 3; ++i) {
        /* Plane 0 is luma; planes 1 and 2 are subsampled chroma. */
        p_frame->components[i].width  = i ? uv_width : y_width;
        p_frame->components[i].stride = p_pic->linesize[i];
        p_frame->components[i].height = i ? uv_height : y_height;
        p_frame->components[i].length =
                 p_frame->components[i].stride * p_frame->components[i].height;
        p_frame->components[i].data   = p_pic->data[i];

        if (i) {
            p_frame->components[i].v_shift =
                SCHRO_FRAME_FORMAT_V_SHIFT(p_frame->format);
            p_frame->components[i].h_shift =
                SCHRO_FRAME_FORMAT_H_SHIFT(p_frame->format);
        }
    }

    return p_frame;
}
123linslouis-android-video-cutter
jni/libavcodec/libschroedinger.c
C
asf20
4,453
/*
 * H.264 HW decode acceleration through VA API
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vaapi_internal.h"
#include "h264.h"

/** @file
 *  This file implements the glue code between FFmpeg's and VA API's
 *  structures for H.264 decoding.
 */

/**
 * Initializes an empty VA API picture.
 *
 * VA API requires a fixed-size reference picture array.
 */
static void init_vaapi_pic(VAPictureH264 *va_pic)
{
    va_pic->picture_id          = VA_INVALID_ID;
    va_pic->flags               = VA_PICTURE_H264_INVALID;
    va_pic->TopFieldOrderCnt    = 0;
    va_pic->BottomFieldOrderCnt = 0;
}

/**
 * Translates an FFmpeg Picture into its VA API form.
 *
 * @param[out] va_pic          A pointer to VA API's own picture struct
 * @param[in]  pic             A pointer to the FFmpeg picture struct to convert
 * @param[in]  pic_structure   The picture field type (as defined in mpegvideo.h),
 *                             supersedes pic's field type if nonzero.
 */
static void fill_vaapi_pic(VAPictureH264 *va_pic,
                           Picture       *pic,
                           int            pic_structure)
{
    /* 0 means "use the picture's own reference field type". */
    if (pic_structure == 0)
        pic_structure = pic->reference;

    va_pic->picture_id = ff_vaapi_get_surface_id(pic);
    /* Long-term references are identified by pic_id, short-term by frame_num. */
    va_pic->frame_idx  = pic->long_ref ? pic->pic_id : pic->frame_num;

    va_pic->flags      = 0;
    if (pic_structure != PICT_FRAME)
        va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
    if (pic->reference)
        va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

    /* INT_MAX marks "no POC for this field" on the FFmpeg side. */
    va_pic->TopFieldOrderCnt = 0;
    if (pic->field_poc[0] != INT_MAX)
        va_pic->TopFieldOrderCnt = pic->field_poc[0];

    va_pic->BottomFieldOrderCnt = 0;
    if (pic->field_poc[1] != INT_MAX)
        va_pic->BottomFieldOrderCnt = pic->field_poc[1];
}

/** Decoded Picture Buffer (DPB). */
typedef struct DPB {
    int            size;       ///< Current number of reference frames in the DPB
    int            max_size;   ///< Max number of reference frames. This is FF_ARRAY_ELEMS(VAPictureParameterBufferH264.ReferenceFrames)
    VAPictureH264 *va_pics;    ///< Pointer to VAPictureParameterBufferH264.ReferenceFrames array
} DPB;

/**
 * Appends picture to the decoded picture buffer, in a VA API form that
 * merges the second field picture attributes with the first, if
 * available.  The decoded picture buffer's size must be large enough
 * to receive the new VA API picture object.
 */
static int dpb_add(DPB *dpb, Picture *pic)
{
    int i;

    if (dpb->size >= dpb->max_size)
        return -1;

    /* Look for an existing entry for the same surface (the other field). */
    for (i = 0; i < dpb->size; i++) {
        VAPictureH264 * const va_pic = &dpb->va_pics[i];
        if (va_pic->picture_id == ff_vaapi_get_surface_id(pic)) {
            VAPictureH264 temp_va_pic;
            fill_vaapi_pic(&temp_va_pic, pic, 0);

            if ((temp_va_pic.flags ^ va_pic->flags) & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD)) {
                va_pic->flags |= temp_va_pic.flags & (VA_PICTURE_H264_TOP_FIELD | VA_PICTURE_H264_BOTTOM_FIELD);
                /* Merge second field */
                if (temp_va_pic.flags & VA_PICTURE_H264_TOP_FIELD) {
                    va_pic->TopFieldOrderCnt = temp_va_pic.TopFieldOrderCnt;
                } else {
                    va_pic->BottomFieldOrderCnt = temp_va_pic.BottomFieldOrderCnt;
                }
            }
            return 0;
        }
    }

    fill_vaapi_pic(&dpb->va_pics[dpb->size++], pic, 0);
    return 0;
}

/** Fills in VA API reference frames array. */
static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param,
                                      H264Context                  *h)
{
    DPB dpb;
    int i;

    dpb.size     = 0;
    dpb.max_size = FF_ARRAY_ELEMS(pic_param->ReferenceFrames);
    dpb.va_pics  = pic_param->ReferenceFrames;
    for (i = 0; i < dpb.max_size; i++)
        init_vaapi_pic(&dpb.va_pics[i]);

    /* Add all short-term, then all long-term, reference pictures. */
    for (i = 0; i < h->short_ref_count; i++) {
        Picture * const pic = h->short_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }

    for (i = 0; i < 16; i++) {
        Picture * const pic = h->long_ref[i];
        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
            return -1;
    }
    return 0;
}

/**
 * Fills in VA API reference picture lists from the FFmpeg reference
 * picture list.
 *
 * @param[out] RefPicList  VA API internal reference picture list
 * @param[in]  ref_list    A pointer to the FFmpeg reference list
 * @param[in]  ref_count   The number of reference pictures in ref_list
 */
static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
                                  Picture      *ref_list,
                                  unsigned int  ref_count)
{
    unsigned int i, n = 0;
    for (i = 0; i < ref_count; i++)
        if (ref_list[i].reference)
            fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0);

    /* Remaining slots are marked invalid, as VA API expects. */
    for (; n < 32; n++)
        init_vaapi_pic(&RefPicList[n]);
}

/**
 * Fills in prediction weight table.
 *
 * VA API requires a plain prediction weight table as it does not infer
 * any value.
 *
 * @param[in]  h                   A pointer to the current H.264 context
 * @param[in]  list                The reference frame list index to use
 * @param[out] luma_weight_flag    VA API plain luma weight flag
 * @param[out] luma_weight         VA API plain luma weight table
 * @param[out] luma_offset         VA API plain luma offset table
 * @param[out] chroma_weight_flag  VA API plain chroma weight flag
 * @param[out] chroma_weight       VA API plain chroma weight table
 * @param[out] chroma_offset       VA API plain chroma offset table
 */
static void fill_vaapi_plain_pred_weight_table(H264Context   *h,
                                               int            list,
                                               unsigned char *luma_weight_flag,
                                               short          luma_weight[32],
                                               short          luma_offset[32],
                                               unsigned char *chroma_weight_flag,
                                               short          chroma_weight[32][2],
                                               short          chroma_offset[32][2])
{
    unsigned int i, j;

    *luma_weight_flag   = h->luma_weight_flag[list];
    *chroma_weight_flag = h->chroma_weight_flag[list];

    for (i = 0; i < h->ref_count[list]; i++) {
        /* VA API also wants the inferred (default) values, not
           only what is available in the bitstream (7.4.3.2). */
        if (h->luma_weight_flag[list]) {
            luma_weight[i] = h->luma_weight[i][list][0];
            luma_offset[i] = h->luma_weight[i][list][1];
        } else {
            luma_weight[i] = 1 << h->luma_log2_weight_denom;
            luma_offset[i] = 0;
        }
        for (j = 0; j < 2; j++) {
            if (h->chroma_weight_flag[list]) {
                chroma_weight[i][j] = h->chroma_weight[i][list][j][0];
                chroma_offset[i][j] = h->chroma_weight[i][list][j][1];
            } else {
                chroma_weight[i][j] = 1 << h->chroma_log2_weight_denom;
                chroma_offset[i][j] = 0;
            }
        }
    }
}

/** Initializes and starts decoding a frame with VA API. */
static int start_frame(AVCodecContext          *avctx,
                       av_unused const uint8_t *buffer,
                       av_unused uint32_t       size)
{
    H264Context * const h = avctx->priv_data;
    MpegEncContext * const s = &h->s;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferH264 *pic_param;
    VAIQMatrixBufferH264 *iq_matrix;

    dprintf(avctx, "start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferH264);

    /* Fill in VAPictureParameterBufferH264. */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferH264));
    if (!pic_param)
        return -1;
    fill_vaapi_pic(&pic_param->CurrPic, s->current_picture_ptr, s->picture_structure);
    if (fill_vaapi_ReferenceFrames(pic_param, h) < 0)
        return -1;
    /* Sequence/picture parameter set fields, mapped one-to-one from the
       FFmpeg SPS/PPS parse results ("minus" fields follow the H.264 spec
       naming: the stored value is the coded value minus the constant). */
    pic_param->picture_width_in_mbs_minus1                      = s->mb_width - 1;
    pic_param->picture_height_in_mbs_minus1                     = s->mb_height - 1;
    pic_param->bit_depth_luma_minus8                            = h->sps.bit_depth_luma - 8;
    pic_param->bit_depth_chroma_minus8                          = h->sps.bit_depth_chroma - 8;
    pic_param->num_ref_frames                                   = h->sps.ref_frame_count;
    pic_param->seq_fields.value                                 = 0; /* reset all bits */
    pic_param->seq_fields.bits.chroma_format_idc                = h->sps.chroma_format_idc;
    pic_param->seq_fields.bits.residual_colour_transform_flag   = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
    pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
    pic_param->seq_fields.bits.frame_mbs_only_flag              = h->sps.frame_mbs_only_flag;
    pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = h->sps.mb_aff;
    pic_param->seq_fields.bits.direct_8x8_inference_flag        = h->sps.direct_8x8_inference_flag;
    pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = h->sps.level_idc >= 31; /* A.3.3.2 */
    pic_param->seq_fields.bits.log2_max_frame_num_minus4        = h->sps.log2_max_frame_num - 4;
    pic_param->seq_fields.bits.pic_order_cnt_type               = h->sps.poc_type;
    pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
    pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
    pic_param->num_slice_groups_minus1                          = h->pps.slice_group_count - 1;
    pic_param->slice_group_map_type                             = h->pps.mb_slice_group_map_type;
    pic_param->slice_group_change_rate_minus1                   = 0; /* XXX: unimplemented in FFmpeg */
    pic_param->pic_init_qp_minus26                              = h->pps.init_qp - 26;
    pic_param->pic_init_qs_minus26                              = h->pps.init_qs - 26;
    pic_param->chroma_qp_index_offset                           = h->pps.chroma_qp_index_offset[0];
    pic_param->second_chroma_qp_index_offset                    = h->pps.chroma_qp_index_offset[1];
    pic_param->pic_fields.value                                 = 0; /* reset all bits */
    pic_param->pic_fields.bits.entropy_coding_mode_flag         = h->pps.cabac;
    pic_param->pic_fields.bits.weighted_pred_flag               = h->pps.weighted_pred;
    pic_param->pic_fields.bits.weighted_bipred_idc              = h->pps.weighted_bipred_idc;
    pic_param->pic_fields.bits.transform_8x8_mode_flag          = h->pps.transform_8x8_mode;
    pic_param->pic_fields.bits.field_pic_flag                   = s->picture_structure != PICT_FRAME;
    pic_param->pic_fields.bits.constrained_intra_pred_flag      = h->pps.constrained_intra_pred;
    pic_param->pic_fields.bits.pic_order_present_flag           = h->pps.pic_order_present;
    pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
    pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = h->pps.redundant_pic_cnt_present;
    pic_param->pic_fields.bits.reference_pic_flag               = h->nal_ref_idc != 0;
    pic_param->frame_num                                        = h->frame_num;

    /* Fill in VAIQMatrixBufferH264. */
    iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
    if (!iq_matrix)
        return -1;
    memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
    memcpy(iq_matrix->ScalingList8x8, h->pps.scaling_matrix8, sizeof(iq_matrix->ScalingList8x8));
    return 0;
}

/** Ends a hardware decoding based frame. */
static int end_frame(AVCodecContext *avctx)
{
    H264Context * const h = avctx->priv_data;

    dprintf(avctx, "end_frame()\n");
    return ff_vaapi_common_end_frame(&h->s);
}

/** Decodes the given H.264 slice with VA API. */
static int decode_slice(AVCodecContext *avctx,
                        const uint8_t  *buffer,
                        uint32_t        size)
{
    H264Context * const h = avctx->priv_data;
    MpegEncContext * const s = &h->s;
    VASliceParameterBufferH264 *slice_param;

    dprintf(avctx, "decode_slice(): buffer %p, size %d\n", buffer, size);

    /* Fill in VASliceParameterBufferH264. */
    slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->slice_data_bit_offset          = get_bits_count(&h->s.gb) + 8; /* bit buffer started beyond nal_unit_type */
    slice_param->first_mb_in_slice              = (s->mb_y >> FIELD_OR_MBAFF_PICTURE) * s->mb_width + s->mb_x;
    slice_param->slice_type                     = ff_h264_get_slice_type(h);
    slice_param->direct_spatial_mv_pred_flag    = h->slice_type == FF_B_TYPE ? h->direct_spatial_mv_pred : 0;
    slice_param->num_ref_idx_l0_active_minus1   = h->list_count > 0 ? h->ref_count[0] - 1 : 0;
    slice_param->num_ref_idx_l1_active_minus1   = h->list_count > 1 ? h->ref_count[1] - 1 : 0;
    slice_param->cabac_init_idc                 = h->cabac_init_idc;
    slice_param->slice_qp_delta                 = s->qscale - h->pps.init_qp;
    slice_param->disable_deblocking_filter_idc  = h->deblocking_filter < 2 ? !h->deblocking_filter : h->deblocking_filter;
    slice_param->slice_alpha_c0_offset_div2     = h->slice_alpha_c0_offset / 2 - 26;
    slice_param->slice_beta_offset_div2         = h->slice_beta_offset     / 2 - 26;
    slice_param->luma_log2_weight_denom         = h->luma_log2_weight_denom;
    slice_param->chroma_log2_weight_denom       = h->chroma_log2_weight_denom;
    fill_vaapi_RefPicList(slice_param->RefPicList0, h->ref_list[0], h->list_count > 0 ? h->ref_count[0] : 0);
    fill_vaapi_RefPicList(slice_param->RefPicList1, h->ref_list[1], h->list_count > 1 ? h->ref_count[1] : 0);
    fill_vaapi_plain_pred_weight_table(h, 0,
                                       &slice_param->luma_weight_l0_flag,   slice_param->luma_weight_l0,   slice_param->luma_offset_l0,
                                       &slice_param->chroma_weight_l0_flag, slice_param->chroma_weight_l0, slice_param->chroma_offset_l0);
    fill_vaapi_plain_pred_weight_table(h, 1,
                                       &slice_param->luma_weight_l1_flag,   slice_param->luma_weight_l1,   slice_param->luma_offset_l1,
                                       &slice_param->chroma_weight_l1_flag, slice_param->chroma_weight_l1, slice_param->chroma_offset_l1);
    return 0;
}

/* Hardware accelerator registration: hooks VA API-based H.264 decoding
   into libavcodec via the AVHWAccel interface. */
AVHWAccel h264_vaapi_hwaccel = {
    .name           = "h264_vaapi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_H264,
    .pix_fmt        = PIX_FMT_VAAPI_VLD,
    .capabilities   = 0,
    .start_frame    = start_frame,
    .end_frame      = end_frame,
    .decode_slice   = decode_slice,
    .priv_data_size = 0,
};
123linslouis-android-video-cutter
jni/libavcodec/vaapi_h264.c
C
asf20
16,131
/* * Various fixed-point math operations * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_CELP_MATH_H #define AVCODEC_CELP_MATH_H #include <stdint.h> /** * fixed-point implementation of cosine in [0; PI) domain. * @param arg fixed-point cosine argument, 0 <= arg < 0x4000 * * @return value of (1<<15) * cos(arg * PI / (1<<14)), -0x8000 <= result <= 0x7fff */ int16_t ff_cos(uint16_t arg); /** * fixed-point implementation of exp2(x) in [0; 1] domain. * @param power argument to exp2, 0 <= power <= 0x7fff * * @return value of (1<<20) * exp2(power / (1<<15)) * 0x8000c <= result <= 0xfffea */ int ff_exp2(uint16_t power); /** * Calculates log2(x). * @param value function argument, 0 < value <= 7fff ffff * * @return value of (1<<15) * log2(value) */ int ff_log2(uint32_t value); /** * Shift value left or right depending on sign of offset parameter. * @param value value to shift * @param offset shift offset * * @return value << offset, if offset>=0; value >> -offset - otherwise */ static inline int bidir_sal(int value, int offset) { if(offset < 0) return value >> -offset; else return value << offset; } /** * returns the dot product. 
* @param a input data array * @param b input data array * @param length number of elements * * @return dot product = sum of elementwise products */ float ff_dot_productf(const float* a, const float* b, int length); #endif /* AVCODEC_CELP_MATH_H */
123linslouis-android-video-cutter
jni/libavcodec/celp_math.h
C
asf20
2,227
/* * MLP parser prototypes * Copyright (c) 2007 Ian Caulfield * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MLP parser prototypes */ #ifndef AVCODEC_MLP_PARSER_H #define AVCODEC_MLP_PARSER_H #include "get_bits.h" typedef struct MLPHeaderInfo { int stream_type; ///< 0xBB for MLP, 0xBA for TrueHD int group1_bits; ///< The bit depth of the first substream int group2_bits; ///< Bit depth of the second substream (MLP only) int group1_samplerate; ///< Sample rate of first substream int group2_samplerate; ///< Sample rate of second substream (MLP only) int channels_mlp; ///< Channel arrangement for MLP streams int channels_thd_stream1; ///< Channel arrangement for substream 1 of TrueHD streams (5.1) int channels_thd_stream2; ///< Channel arrangement for substream 2 of TrueHD streams (7.1) int access_unit_size; ///< Number of samples per coded frame int access_unit_size_pow2; ///< Next power of two above number of samples per frame int is_vbr; ///< Stream is VBR instead of CBR int peak_bitrate; ///< Peak bitrate for VBR, actual bitrate (==peak) for CBR int num_substreams; ///< Number of substreams within stream } MLPHeaderInfo; int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb); #endif /* AVCODEC_MLP_PARSER_H */
123linslouis-android-video-cutter
jni/libavcodec/mlp_parser.h
C
asf20
2,149
/* * Header file for hardcoded Parametric Stereo tables * * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AACPS_TABLEGEN_H #define AACPS_TABLEGEN_H #include <stdint.h> #if CONFIG_HARDCODED_TABLES #define ps_tableinit() #include "libavcodec/aacps_tables.h" #else #include "../libavutil/common.h" #include "../libavutil/mathematics.h" #define NR_ALLPASS_BANDS20 30 #define NR_ALLPASS_BANDS34 50 #define PS_AP_LINKS 3 static float pd_re_smooth[8*8*8]; static float pd_im_smooth[8*8*8]; static float HA[46][8][4]; static float HB[46][8][4]; static float f20_0_8 [ 8][7][2]; static float f34_0_12[12][7][2]; static float f34_1_8 [ 8][7][2]; static float f34_2_4 [ 4][7][2]; static float Q_fract_allpass[2][50][3][2]; static float phi_fract[2][50][2]; static const float g0_Q8[] = { 0.00746082949812f, 0.02270420949825f, 0.04546865930473f, 0.07266113929591f, 0.09885108575264f, 0.11793710567217f, 0.125f }; static const float g0_Q12[] = { 0.04081179924692f, 0.03812810994926f, 0.05144908135699f, 0.06399831151592f, 0.07428313801106f, 0.08100347892914f, 0.08333333333333f }; static const float g1_Q8[] = { 0.01565675600122f, 0.03752716391991f, 0.05417891378782f, 0.08417044116767f, 0.10307344158036f, 0.12222452249753f, 0.125f }; static 
const float g2_Q4[] = { -0.05908211155639f, -0.04871498374946f, 0.0f, 0.07778723915851f, 0.16486303567403f, 0.23279856662996f, 0.25f }; static void make_filters_from_proto(float (*filter)[7][2], const float *proto, int bands) { int q, n; for (q = 0; q < bands; q++) { for (n = 0; n < 7; n++) { double theta = 2 * M_PI * (q + 0.5) * (n - 6) / bands; filter[q][n][0] = proto[n] * cos(theta); filter[q][n][1] = proto[n] * -sin(theta); } } } static void ps_tableinit(void) { static const float ipdopd_sin[] = { 0, M_SQRT1_2, 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2 }; static const float ipdopd_cos[] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2, 0, M_SQRT1_2 }; int pd0, pd1, pd2; static const float iid_par_dequant[] = { //iid_par_dequant_default 0.05623413251903, 0.12589254117942, 0.19952623149689, 0.31622776601684, 0.44668359215096, 0.63095734448019, 0.79432823472428, 1, 1.25892541179417, 1.58489319246111, 2.23872113856834, 3.16227766016838, 5.01187233627272, 7.94328234724282, 17.7827941003892, //iid_par_dequant_fine 0.00316227766017, 0.00562341325190, 0.01, 0.01778279410039, 0.03162277660168, 0.05623413251903, 0.07943282347243, 0.11220184543020, 0.15848931924611, 0.22387211385683, 0.31622776601684, 0.39810717055350, 0.50118723362727, 0.63095734448019, 0.79432823472428, 1, 1.25892541179417, 1.58489319246111, 1.99526231496888, 2.51188643150958, 3.16227766016838, 4.46683592150963, 6.30957344480193, 8.91250938133745, 12.5892541179417, 17.7827941003892, 31.6227766016838, 56.2341325190349, 100, 177.827941003892, 316.227766016837, }; static const float icc_invq[] = { 1, 0.937, 0.84118, 0.60092, 0.36764, 0, -0.589, -1 }; static const float acos_icc_invq[] = { 0, 0.35685527, 0.57133466, 0.92614472, 1.1943263, M_PI/2, 2.2006171, M_PI }; int iid, icc; int k, m; static const int8_t f_center_20[] = { -3, -1, 1, 3, 5, 7, 10, 14, 18, 22, }; static const int8_t f_center_34[] = { 2, 6, 10, 14, 18, 22, 26, 30, 34,-10, -6, -2, 51, 57, 15, 21, 27, 33, 39, 45, 54, 66, 78, 42, 102, 66, 78, 
90,102,114,126, 90, }; static const float fractional_delay_links[] = { 0.43f, 0.75f, 0.347f }; const float fractional_delay_gain = 0.39f; for (pd0 = 0; pd0 < 8; pd0++) { float pd0_re = ipdopd_cos[pd0]; float pd0_im = ipdopd_sin[pd0]; for (pd1 = 0; pd1 < 8; pd1++) { float pd1_re = ipdopd_cos[pd1]; float pd1_im = ipdopd_sin[pd1]; for (pd2 = 0; pd2 < 8; pd2++) { float pd2_re = ipdopd_cos[pd2]; float pd2_im = ipdopd_sin[pd2]; float re_smooth = 0.25f * pd0_re + 0.5f * pd1_re + pd2_re; float im_smooth = 0.25f * pd0_im + 0.5f * pd1_im + pd2_im; float pd_mag = 1 / sqrt(im_smooth * im_smooth + re_smooth * re_smooth); pd_re_smooth[pd0*64+pd1*8+pd2] = re_smooth * pd_mag; pd_im_smooth[pd0*64+pd1*8+pd2] = im_smooth * pd_mag; } } } for (iid = 0; iid < 46; iid++) { float c = iid_par_dequant[iid]; //<Linear Inter-channel Intensity Difference float c1 = (float)M_SQRT2 / sqrtf(1.0f + c*c); float c2 = c * c1; for (icc = 0; icc < 8; icc++) { /*if (PS_BASELINE || ps->icc_mode < 3)*/ { float alpha = 0.5f * acos_icc_invq[icc]; float beta = alpha * (c1 - c2) * (float)M_SQRT1_2; HA[iid][icc][0] = c2 * cosf(beta + alpha); HA[iid][icc][1] = c1 * cosf(beta - alpha); HA[iid][icc][2] = c2 * sinf(beta + alpha); HA[iid][icc][3] = c1 * sinf(beta - alpha); } /* else */ { float alpha, gamma, mu, rho; float alpha_c, alpha_s, gamma_c, gamma_s; rho = FFMAX(icc_invq[icc], 0.05f); alpha = 0.5f * atan2f(2.0f * c * rho, c*c - 1.0f); mu = c + 1.0f / c; mu = sqrtf(1 + (4 * rho * rho - 4)/(mu * mu)); gamma = atanf(sqrtf((1.0f - mu)/(1.0f + mu))); if (alpha < 0) alpha += M_PI/2; alpha_c = cosf(alpha); alpha_s = sinf(alpha); gamma_c = cosf(gamma); gamma_s = sinf(gamma); HB[iid][icc][0] = M_SQRT2 * alpha_c * gamma_c; HB[iid][icc][1] = M_SQRT2 * alpha_s * gamma_c; HB[iid][icc][2] = -M_SQRT2 * alpha_s * gamma_s; HB[iid][icc][3] = M_SQRT2 * alpha_c * gamma_s; } } } for (k = 0; k < NR_ALLPASS_BANDS20; k++) { double f_center, theta; if (k < FF_ARRAY_ELEMS(f_center_20)) f_center = f_center_20[k] * 0.125; else f_center 
= k - 6.5f; for (m = 0; m < PS_AP_LINKS; m++) { theta = -M_PI * fractional_delay_links[m] * f_center; Q_fract_allpass[0][k][m][0] = cos(theta); Q_fract_allpass[0][k][m][1] = sin(theta); } theta = -M_PI*fractional_delay_gain*f_center; phi_fract[0][k][0] = cos(theta); phi_fract[0][k][1] = sin(theta); } for (k = 0; k < NR_ALLPASS_BANDS34; k++) { double f_center, theta; if (k < FF_ARRAY_ELEMS(f_center_34)) f_center = f_center_34[k] / 24.; else f_center = k - 26.5f; for (m = 0; m < PS_AP_LINKS; m++) { theta = -M_PI * fractional_delay_links[m] * f_center; Q_fract_allpass[1][k][m][0] = cos(theta); Q_fract_allpass[1][k][m][1] = sin(theta); } theta = -M_PI*fractional_delay_gain*f_center; phi_fract[1][k][0] = cos(theta); phi_fract[1][k][1] = sin(theta); } make_filters_from_proto(f20_0_8, g0_Q8, 8); make_filters_from_proto(f34_0_12, g0_Q12, 12); make_filters_from_proto(f34_1_8, g1_Q8, 8); make_filters_from_proto(f34_2_4, g2_Q4, 4); } #endif /* CONFIG_HARDCODED_TABLES */ #endif /* AACPS_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/aacps_tablegen.h
C
asf20
8,354
/* * id Quake II CIN Video Decoder * Copyright (C) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * id Quake II Cin Video Decoder by Dr. Tim Ferguson * For more information about the id CIN format, visit: * http://www.csse.monash.edu.au/~timf/ * * This video decoder outputs PAL8 colorspace data. Interacting with this * decoder is a little involved. During initialization, the demuxer must * transmit the 65536-byte Huffman table(s) to the decoder via extradata. * Then, whenever a palette change is encountered while demuxing the file, * the demuxer must use the same extradata space to transmit an * AVPaletteControl structure. * * id CIN video is purely Huffman-coded, intraframe-only codec. It achieves * a little more compression by exploiting the fact that adjacent pixels * tend to be similar. * * Note that this decoder could use ffmpeg's optimized VLC facilities * rather than naive, tree-based Huffman decoding. However, there are 256 * Huffman tables. Plus, the VLC bit coding order is right -> left instead * or left -> right, so all of the bits would have to be reversed. Further, * the original Quake II implementation likely used a similar naive * decoding algorithm and it worked fine on much lower spec machines. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "avcodec.h" #define HUFFMAN_TABLE_SIZE 64 * 1024 #define HUF_TOKENS 256 #define PALETTE_COUNT 256 typedef struct { int count; unsigned char used; int children[2]; } hnode; typedef struct IdcinContext { AVCodecContext *avctx; AVFrame frame; const unsigned char *buf; int size; hnode huff_nodes[256][HUF_TOKENS*2]; int num_huff_nodes[256]; } IdcinContext; /* * Find the lowest probability node in a Huffman table, and mark it as * being assigned to a higher probability. * Returns the node index of the lowest unused node, or -1 if all nodes * are used. */ static int huff_smallest_node(hnode *hnodes, int num_hnodes) { int i; int best, best_node; best = 99999999; best_node = -1; for(i = 0; i < num_hnodes; i++) { if(hnodes[i].used) continue; if(!hnodes[i].count) continue; if(hnodes[i].count < best) { best = hnodes[i].count; best_node = i; } } if(best_node == -1) return -1; hnodes[best_node].used = 1; return best_node; } /* * Build the Huffman tree using the generated/loaded probabilities histogram. * * On completion: * huff_nodes[prev][i < HUF_TOKENS] - are the nodes at the base of the tree. * huff_nodes[prev][i >= HUF_TOKENS] - are used to construct the tree. * num_huff_nodes[prev] - contains the index to the root node of the tree. * That is: huff_nodes[prev][num_huff_nodes[prev]] is the root node. 
*/ static av_cold void huff_build_tree(IdcinContext *s, int prev) { hnode *node, *hnodes; int num_hnodes, i; num_hnodes = HUF_TOKENS; hnodes = s->huff_nodes[prev]; for(i = 0; i < HUF_TOKENS * 2; i++) hnodes[i].used = 0; while (1) { node = &hnodes[num_hnodes]; /* next free node */ /* pick two lowest counts */ node->children[0] = huff_smallest_node(hnodes, num_hnodes); if(node->children[0] == -1) break; /* reached the root node */ node->children[1] = huff_smallest_node(hnodes, num_hnodes); if(node->children[1] == -1) break; /* reached the root node */ /* combine nodes probability for new node */ node->count = hnodes[node->children[0]].count + hnodes[node->children[1]].count; num_hnodes++; } s->num_huff_nodes[prev] = num_hnodes - 1; } static av_cold int idcin_decode_init(AVCodecContext *avctx) { IdcinContext *s = avctx->priv_data; int i, j, histogram_index = 0; unsigned char *histograms; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_PAL8; /* make sure the Huffman tables make it */ if (s->avctx->extradata_size != HUFFMAN_TABLE_SIZE) { av_log(s->avctx, AV_LOG_ERROR, " id CIN video: expected extradata size of %d\n", HUFFMAN_TABLE_SIZE); return -1; } /* build the 256 Huffman decode trees */ histograms = (unsigned char *)s->avctx->extradata; for (i = 0; i < 256; i++) { for(j = 0; j < HUF_TOKENS; j++) s->huff_nodes[i][j].count = histograms[histogram_index++]; huff_build_tree(s, i); } s->frame.data[0] = NULL; return 0; } static void idcin_decode_vlcs(IdcinContext *s) { hnode *hnodes; long x, y; int prev; unsigned char v = 0; int bit_pos, node_num, dat_pos; prev = bit_pos = dat_pos = 0; for (y = 0; y < (s->frame.linesize[0] * s->avctx->height); y += s->frame.linesize[0]) { for (x = y; x < y + s->avctx->width; x++) { node_num = s->num_huff_nodes[prev]; hnodes = s->huff_nodes[prev]; while(node_num >= HUF_TOKENS) { if(!bit_pos) { if(dat_pos >= s->size) { av_log(s->avctx, AV_LOG_ERROR, "Huffman decode error.\n"); return; } bit_pos = 8; v = s->buf[dat_pos++]; } node_num = 
hnodes[node_num].children[v & 0x01]; v = v >> 1; bit_pos--; } s->frame.data[0][x] = node_num; prev = node_num; } } } static int idcin_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; IdcinContext *s = avctx->priv_data; AVPaletteControl *palette_control = avctx->palctrl; s->buf = buf; s->size = buf_size; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); if (avctx->get_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, " id CIN Video: get_buffer() failed\n"); return -1; } idcin_decode_vlcs(s); /* make the palette available on the way out */ memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4); /* If palette changed inform application*/ if (palette_control->palette_changed) { palette_control->palette_changed = 0; s->frame.palette_has_changed = 1; } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* report that the buffer was completely consumed */ return buf_size; } static av_cold int idcin_decode_end(AVCodecContext *avctx) { IdcinContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec idcin_decoder = { "idcinvideo", AVMEDIA_TYPE_VIDEO, CODEC_ID_IDCIN, sizeof(IdcinContext), idcin_decode_init, NULL, idcin_decode_end, idcin_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"), };
123linslouis-android-video-cutter
jni/libavcodec/idcinvideo.c
C
asf20
7,866
/* * LPCM codecs for PCM formats found in MPEG streams * Copyright (c) 2009 Christian Schmidt * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * PCM codecs for encodings found in MPEG streams (DVD/Blu-ray) */ #include "avcodec.h" #include "bytestream.h" /* * Channel Mapping according to * Blu-ray Disc Read-Only Format Version 1 * Part 3: Audio Visual Basic Specifications * mono M1 X * stereo L R * 3/0 L R C X * 2/1 L R S X * 3/1 L R C S * 2/2 L R LS RS * 3/2 L R C LS RS X * 3/2+lfe L R C LS RS lfe * 3/4 L R C LS Rls Rrs RS X * 3/4+lfe L R C LS Rls Rrs RS lfe */ /** * Parse the header of a LPCM frame read from a MPEG-TS stream * @param avctx the codec context * @param header pointer to the first four bytes of the data packet */ static int pcm_bluray_parse_header(AVCodecContext *avctx, const uint8_t *header) { static const uint8_t bits_per_samples[4] = { 0, 16, 20, 24 }; static const uint32_t channel_layouts[16] = { 0, CH_LAYOUT_MONO, 0, CH_LAYOUT_STEREO, CH_LAYOUT_SURROUND, CH_LAYOUT_2_1, CH_LAYOUT_4POINT0, CH_LAYOUT_2_2, CH_LAYOUT_5POINT0, CH_LAYOUT_5POINT1, CH_LAYOUT_7POINT0, CH_LAYOUT_7POINT1, 0, 0, 0, 0 }; static const uint8_t channels[16] = { 0, 1, 0, 2, 3, 3, 4, 4, 5, 6, 7, 8, 0, 0, 0, 0 }; uint8_t channel_layout = header[2] >> 4; if (avctx->debug & 
FF_DEBUG_PICT_INFO) dprintf(avctx, "pcm_bluray_parse_header: header = %02x%02x%02x%02x\n", header[0], header[1], header[2], header[3]); /* get the sample depth and derive the sample format from it */ avctx->bits_per_coded_sample = bits_per_samples[header[3] >> 6]; if (!avctx->bits_per_coded_sample) { av_log(avctx, AV_LOG_ERROR, "unsupported sample depth (0)\n"); return -1; } avctx->sample_fmt = avctx->bits_per_coded_sample == 16 ? SAMPLE_FMT_S16 : SAMPLE_FMT_S32; /* get the sample rate. Not all values are known or exist. */ switch (header[2] & 0x0f) { case 1: avctx->sample_rate = 48000; break; case 4: avctx->sample_rate = 96000; break; case 5: avctx->sample_rate = 192000; break; default: avctx->sample_rate = 0; av_log(avctx, AV_LOG_ERROR, "unsupported sample rate (%d)\n", header[2] & 0x0f); return -1; } /* * get the channel number (and mapping). Not all values are known or exist. * It must be noted that the number of channels in the MPEG stream can * differ from the actual meaningful number, e.g. mono audio still has two * channels, one being empty. 
*/ avctx->channel_layout = channel_layouts[channel_layout]; avctx->channels = channels[channel_layout]; if (!avctx->channels) { av_log(avctx, AV_LOG_ERROR, "unsupported channel configuration (%d)\n", channel_layout); return -1; } avctx->bit_rate = avctx->channels * avctx->sample_rate * avctx->bits_per_coded_sample; if (avctx->debug & FF_DEBUG_PICT_INFO) dprintf(avctx, "pcm_bluray_parse_header: %d channels, %d bits per sample, %d kHz, %d kbit\n", avctx->channels, avctx->bits_per_coded_sample, avctx->sample_rate, avctx->bit_rate); return 0; } static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *src = avpkt->data; int buf_size = avpkt->size; int num_source_channels, channel, retval; int sample_size, samples, output_size; int16_t *dst16 = data; int32_t *dst32 = data; if (buf_size < 4) { av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n"); return -1; } if (pcm_bluray_parse_header(avctx, src)) return -1; src += 4; buf_size -= 4; /* There's always an even number of channels in the source */ num_source_channels = FFALIGN(avctx->channels, 2); sample_size = (num_source_channels * avctx->bits_per_coded_sample) >> 3; samples = buf_size / sample_size; output_size = samples * avctx->channels * (avctx->sample_fmt == SAMPLE_FMT_S32 ? 
4 : 2); if (output_size > *data_size) { av_log(avctx, AV_LOG_ERROR, "Insufficient output buffer space (%d bytes, needed %d bytes)\n", *data_size, output_size); return -1; } *data_size = output_size; if (samples) { switch (avctx->channel_layout) { /* cases with same number of source and coded channels */ case CH_LAYOUT_STEREO: case CH_LAYOUT_4POINT0: case CH_LAYOUT_2_2: samples *= num_source_channels; if (SAMPLE_FMT_S16 == avctx->sample_fmt) { #if HAVE_BIGENDIAN memcpy(dst16, src, output_size); #else do { *dst16++ = bytestream_get_be16(&src); } while (--samples); #endif } else { do { *dst32++ = bytestream_get_be24(&src) << 8; } while (--samples); } break; /* cases where number of source channels = coded channels + 1 */ case CH_LAYOUT_MONO: case CH_LAYOUT_SURROUND: case CH_LAYOUT_2_1: case CH_LAYOUT_5POINT0: if (SAMPLE_FMT_S16 == avctx->sample_fmt) { do { #if HAVE_BIGENDIAN memcpy(dst16, src, avctx->channels * 2); dst16 += avctx->channels; src += sample_size; #else channel = avctx->channels; do { *dst16++ = bytestream_get_be16(&src); } while (--channel); src += 2; #endif } while (--samples); } else { do { channel = avctx->channels; do { *dst32++ = bytestream_get_be24(&src) << 8; } while (--channel); src += 3; } while (--samples); } break; /* remapping: L, R, C, LBack, RBack, LF */ case CH_LAYOUT_5POINT1: if (SAMPLE_FMT_S16 == avctx->sample_fmt) { do { dst16[0] = bytestream_get_be16(&src); dst16[1] = bytestream_get_be16(&src); dst16[2] = bytestream_get_be16(&src); dst16[4] = bytestream_get_be16(&src); dst16[5] = bytestream_get_be16(&src); dst16[3] = bytestream_get_be16(&src); dst16 += 6; } while (--samples); } else { do { dst32[0] = bytestream_get_be24(&src) << 8; dst32[1] = bytestream_get_be24(&src) << 8; dst32[2] = bytestream_get_be24(&src) << 8; dst32[4] = bytestream_get_be24(&src) << 8; dst32[5] = bytestream_get_be24(&src) << 8; dst32[3] = bytestream_get_be24(&src) << 8; dst32 += 6; } while (--samples); } break; /* remapping: L, R, C, LSide, LBack, RBack, RSide, 
<unused> */ case CH_LAYOUT_7POINT0: if (SAMPLE_FMT_S16 == avctx->sample_fmt) { do { dst16[0] = bytestream_get_be16(&src); dst16[1] = bytestream_get_be16(&src); dst16[2] = bytestream_get_be16(&src); dst16[5] = bytestream_get_be16(&src); dst16[3] = bytestream_get_be16(&src); dst16[4] = bytestream_get_be16(&src); dst16[6] = bytestream_get_be16(&src); dst16 += 7; src += 2; } while (--samples); } else { do { dst32[0] = bytestream_get_be24(&src) << 8; dst32[1] = bytestream_get_be24(&src) << 8; dst32[2] = bytestream_get_be24(&src) << 8; dst32[5] = bytestream_get_be24(&src) << 8; dst32[3] = bytestream_get_be24(&src) << 8; dst32[4] = bytestream_get_be24(&src) << 8; dst32[6] = bytestream_get_be24(&src) << 8; dst32 += 7; src += 3; } while (--samples); } break; /* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */ case CH_LAYOUT_7POINT1: if (SAMPLE_FMT_S16 == avctx->sample_fmt) { do { dst16[0] = bytestream_get_be16(&src); dst16[1] = bytestream_get_be16(&src); dst16[2] = bytestream_get_be16(&src); dst16[6] = bytestream_get_be16(&src); dst16[4] = bytestream_get_be16(&src); dst16[5] = bytestream_get_be16(&src); dst16[7] = bytestream_get_be16(&src); dst16[3] = bytestream_get_be16(&src); dst16 += 8; } while (--samples); } else { do { dst32[0] = bytestream_get_be24(&src) << 8; dst32[1] = bytestream_get_be24(&src) << 8; dst32[2] = bytestream_get_be24(&src) << 8; dst32[6] = bytestream_get_be24(&src) << 8; dst32[4] = bytestream_get_be24(&src) << 8; dst32[5] = bytestream_get_be24(&src) << 8; dst32[7] = bytestream_get_be24(&src) << 8; dst32[3] = bytestream_get_be24(&src) << 8; dst32 += 8; } while (--samples); } break; } } retval = src - avpkt->data; if (avctx->debug & FF_DEBUG_BITSTREAM) dprintf(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n", retval, *data_size); return retval; } AVCodec pcm_bluray_decoder = { "pcm_bluray", AVMEDIA_TYPE_AUDIO, CODEC_ID_PCM_BLURAY, 0, NULL, NULL, NULL, pcm_bluray_decode_frame, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16, 
SAMPLE_FMT_S32, SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"), };
123linslouis-android-video-cutter
jni/libavcodec/pcm-mpeg.c
C
asf20
11,641
/* * DVB subtitle parser for FFmpeg * Copyright (c) 2005 Ian Caulfield * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" //#define DEBUG //#define DEBUG_PACKET_CONTENTS /* Parser (mostly) copied from dvdsub.c */ #define PARSE_BUF_SIZE (65536) /* parser definition */ typedef struct DVBSubParseContext { uint8_t *packet_buf; int packet_start; int packet_index; int in_packet; } DVBSubParseContext; static av_cold int dvbsub_parse_init(AVCodecParserContext *s) { DVBSubParseContext *pc = s->priv_data; pc->packet_buf = av_malloc(PARSE_BUF_SIZE); return 0; } static int dvbsub_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { DVBSubParseContext *pc = s->priv_data; uint8_t *p, *p_end; int len, buf_pos = 0; dprintf(avctx, "DVB parse packet pts=%"PRIx64", lpts=%"PRIx64", cpts=%"PRIx64":\n", s->pts, s->last_pts, s->cur_frame_pts[s->cur_frame_start_index]); #ifdef DEBUG_PACKET_CONTENTS int i; for (i=0; i < buf_size; i++) { av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]); if (i % 16 == 15) av_log(avctx, AV_LOG_INFO, "\n"); } if (i % 16 != 0) av_log(avctx, AV_LOG_INFO, "\n"); #endif *poutbuf = NULL; *poutbuf_size = 0; s->fetch_timestamp = 1; if (s->last_pts 
!= s->pts && s->pts != AV_NOPTS_VALUE) /* Start of a new packet */ { if (pc->packet_index != pc->packet_start) { dprintf(avctx, "Discarding %d bytes\n", pc->packet_index - pc->packet_start); } pc->packet_start = 0; pc->packet_index = 0; if (buf_size < 2 || buf[0] != 0x20 || buf[1] != 0x00) { dprintf(avctx, "Bad packet header\n"); return -1; } buf_pos = 2; pc->in_packet = 1; } else { if (pc->packet_start != 0) { if (pc->packet_index != pc->packet_start) { memmove(pc->packet_buf, pc->packet_buf + pc->packet_start, pc->packet_index - pc->packet_start); pc->packet_index -= pc->packet_start; pc->packet_start = 0; } else { pc->packet_start = 0; pc->packet_index = 0; } } } if (buf_size - buf_pos + pc->packet_index > PARSE_BUF_SIZE) return -1; /* if not currently in a packet, discard data */ if (pc->in_packet == 0) return buf_size; memcpy(pc->packet_buf + pc->packet_index, buf + buf_pos, buf_size - buf_pos); pc->packet_index += buf_size - buf_pos; p = pc->packet_buf; p_end = pc->packet_buf + pc->packet_index; while (p < p_end) { if (*p == 0x0f) { if (p + 6 <= p_end) { len = AV_RB16(p + 4); if (p + len + 6 <= p_end) { *poutbuf_size += len + 6; p += len + 6; } else break; } else break; } else if (*p == 0xff) { if (p + 1 < p_end) { dprintf(avctx, "Junk at end of packet\n"); } pc->packet_index = p - pc->packet_buf; pc->in_packet = 0; break; } else { av_log(avctx, AV_LOG_ERROR, "Junk in packet\n"); pc->packet_index = p - pc->packet_buf; pc->in_packet = 0; break; } } if (*poutbuf_size > 0) { *poutbuf = pc->packet_buf; pc->packet_start = *poutbuf_size; } if (s->pts == AV_NOPTS_VALUE) s->pts = s->last_pts; return buf_size; } static av_cold void dvbsub_parse_close(AVCodecParserContext *s) { DVBSubParseContext *pc = s->priv_data; av_freep(&pc->packet_buf); } AVCodecParser dvbsub_parser = { { CODEC_ID_DVB_SUBTITLE }, sizeof(DVBSubParseContext), dvbsub_parse_init, dvbsub_parse, dvbsub_parse_close, };
123linslouis-android-video-cutter
jni/libavcodec/dvbsub_parser.c
C
asf20
4,963
/* * SVQ1 decoder * ported to MPlayer by Arpi <arpi@thot.banki.hu> * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru> * * Copyright (C) 2002 the xine project * Copyright (C) 2002 the ffmpeg project * * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Sorenson Vector Quantizer #1 (SVQ1) video codec. * For more information of the SVQ1 algorithm, visit: * http://www.pcisys.net/~melanson/codecs/ */ #ifndef AVCODEC_SVQ1_H #define AVCODEC_SVQ1_H #include <stdint.h> #define SVQ1_BLOCK_SKIP 0 #define SVQ1_BLOCK_INTER 1 #define SVQ1_BLOCK_INTER_4V 2 #define SVQ1_BLOCK_INTRA 3 struct svq1_frame_size { uint16_t width; uint16_t height; }; uint16_t ff_svq1_packet_checksum (const uint8_t *data, const int length, int value); extern const int8_t* const ff_svq1_inter_codebooks[6]; extern const int8_t* const ff_svq1_intra_codebooks[6]; extern const uint8_t ff_svq1_block_type_vlc[4][2]; extern const uint8_t ff_svq1_intra_multistage_vlc[6][8][2]; extern const uint8_t ff_svq1_inter_multistage_vlc[6][8][2]; extern const uint16_t ff_svq1_intra_mean_vlc[256][2]; extern const uint16_t ff_svq1_inter_mean_vlc[512][2]; extern const struct svq1_frame_size ff_svq1_frame_size_table[7]; #endif /* AVCODEC_SVQ1_H */
123linslouis-android-video-cutter
jni/libavcodec/svq1.h
C
asf20
2,089
/* * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Codebook Generator using the ELBG algorithm */ #include <string.h> #include "libavutil/lfg.h" #include "elbg.h" #include "avcodec.h" #define DELTA_ERR_MAX 0.1 ///< Precision of the ELBG algorithm (as percentual error) /** * In the ELBG jargon, a cell is the set of points that are closest to a * codebook entry. Not to be confused with a RoQ Video cell. 
*/ typedef struct cell_s { int index; struct cell_s *next; } cell; /** * ELBG internal data */ typedef struct{ int error; int dim; int numCB; int *codebook; cell **cells; int *utility; int *utility_inc; int *nearest_cb; int *points; AVLFG *rand_state; } elbg_data; static inline int distance_limited(int *a, int *b, int dim, int limit) { int i, dist=0; for (i=0; i<dim; i++) { dist += (a[i] - b[i])*(a[i] - b[i]); if (dist > limit) return INT_MAX; } return dist; } static inline void vect_division(int *res, int *vect, int div, int dim) { int i; if (div > 1) for (i=0; i<dim; i++) res[i] = ROUNDED_DIV(vect[i],div); else if (res != vect) memcpy(res, vect, dim*sizeof(int)); } static int eval_error_cell(elbg_data *elbg, int *centroid, cell *cells) { int error=0; for (; cells; cells=cells->next) error += distance_limited(centroid, elbg->points + cells->index*elbg->dim, elbg->dim, INT_MAX); return error; } static int get_closest_codebook(elbg_data *elbg, int index) { int i, pick=0, diff, diff_min = INT_MAX; for (i=0; i<elbg->numCB; i++) if (i != index) { diff = distance_limited(elbg->codebook + i*elbg->dim, elbg->codebook + index*elbg->dim, elbg->dim, diff_min); if (diff < diff_min) { pick = i; diff_min = diff; } } return pick; } static int get_high_utility_cell(elbg_data *elbg) { int i=0; /* Using linear search, do binary if it ever turns to be speed critical */ int r = av_lfg_get(elbg->rand_state)%elbg->utility_inc[elbg->numCB-1] + 1; while (elbg->utility_inc[i] < r) i++; assert(!elbg->cells[i]); return i; } /** * Implementation of the simple LBG algorithm for just two codebooks */ static int simple_lbg(int dim, int *centroid[3], int newutility[3], int *points, cell *cells) { int i, idx; int numpoints[2] = {0,0}; int newcentroid[2][dim]; cell *tempcell; memset(newcentroid, 0, sizeof(newcentroid)); newutility[0] = newutility[1] = 0; for (tempcell = cells; tempcell; tempcell=tempcell->next) { idx = distance_limited(centroid[0], points + tempcell->index*dim, dim, INT_MAX)>= 
distance_limited(centroid[1], points + tempcell->index*dim, dim, INT_MAX); numpoints[idx]++; for (i=0; i<dim; i++) newcentroid[idx][i] += points[tempcell->index*dim + i]; } vect_division(centroid[0], newcentroid[0], numpoints[0], dim); vect_division(centroid[1], newcentroid[1], numpoints[1], dim); for (tempcell = cells; tempcell; tempcell=tempcell->next) { int dist[2] = {distance_limited(centroid[0], points + tempcell->index*dim, dim, INT_MAX), distance_limited(centroid[1], points + tempcell->index*dim, dim, INT_MAX)}; int idx = dist[0] > dist[1]; newutility[idx] += dist[idx]; } return newutility[0] + newutility[1]; } static void get_new_centroids(elbg_data *elbg, int huc, int *newcentroid_i, int *newcentroid_p) { cell *tempcell; int min[elbg->dim]; int max[elbg->dim]; int i; for (i=0; i< elbg->dim; i++) { min[i]=INT_MAX; max[i]=0; } for (tempcell = elbg->cells[huc]; tempcell; tempcell = tempcell->next) for(i=0; i<elbg->dim; i++) { min[i]=FFMIN(min[i], elbg->points[tempcell->index*elbg->dim + i]); max[i]=FFMAX(max[i], elbg->points[tempcell->index*elbg->dim + i]); } for (i=0; i<elbg->dim; i++) { newcentroid_i[i] = min[i] + (max[i] - min[i])/3; newcentroid_p[i] = min[i] + (2*(max[i] - min[i]))/3; } } /** * Add the points in the low utility cell to its closest cell. Split the high * utility cell, putting the separed points in the (now empty) low utility * cell. 
* * @param elbg Internal elbg data * @param indexes {luc, huc, cluc} * @param newcentroid A vector with the position of the new centroids */ static void shift_codebook(elbg_data *elbg, int *indexes, int *newcentroid[3]) { cell *tempdata; cell **pp = &elbg->cells[indexes[2]]; while(*pp) pp= &(*pp)->next; *pp = elbg->cells[indexes[0]]; elbg->cells[indexes[0]] = NULL; tempdata = elbg->cells[indexes[1]]; elbg->cells[indexes[1]] = NULL; while(tempdata) { cell *tempcell2 = tempdata->next; int idx = distance_limited(elbg->points + tempdata->index*elbg->dim, newcentroid[0], elbg->dim, INT_MAX) > distance_limited(elbg->points + tempdata->index*elbg->dim, newcentroid[1], elbg->dim, INT_MAX); tempdata->next = elbg->cells[indexes[idx]]; elbg->cells[indexes[idx]] = tempdata; tempdata = tempcell2; } } static void evaluate_utility_inc(elbg_data *elbg) { int i, inc=0; for (i=0; i < elbg->numCB; i++) { if (elbg->numCB*elbg->utility[i] > elbg->error) inc += elbg->utility[i]; elbg->utility_inc[i] = inc; } } static void update_utility_and_n_cb(elbg_data *elbg, int idx, int newutility) { cell *tempcell; elbg->utility[idx] = newutility; for (tempcell=elbg->cells[idx]; tempcell; tempcell=tempcell->next) elbg->nearest_cb[tempcell->index] = idx; } /** * Evaluate if a shift lower the error. If it does, call shift_codebooks * and update elbg->error, elbg->utility and elbg->nearest_cb. 
* * @param elbg Internal elbg data * @param indexes {luc (low utility cell, huc (high utility cell), cluc (closest cell to low utility cell)} */ static void try_shift_candidate(elbg_data *elbg, int idx[3]) { int j, k, olderror=0, newerror, cont=0; int newutility[3]; int newcentroid[3][elbg->dim]; int *newcentroid_ptrs[3]; cell *tempcell; newcentroid_ptrs[0] = newcentroid[0]; newcentroid_ptrs[1] = newcentroid[1]; newcentroid_ptrs[2] = newcentroid[2]; for (j=0; j<3; j++) olderror += elbg->utility[idx[j]]; memset(newcentroid[2], 0, elbg->dim*sizeof(int)); for (k=0; k<2; k++) for (tempcell=elbg->cells[idx[2*k]]; tempcell; tempcell=tempcell->next) { cont++; for (j=0; j<elbg->dim; j++) newcentroid[2][j] += elbg->points[tempcell->index*elbg->dim + j]; } vect_division(newcentroid[2], newcentroid[2], cont, elbg->dim); get_new_centroids(elbg, idx[1], newcentroid[0], newcentroid[1]); newutility[2] = eval_error_cell(elbg, newcentroid[2], elbg->cells[idx[0]]); newutility[2] += eval_error_cell(elbg, newcentroid[2], elbg->cells[idx[2]]); newerror = newutility[2]; newerror += simple_lbg(elbg->dim, newcentroid_ptrs, newutility, elbg->points, elbg->cells[idx[1]]); if (olderror > newerror) { shift_codebook(elbg, idx, newcentroid_ptrs); elbg->error += newerror - olderror; for (j=0; j<3; j++) update_utility_and_n_cb(elbg, idx[j], newutility[j]); evaluate_utility_inc(elbg); } } /** * Implementation of the ELBG block */ static void do_shiftings(elbg_data *elbg) { int idx[3]; evaluate_utility_inc(elbg); for (idx[0]=0; idx[0] < elbg->numCB; idx[0]++) if (elbg->numCB*elbg->utility[idx[0]] < elbg->error) { if (elbg->utility_inc[elbg->numCB-1] == 0) return; idx[1] = get_high_utility_cell(elbg); idx[2] = get_closest_codebook(elbg, idx[0]); if (idx[1] != idx[0] && idx[1] != idx[2]) try_shift_candidate(elbg, idx); } } #define BIG_PRIME 433494437LL void ff_init_elbg(int *points, int dim, int numpoints, int *codebook, int numCB, int max_steps, int *closest_cb, AVLFG *rand_state) { int i, k; if 
(numpoints > 24*numCB) { /* ELBG is very costly for a big number of points. So if we have a lot of them, get a good initial codebook to save on iterations */ int *temp_points = av_malloc(dim*(numpoints/8)*sizeof(int)); for (i=0; i<numpoints/8; i++) { k = (i*BIG_PRIME) % numpoints; memcpy(temp_points + i*dim, points + k*dim, dim*sizeof(int)); } ff_init_elbg(temp_points, dim, numpoints/8, codebook, numCB, 2*max_steps, closest_cb, rand_state); ff_do_elbg(temp_points, dim, numpoints/8, codebook, numCB, 2*max_steps, closest_cb, rand_state); av_free(temp_points); } else // If not, initialize the codebook with random positions for (i=0; i < numCB; i++) memcpy(codebook + i*dim, points + ((i*BIG_PRIME)%numpoints)*dim, dim*sizeof(int)); } void ff_do_elbg(int *points, int dim, int numpoints, int *codebook, int numCB, int max_steps, int *closest_cb, AVLFG *rand_state) { int dist; elbg_data elbg_d; elbg_data *elbg = &elbg_d; int i, j, k, last_error, steps=0; int *dist_cb = av_malloc(numpoints*sizeof(int)); int *size_part = av_malloc(numCB*sizeof(int)); cell *list_buffer = av_malloc(numpoints*sizeof(cell)); cell *free_cells; int best_dist, best_idx = 0; elbg->error = INT_MAX; elbg->dim = dim; elbg->numCB = numCB; elbg->codebook = codebook; elbg->cells = av_malloc(numCB*sizeof(cell *)); elbg->utility = av_malloc(numCB*sizeof(int)); elbg->nearest_cb = closest_cb; elbg->points = points; elbg->utility_inc = av_malloc(numCB*sizeof(int)); elbg->rand_state = rand_state; do { free_cells = list_buffer; last_error = elbg->error; steps++; memset(elbg->utility, 0, numCB*sizeof(int)); memset(elbg->cells, 0, numCB*sizeof(cell *)); elbg->error = 0; /* This loop evaluate the actual Voronoi partition. It is the most costly part of the algorithm. 
*/ for (i=0; i < numpoints; i++) { best_dist = distance_limited(elbg->points + i*elbg->dim, elbg->codebook + best_idx*elbg->dim, dim, INT_MAX); for (k=0; k < elbg->numCB; k++) { dist = distance_limited(elbg->points + i*elbg->dim, elbg->codebook + k*elbg->dim, dim, best_dist); if (dist < best_dist) { best_dist = dist; best_idx = k; } } elbg->nearest_cb[i] = best_idx; dist_cb[i] = best_dist; elbg->error += dist_cb[i]; elbg->utility[elbg->nearest_cb[i]] += dist_cb[i]; free_cells->index = i; free_cells->next = elbg->cells[elbg->nearest_cb[i]]; elbg->cells[elbg->nearest_cb[i]] = free_cells; free_cells++; } do_shiftings(elbg); memset(size_part, 0, numCB*sizeof(int)); memset(elbg->codebook, 0, elbg->numCB*dim*sizeof(int)); for (i=0; i < numpoints; i++) { size_part[elbg->nearest_cb[i]]++; for (j=0; j < elbg->dim; j++) elbg->codebook[elbg->nearest_cb[i]*elbg->dim + j] += elbg->points[i*elbg->dim + j]; } for (i=0; i < elbg->numCB; i++) vect_division(elbg->codebook + i*elbg->dim, elbg->codebook + i*elbg->dim, size_part[i], elbg->dim); } while(((last_error - elbg->error) > DELTA_ERR_MAX*elbg->error) && (steps < max_steps)); av_free(dist_cb); av_free(size_part); av_free(elbg->utility); av_free(list_buffer); av_free(elbg->cells); av_free(elbg->utility_inc); }
123linslouis-android-video-cutter
jni/libavcodec/elbg.c
C
asf20
13,088
/* * Common code between Nellymoser encoder and decoder * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5, * 539459aeb7d425140b62a3ec7dbf6dc8e408a306, and * 520e17cd55896441042b14df2566a6eb610ed444 * Copyright (c) 2007 Loic Minier <lool at dooz.org> * Benjamin Larsson * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /** * @file * The 3 alphanumeric copyright notices are md5summed they are from the original * implementors. 
The original code is available from http://code.google.com/p/nelly2pcm/ */ #ifndef AVCODEC_NELLYMOSER_H #define AVCODEC_NELLYMOSER_H #include "avcodec.h" #define NELLY_BANDS 23 #define NELLY_BLOCK_LEN 64 #define NELLY_HEADER_BITS 116 #define NELLY_DETAIL_BITS 198 #define NELLY_BUF_LEN 128 #define NELLY_FILL_LEN 124 #define NELLY_BIT_CAP 6 #define NELLY_BASE_OFF 4228 #define NELLY_BASE_SHIFT 19 #define NELLY_SAMPLES (2 * NELLY_BUF_LEN) extern const float ff_nelly_dequantization_table[127]; extern const uint8_t ff_nelly_band_sizes_table[NELLY_BANDS]; extern const uint16_t ff_nelly_init_table[64]; extern const int16_t ff_nelly_delta_table[32]; void ff_nelly_get_sample_bits(const float *buf, int *bits); #endif
123linslouis-android-video-cutter
jni/libavcodec/nellymoser.h
C
asf20
2,290
/* * MPEG1/2 decoder tables * copyright (c) 2000,2001 Fabrice Bellard * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MPEG1/2 decoder tables. */ #ifndef AVCODEC_MPEG12DECDATA_H #define AVCODEC_MPEG12DECDATA_H #include <stdint.h> #include "mpegvideo.h" #define MB_TYPE_ZERO_MV 0x20000000 #define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV) static const uint8_t table_mb_ptype[7][2] = { { 3, 5 }, // 0x01 MB_INTRA { 1, 2 }, // 0x02 MB_PAT { 1, 3 }, // 0x08 MB_FOR { 1, 1 }, // 0x0A MB_FOR|MB_PAT { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA { 1, 5 }, // 0x12 MB_QUANT|MB_PAT { 2, 5 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT }; static const uint32_t ptype2mb_type[7] = { MB_TYPE_INTRA, MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, MB_TYPE_L0, MB_TYPE_L0 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_INTRA, MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, }; static const uint8_t table_mb_btype[11][2] = { { 3, 5 }, // 0x01 MB_INTRA { 2, 3 }, // 0x04 MB_BACK { 3, 3 }, // 0x06 MB_BACK|MB_PAT { 2, 4 }, // 0x08 MB_FOR { 3, 4 }, // 0x0A MB_FOR|MB_PAT { 2, 2 }, // 0x0C MB_FOR|MB_BACK { 3, 2 }, // 0x0E MB_FOR|MB_BACK|MB_PAT { 1, 6 }, // 0x11 MB_QUANT|MB_INTRA { 2, 6 }, // 
0x16 MB_QUANT|MB_BACK|MB_PAT { 3, 6 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT { 2, 5 }, // 0x1E MB_QUANT|MB_FOR|MB_BACK|MB_PAT }; static const uint32_t btype2mb_type[11] = { MB_TYPE_INTRA, MB_TYPE_L1, MB_TYPE_L1 | MB_TYPE_CBP, MB_TYPE_L0, MB_TYPE_L0 | MB_TYPE_CBP, MB_TYPE_L0L1, MB_TYPE_L0L1 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_INTRA, MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP, }; static const uint8_t non_linear_qscale[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8,10,12,14,16,18,20,22, 24,28,32,36,40,44,48,52, 56,64,72,80,88,96,104,112, }; #endif /* AVCODEC_MPEG12DECDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/mpeg12decdata.h
C
asf20
3,027
/* * IMC compatible decoder * Copyright (c) 2002-2004 Maxim Poliakovski * Copyright (c) 2006 Benjamin Larsson * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_IMCDATA_H #define AVCODEC_IMCDATA_H #include <stdint.h> static const uint16_t band_tab[33] = { 0, 3, 6, 9, 12, 16, 20, 24, 29, 34, 40, 46, 53, 60, 68, 76, 84, 93, 102, 111, 121, 131, 141, 151, 162, 173, 184, 195, 207, 219, 231, 243, 256, }; static const int8_t cyclTab[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 32, }; static const int8_t cyclTab2[32] = { -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}; static const float imc_weights1[31] = { 0.119595, 0.123124, 0.129192, 9.97377e-2, 8.1923e-2, 9.61153e-2, 8.77885e-2, 8.61174e-2, 9.00882e-2, 9.91658e-2, 0.112991, 0.131126, 0.152886, 0.177292, 0.221782, 0.244917, 0.267386, 0.306816, 0.323046, 0.33729, 0.366773, 0.392557, 0.398076, 0.403302, 0.42451, 0.444777, 0.449188, 0.455445, 0.477853, 0.500669, 0.510395}; static const float imc_weights2[31] = { 3.23466e-3, 3.49886e-3, 3.98413e-3, 1.98116e-3, 1.16465e-3, 1.79283e-3, 1.40372e-3, 1.33274e-3, 1.50523e-3, 1.95064e-3, 
2.77472e-3, 4.14725e-3, 6.2776e-3, 9.36401e-3, 1.71397e-2, 2.24052e-2, 2.83971e-2, 4.11689e-2, 4.73165e-2, 5.31631e-2, 6.66614e-2, 8.00824e-2, 8.31588e-2, 8.61397e-2, 9.89229e-2, 0.112197, 0.115227, 0.119613, 0.136174, 0.15445, 0.162685}; static const float imc_quantizer1[4][8] = { { 8.4431201e-1, 4.7358301e-1, 1.448354, 2.7073899e-1, 7.4449003e-1, 1.241991, 1.845484, 0.0}, { 8.6876702e-1, 4.7659001e-1, 1.478224, 2.5672799e-1, 7.55777e-1, 1.3229851, 2.03438, 0.0}, { 7.5891501e-1, 6.2272799e-1, 1.271322, 3.47904e-1, 7.5317699e-1, 1.150767, 1.628476, 0.0}, { 7.65257e-1, 6.44647e-1, 1.263824, 3.4548101e-1, 7.6384902e-1, 1.214466, 1.7638789, 0.0}, }; static const float imc_quantizer2[2][56] = { { 1.39236e-1, 3.50548e-1, 5.9547901e-1, 8.5772401e-1, 1.121545, 1.3882281, 1.695882, 2.1270809, 7.2221003e-2, 1.85177e-1, 2.9521701e-1, 4.12568e-1, 5.4068601e-1, 6.7679501e-1, 8.1196898e-1, 9.4765198e-1, 1.0779999, 1.203415, 1.337265, 1.481871, 1.639982, 1.814766, 2.0701399, 2.449862, 3.7533998e-2, 1.02722e-1, 1.6021401e-1, 2.16043e-1, 2.7231601e-1, 3.3025399e-1, 3.9022601e-1, 4.52849e-1, 5.1794899e-1, 5.8529502e-1, 6.53956e-1, 7.2312802e-1, 7.9150802e-1, 8.5891002e-1, 9.28141e-1, 9.9706203e-1, 1.062153, 1.12564, 1.189834, 1.256122, 1.324469, 1.3955311, 1.468906, 1.545084, 1.6264729, 1.711524, 1.802705, 1.91023, 2.0533991, 2.22333, 2.4830019, 3.253329 }, { 1.11654e-1, 3.54469e-1, 6.4232099e-1, 9.6128798e-1, 1.295053, 1.61777, 1.989839, 2.51107, 5.7721999e-2, 1.69879e-1, 2.97589e-1, 4.3858799e-1, 5.9039903e-1, 7.4934798e-1, 9.1628098e-1, 1.087297, 1.262751, 1.4288321, 1.6040879, 1.79067, 2.000668, 2.2394669, 2.649332, 5.2760072, 2.9722e-2, 8.7316997e-2, 1.4445201e-1, 2.04247e-1, 2.6879501e-1, 3.3716801e-1, 4.08811e-1, 4.8306999e-1, 5.6049401e-1, 6.3955498e-1, 7.2044599e-1, 8.0427998e-1, 8.8933599e-1, 9.7537601e-1, 1.062461, 1.1510431, 1.240236, 1.326715, 1.412513, 1.500502, 1.591749, 1.686413, 1.785239, 1.891233, 2.0051291, 2.127681, 2.2709141, 2.475826, 2.7219379, 3.101985, 
4.686213, 6.2287788}, }; static const float xTab[14] = {7.6, 3.6, 4.4, 3.7, 6.1, 5.1, 2.3, 1.6, 6.2, 1.5, 1.8, 1.2, 0, 0}; //10014048 /* precomputed table for 10^(i/4), i=-15..16 */ static const float imc_exp_tab[32] = { 1.778280e-4, 3.162278e-4, 5.623413e-4, 1.000000e-3, 1.778280e-3, 3.162278e-3, 5.623413e-3, 1.000000e-2, 1.778280e-2, 3.162278e-2, 5.623413e-2, 1.000000e-1, 1.778280e-1, 3.162278e-1, 5.623413e-1, 1.000000e00, 1.778280e00, 3.162278e00, 5.623413e00, 1.000000e01, 1.778280e01, 3.162278e01, 5.623413e01, 1.000000e02, 1.778280e02, 3.162278e02, 5.623413e02, 1.000000e03, 1.778280e03, 3.162278e03, 5.623413e03, 1.000000e04 }; static const float * const imc_exp_tab2 = imc_exp_tab + 8; static const uint8_t imc_cb_select[4][32] = { { 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2 }, { 0, 2, 0, 3, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2 }, { 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static const uint8_t imc_huffman_sizes[4] = { 17, 17, 18, 18 }; static const uint8_t imc_huffman_lens[4][4][18] = { { { 16, 15, 13, 11, 8, 5, 3, 1, 2, 4, 6, 9, 10, 12, 14, 16, 7, 0 }, { 10, 8, 7, 6, 4, 4, 3, 2, 2, 3, 4, 6, 7, 9, 11, 11, 7, 0 }, { 15, 15, 14, 11, 8, 6, 4, 2, 1, 4, 5, 7, 9, 10, 12, 13, 4, 0 }, { 13, 11, 10, 8, 6, 4, 2, 2, 2, 3, 5, 7, 9, 12, 15, 15, 14, 0 }, }, { { 14, 12, 10, 8, 7, 4, 2, 2, 2, 3, 5, 7, 9, 11, 13, 14, 7, 0 }, { 14, 13, 11, 8, 6, 4, 3, 2, 2, 3, 5, 7, 9, 10, 12, 14, 3, 0 }, { 13, 12, 10, 7, 5, 4, 3, 2, 2, 3, 4, 6, 8, 9, 11, 13, 4, 0 }, { 13, 12, 10, 7, 5, 4, 3, 2, 2, 3, 4, 6, 8, 9, 11, 13, 4, 0 }, }, { { 16, 14, 12, 10, 8, 5, 3, 1, 2, 4, 7, 9, 11, 13, 15, 17, 6, 17 }, { 15, 13, 11, 8, 6, 4, 2, 2, 2, 3, 5, 7, 10, 12, 14, 16, 9, 16 }, { 14, 12, 11, 9, 8, 6, 3, 1, 2, 5, 7, 10, 13, 15, 16, 17, 4, 17 }, { 16, 14, 12, 
9, 7, 5, 2, 2, 2, 3, 4, 6, 8, 11, 13, 15, 10, 16 }, }, { { 13, 11, 10, 8, 7, 5, 2, 2, 2, 4, 6, 9, 12, 14, 15, 16, 3, 16 }, { 11, 11, 10, 9, 8, 7, 5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 6, 5 }, { 9, 9, 7, 6, 5, 4, 3, 3, 2, 3, 4, 5, 4, 5, 5, 6, 8, 6 }, { 13, 12, 10, 8, 5, 3, 3, 2, 2, 3, 4, 7, 9, 11, 14, 15, 6, 15 }, } }; static const uint16_t imc_huffman_bits[4][4][18] = { { { 0xCC32, 0x6618, 0x1987, 0x0660, 0x00CD, 0x0018, 0x0007, 0x0000, 0x0002, 0x000D, 0x0032, 0x0199, 0x0331, 0x0CC2, 0x330D, 0xCC33, 0x0067, 0x0000 }, { 0x02FE, 0x00BE, 0x005E, 0x002D, 0x000A, 0x0009, 0x0003, 0x0003, 0x0000, 0x0002, 0x0008, 0x002C, 0x005D, 0x017E, 0x05FE, 0x05FF, 0x005C, 0x0000 }, { 0x5169, 0x5168, 0x28B5, 0x0517, 0x00A3, 0x0029, 0x0008, 0x0003, 0x0000, 0x0009, 0x0015, 0x0050, 0x0144, 0x028A, 0x0A2C, 0x145B, 0x000B, 0x0000 }, { 0x1231, 0x048D, 0x0247, 0x0090, 0x0025, 0x0008, 0x0001, 0x0003, 0x0000, 0x0005, 0x0013, 0x0049, 0x0122, 0x0919, 0x48C3, 0x48C2, 0x2460, 0x0000 }, }, { { 0x2D1D, 0x0B46, 0x02D0, 0x00B5, 0x0059, 0x000A, 0x0003, 0x0001, 0x0000, 0x0004, 0x0017, 0x005B, 0x0169, 0x05A2, 0x168F, 0x2D1C, 0x0058, 0x0000 }, { 0x1800, 0x0C01, 0x0301, 0x0061, 0x0019, 0x0007, 0x0004, 0x0003, 0x0000, 0x0005, 0x000D, 0x0031, 0x00C1, 0x0181, 0x0601, 0x1801, 0x0002, 0x0000 }, { 0x1556, 0x0AAA, 0x02AB, 0x0054, 0x0014, 0x000B, 0x0002, 0x0003, 0x0000, 0x0003, 0x0008, 0x002B, 0x00AB, 0x0154, 0x0554, 0x1557, 0x0009, 0x0000 }, { 0x1556, 0x0AAA, 0x02AB, 0x0054, 0x0014, 0x000B, 0x0002, 0x0003, 0x0000, 0x0003, 0x0008, 0x002B, 0x00AB, 0x0154, 0x0554, 0x1557, 0x0009, 0x0000 }, }, { { 0x2993, 0x0A65, 0x0298, 0x00A7, 0x0028, 0x0004, 0x0000, 0x0001, 0x0001, 0x0003, 0x0015, 0x0052, 0x014D, 0x0533, 0x14C8, 0x5324, 0x000B, 0x5325 }, { 0x09B8, 0x026F, 0x009A, 0x0012, 0x0005, 0x0000, 0x0001, 0x0002, 0x0003, 0x0001, 0x0003, 0x0008, 0x004C, 0x0136, 0x04DD, 0x1373, 0x0027, 0x1372 }, { 0x0787, 0x01E0, 0x00F1, 0x003D, 0x001F, 0x0006, 0x0001, 0x0001, 0x0001, 0x0002, 0x000E, 0x0079, 0x03C2, 0x0F0D, 0x1E19, 0x3C30, 0x0000, 
0x3C31 }, { 0x4B06, 0x12C0, 0x04B1, 0x0097, 0x0024, 0x0008, 0x0002, 0x0003, 0x0000, 0x0003, 0x0005, 0x0013, 0x004A, 0x0259, 0x0961, 0x2582, 0x012D, 0x4B07 }, }, { { 0x0A5A, 0x0297, 0x014A, 0x0053, 0x0028, 0x000B, 0x0003, 0x0000, 0x0002, 0x0004, 0x0015, 0x00A4, 0x052C, 0x14B7, 0x296C, 0x52DB, 0x0003, 0x52DA }, { 0x0193, 0x0192, 0x00C8, 0x0065, 0x0033, 0x0018, 0x0007, 0x0004, 0x0000, 0x0004, 0x0005, 0x0007, 0x0006, 0x0003, 0x0005, 0x0005, 0x000D, 0x0004 }, { 0x0012, 0x0013, 0x0005, 0x0003, 0x0000, 0x0003, 0x0005, 0x0004, 0x0003, 0x0003, 0x0005, 0x0005, 0x0004, 0x0004, 0x0003, 0x0005, 0x0008, 0x0004 }, { 0x0D66, 0x06B2, 0x01AD, 0x006A, 0x000C, 0x0005, 0x0004, 0x0000, 0x0003, 0x0002, 0x0007, 0x0034, 0x00D7, 0x0358, 0x1ACF, 0x359C, 0x001B, 0x359D }, } }; #endif /* AVCODEC_IMCDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/imcdata.h
C
asf20
9,597
/* * Generate a file for hardcoded tables * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <inttypes.h> #include "tableprint.h" WRITE_1D_FUNC(int8, int8_t, "%3"PRIi8, 15) WRITE_1D_FUNC(uint8, uint8_t, "0x%02"PRIx8, 15) WRITE_1D_FUNC(uint16, uint16_t, "0x%08"PRIx16, 7) WRITE_1D_FUNC(uint32, uint32_t, "0x%08"PRIx32, 7) WRITE_1D_FUNC(float, float, "%.18e", 3) WRITE_2D_FUNC(int8, int8_t) WRITE_2D_FUNC(uint8, uint8_t) WRITE_2D_FUNC(uint32, uint32_t) void write_fileheader(void) { printf("/* This file was generated by libavcodec/tableprint */\n"); printf("#include <stdint.h>\n"); }
123linslouis-android-video-cutter
jni/libavcodec/tableprint.c
C
asf20
1,420
/* * FLV decoding. * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mpegvideo.h" #include "h263.h" #include "flv.h" void ff_flv2_decode_ac_esc(GetBitContext *gb, int *level, int *run, int *last){ int is11 = get_bits1(gb); *last = get_bits1(gb); *run = get_bits(gb, 6); if(is11){ *level = get_sbits(gb, 11); } else { *level = get_sbits(gb, 7); } } int ff_flv_decode_picture_header(MpegEncContext *s) { int format, width, height; /* picture header */ if (get_bits_long(&s->gb, 17) != 1) { av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return -1; } format = get_bits(&s->gb, 5); if (format != 0 && format != 1) { av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n"); return -1; } s->h263_flv = format+1; s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */ format = get_bits(&s->gb, 3); switch (format) { case 0: width = get_bits(&s->gb, 8); height = get_bits(&s->gb, 8); break; case 1: width = get_bits(&s->gb, 16); height = get_bits(&s->gb, 16); break; case 2: width = 352; height = 288; break; case 3: width = 176; height = 144; break; case 4: width = 128; height = 96; break; case 5: width = 320; height = 240; break; case 6: width = 160; height = 120; break; default: width = height = 0; break; } if(avcodec_check_dimensions(s->avctx, width, height)) return -1; 
s->width = width; s->height = height; s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2); s->dropable= s->pict_type > FF_P_TYPE; if (s->dropable) s->pict_type = FF_P_TYPE; skip_bits1(&s->gb); /* deblocking flag */ s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); s->h263_plus = 0; s->unrestricted_mv = 1; s->h263_long_vectors = 0; /* PEI */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } s->f_code = 1; if(s->avctx->debug & FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n", s->dropable ? 'D' : av_get_pict_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number); } s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; return 0; } AVCodec flv_decoder = { "flv", AVMEDIA_TYPE_VIDEO, CODEC_ID_FLV1, sizeof(MpegEncContext), ff_h263_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"), .pix_fmts= ff_pixfmt_list_420, };
123linslouis-android-video-cutter
jni/libavcodec/flvdec.c
C
asf20
3,561
/* * SGI image decoder * Todd Kirby <doubleshot@pacbell.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "bytestream.h" #include "sgi.h" typedef struct SgiState { AVFrame picture; unsigned int width; unsigned int height; unsigned int depth; unsigned int bytes_per_channel; int linesize; } SgiState; /** * Expand an RLE row into a channel. * @param in_buf input buffer * @param in_end end of input buffer * @param out_buf Points to one line after the output buffer. * @param out_end end of line in output buffer * @param pixelstride pixel stride of input buffer * @return size of output in bytes, -1 if buffer overflows */ static int expand_rle_row(const uint8_t *in_buf, const uint8_t* in_end, unsigned char *out_buf, uint8_t* out_end, int pixelstride) { unsigned char pixel, count; unsigned char *orig = out_buf; while (1) { if(in_buf + 1 > in_end) return -1; pixel = bytestream_get_byte(&in_buf); if (!(count = (pixel & 0x7f))) { return (out_buf - orig) / pixelstride; } /* Check for buffer overflow. 
*/ if(out_buf + pixelstride * count >= out_end) return -1; if (pixel & 0x80) { while (count--) { *out_buf = bytestream_get_byte(&in_buf); out_buf += pixelstride; } } else { pixel = bytestream_get_byte(&in_buf); while (count--) { *out_buf = pixel; out_buf += pixelstride; } } } } /** * Read a run length encoded SGI image. * @param out_buf output buffer * @param in_buf input buffer * @param in_end end of input buffer * @param s the current image state * @return 0 if no error, else return error number. */ static int read_rle_sgi(unsigned char* out_buf, const uint8_t *in_buf, const uint8_t *in_end, SgiState* s) { uint8_t *dest_row; unsigned int len = s->height * s->depth * 4; const uint8_t *start_table = in_buf; unsigned int y, z; unsigned int start_offset; /* size of RLE offset and length tables */ if(len * 2 > in_end - in_buf) { return AVERROR_INVALIDDATA; } in_buf -= SGI_HEADER_SIZE; for (z = 0; z < s->depth; z++) { dest_row = out_buf; for (y = 0; y < s->height; y++) { dest_row -= s->linesize; start_offset = bytestream_get_be32(&start_table); if(start_offset > in_end - in_buf) { return AVERROR_INVALIDDATA; } if (expand_rle_row(in_buf + start_offset, in_end, dest_row + z, dest_row + FFABS(s->linesize), s->depth) != s->width) return AVERROR_INVALIDDATA; } } return 0; } /** * Read an uncompressed SGI image. * @param out_buf output buffer * @param out_end end ofoutput buffer * @param in_buf input buffer * @param in_end end of input buffer * @param s the current image state * @return 0 if read success, otherwise return -1. */ static int read_uncompressed_sgi(unsigned char* out_buf, uint8_t* out_end, const uint8_t *in_buf, const uint8_t *in_end, SgiState* s) { int x, y, z; const uint8_t *ptr; unsigned int offset = s->height * s->width * s->bytes_per_channel; /* Test buffer size. 
*/ if (offset * s->depth > in_end - in_buf) { return -1; } for (y = s->height - 1; y >= 0; y--) { out_end = out_buf + (y * s->linesize); for (x = s->width; x > 0; x--) { ptr = in_buf += s->bytes_per_channel; for(z = 0; z < s->depth; z ++) { memcpy(out_end, ptr, s->bytes_per_channel); out_end += s->bytes_per_channel; ptr += offset; } } } return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *in_buf = avpkt->data; int buf_size = avpkt->size; SgiState *s = avctx->priv_data; AVFrame *picture = data; AVFrame *p = &s->picture; const uint8_t *in_end = in_buf + buf_size; unsigned int dimension, rle; int ret = 0; uint8_t *out_buf, *out_end; if (buf_size < SGI_HEADER_SIZE){ av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", buf_size); return -1; } /* Test for SGI magic. */ if (bytestream_get_be16(&in_buf) != SGI_MAGIC) { av_log(avctx, AV_LOG_ERROR, "bad magic number\n"); return -1; } rle = bytestream_get_byte(&in_buf); s->bytes_per_channel = bytestream_get_byte(&in_buf); dimension = bytestream_get_be16(&in_buf); s->width = bytestream_get_be16(&in_buf); s->height = bytestream_get_be16(&in_buf); s->depth = bytestream_get_be16(&in_buf); if (s->bytes_per_channel != 1 && (s->bytes_per_channel != 2 || rle)) { av_log(avctx, AV_LOG_ERROR, "wrong channel number\n"); return -1; } /* Check for supported image dimensions. */ if (dimension != 2 && dimension != 3) { av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n"); return -1; } if (s->depth == SGI_GRAYSCALE) { avctx->pix_fmt = s->bytes_per_channel == 2 ? PIX_FMT_GRAY16BE : PIX_FMT_GRAY8; } else if (s->depth == SGI_RGB) { avctx->pix_fmt = s->bytes_per_channel == 2 ? 
PIX_FMT_RGB48BE : PIX_FMT_RGB24; } else if (s->depth == SGI_RGBA && s->bytes_per_channel == 1) { avctx->pix_fmt = PIX_FMT_RGBA; } else { av_log(avctx, AV_LOG_ERROR, "wrong picture format\n"); return -1; } if (avcodec_check_dimensions(avctx, s->width, s->height)) return -1; avcodec_set_dimensions(avctx, s->width, s->height); if (p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n"); return -1; } p->pict_type = FF_I_TYPE; p->key_frame = 1; out_buf = p->data[0]; out_end = out_buf + p->linesize[0] * s->height; s->linesize = p->linesize[0]; /* Skip header. */ in_buf += SGI_HEADER_SIZE - 12; if (rle) { ret = read_rle_sgi(out_end, in_buf, in_end, s); } else { ret = read_uncompressed_sgi(out_buf, out_end, in_buf, in_end, s); } if (ret == 0) { *picture = s->picture; *data_size = sizeof(AVPicture); return buf_size; } else { return -1; } } static av_cold int sgi_init(AVCodecContext *avctx){ SgiState *s = avctx->priv_data; avcodec_get_frame_defaults(&s->picture); avctx->coded_frame = &s->picture; return 0; } static av_cold int sgi_end(AVCodecContext *avctx) { SgiState * const s = avctx->priv_data; if (s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); return 0; } AVCodec sgi_decoder = { "sgi", AVMEDIA_TYPE_VIDEO, CODEC_ID_SGI, sizeof(SgiState), sgi_init, NULL, sgi_end, decode_frame, .long_name = NULL_IF_CONFIG_SMALL("SGI image"), };
123linslouis-android-video-cutter
jni/libavcodec/sgidec.c
C
asf20
7,915
/* * Common code between the AC-3 and E-AC-3 decoders * Copyright (c) 2007 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Common code between the AC-3 and E-AC-3 decoders. * * Summary of MDCT Coefficient Grouping: * The individual MDCT coefficient indices are often referred to in the * (E-)AC-3 specification as frequency bins. These bins are grouped together * into subbands of 12 coefficients each. The subbands are grouped together * into bands as defined in the bitstream by the band structures, which * determine the number of bands and the size of each band. The full spectrum * of 256 frequency bins is divided into 1 DC bin + 21 subbands = 253 bins. * This system of grouping coefficients is used for channel bandwidth, stereo * rematrixing, channel coupling, enhanced coupling, and spectral extension. 
* * +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-+ * |1| |12| | [12|12|12|12] | | | | | | | | | | | | |3| * +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-+ * ~~~ ~~~~ ~~~~~~~~~~~~~ ~~~ * | | | | * | | | 3 unused frequency bins--+ * | | | * | | +--1 band containing 4 subbands * | | * | +--1 subband of 12 frequency bins * | * +--DC frequency bin */ #ifndef AVCODEC_AC3DEC_H #define AVCODEC_AC3DEC_H #include "libavutil/lfg.h" #include "ac3.h" #include "get_bits.h" #include "dsputil.h" #include "fft.h" /* override ac3.h to include coupling channel */ #undef AC3_MAX_CHANNELS #define AC3_MAX_CHANNELS 7 #define CPL_CH 0 #define AC3_OUTPUT_LFEON 8 #define AC3_MAX_COEFS 256 #define AC3_BLOCK_SIZE 256 #define MAX_BLOCKS 6 #define SPX_MAX_BANDS 17 typedef struct { AVCodecContext *avctx; ///< parent context GetBitContext gbc; ///< bitstream reader uint8_t *input_buffer; ///< temp buffer to prevent overread ///@defgroup bsi bit stream information ///@{ int frame_type; ///< frame type (strmtyp) int substreamid; ///< substream identification int frame_size; ///< current frame size, in bytes int bit_rate; ///< stream bit rate, in bits-per-second int sample_rate; ///< sample frequency, in Hz int num_blocks; ///< number of audio blocks int channel_mode; ///< channel mode (acmod) int channel_layout; ///< channel layout int lfe_on; ///< lfe channel in use int channel_map; ///< custom channel map int center_mix_level; ///< Center mix level index int surround_mix_level; ///< Surround mix level index int eac3; ///< indicates if current frame is E-AC-3 ///@} ///@defgroup audfrm frame syntax parameters int snr_offset_strategy; ///< SNR offset strategy (snroffststr) int block_switch_syntax; ///< block switch syntax enabled (blkswe) int dither_flag_syntax; ///< dither flag syntax enabled (dithflage) int bit_allocation_syntax; ///< bit allocation model syntax enabled (bamode) int fast_gain_syntax; ///< fast gain codes enabled (frmfgaincode) int 
dba_syntax; ///< delta bit allocation syntax enabled (dbaflde) int skip_syntax; ///< skip field syntax enabled (skipflde) ///@} ///@defgroup cpl standard coupling int cpl_in_use[MAX_BLOCKS]; ///< coupling in use (cplinu) int cpl_strategy_exists[MAX_BLOCKS]; ///< coupling strategy exists (cplstre) int channel_in_cpl[AC3_MAX_CHANNELS]; ///< channel in coupling (chincpl) int phase_flags_in_use; ///< phase flags in use (phsflginu) int phase_flags[18]; ///< phase flags (phsflg) int num_cpl_bands; ///< number of coupling bands (ncplbnd) uint8_t cpl_band_sizes[18]; ///< number of coeffs in each coupling band int firstchincpl; ///< first channel in coupling int first_cpl_coords[AC3_MAX_CHANNELS]; ///< first coupling coordinates states (firstcplcos) int cpl_coords[AC3_MAX_CHANNELS][18]; ///< coupling coordinates (cplco) ///@} ///@defgroup spx spectral extension ///@{ int spx_in_use; ///< spectral extension in use (spxinu) uint8_t channel_uses_spx[AC3_MAX_CHANNELS]; ///< channel uses spectral extension (chinspx) int8_t spx_atten_code[AC3_MAX_CHANNELS]; ///< spx attenuation code (spxattencod) int spx_src_start_freq; ///< spx start frequency bin int spx_dst_end_freq; ///< spx end frequency bin int spx_dst_start_freq; ///< spx starting frequency bin for copying (copystartmant) ///< the copy region ends at the start of the spx region. 
int num_spx_bands; ///< number of spx bands (nspxbnds) uint8_t spx_band_sizes[SPX_MAX_BANDS]; ///< number of bins in each spx band uint8_t first_spx_coords[AC3_MAX_CHANNELS]; ///< first spx coordinates states (firstspxcos) float spx_noise_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS]; ///< spx noise blending factor (nblendfact) float spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact) ///@} ///@defgroup aht adaptive hybrid transform int channel_uses_aht[AC3_MAX_CHANNELS]; ///< channel AHT in use (chahtinu) int pre_mantissa[AC3_MAX_CHANNELS][AC3_MAX_COEFS][MAX_BLOCKS]; ///< pre-IDCT mantissas ///@} ///@defgroup channel channel int fbw_channels; ///< number of full-bandwidth channels int channels; ///< number of total channels int lfe_ch; ///< index of LFE channel float downmix_coeffs[AC3_MAX_CHANNELS][2]; ///< stereo downmix coefficients int downmixed; ///< indicates if coeffs are currently downmixed int output_mode; ///< output channel configuration int out_channels; ///< number of output channels ///@} ///@defgroup dynrng dynamic range float dynamic_range[2]; ///< dynamic range ///@} ///@defgroup bandwidth bandwidth int start_freq[AC3_MAX_CHANNELS]; ///< start frequency bin (strtmant) int end_freq[AC3_MAX_CHANNELS]; ///< end frequency bin (endmant) ///@} ///@defgroup rematrixing rematrixing int num_rematrixing_bands; ///< number of rematrixing bands (nrematbnd) int rematrixing_flags[4]; ///< rematrixing flags (rematflg) ///@} ///@defgroup exponents exponents int num_exp_groups[AC3_MAX_CHANNELS]; ///< Number of exponent groups (nexpgrp) int8_t dexps[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< decoded exponents int exp_strategy[MAX_BLOCKS][AC3_MAX_CHANNELS]; ///< exponent strategies (expstr) ///@} ///@defgroup bitalloc bit allocation AC3BitAllocParameters bit_alloc_params; ///< bit allocation parameters int first_cpl_leak; ///< first coupling leak state (firstcplleak) int snr_offset[AC3_MAX_CHANNELS]; ///< signal-to-noise ratio offsets 
(snroffst) int fast_gain[AC3_MAX_CHANNELS]; ///< fast gain values/SMR's (fgain) uint8_t bap[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< bit allocation pointers int16_t psd[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< scaled exponents int16_t band_psd[AC3_MAX_CHANNELS][50]; ///< interpolated exponents int16_t mask[AC3_MAX_CHANNELS][50]; ///< masking curve values int dba_mode[AC3_MAX_CHANNELS]; ///< delta bit allocation mode int dba_nsegs[AC3_MAX_CHANNELS]; ///< number of delta segments uint8_t dba_offsets[AC3_MAX_CHANNELS][8]; ///< delta segment offsets uint8_t dba_lengths[AC3_MAX_CHANNELS][8]; ///< delta segment lengths uint8_t dba_values[AC3_MAX_CHANNELS][8]; ///< delta values for each segment ///@} ///@defgroup dithering zero-mantissa dithering int dither_flag[AC3_MAX_CHANNELS]; ///< dither flags (dithflg) AVLFG dith_state; ///< for dither generation ///@} ///@defgroup imdct IMDCT int block_switch[AC3_MAX_CHANNELS]; ///< block switch flags (blksw) FFTContext imdct_512; ///< for 512 sample IMDCT FFTContext imdct_256; ///< for 256 sample IMDCT ///@} ///@defgroup opt optimization DSPContext dsp; ///< for optimization float add_bias; ///< offset for float_to_int16 conversion float mul_bias; ///< scaling for float_to_int16 conversion ///@} ///@defgroup arrays aligned arrays DECLARE_ALIGNED(16, int, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///> fixed-point transform coefficients DECLARE_ALIGNED(16, float, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients DECLARE_ALIGNED(16, float, delay)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block DECLARE_ALIGNED(16, float, window)[AC3_BLOCK_SIZE]; ///< window coefficients DECLARE_ALIGNED(16, float, tmp_output)[AC3_BLOCK_SIZE]; ///< temporary storage for output before windowing DECLARE_ALIGNED(16, float, output)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< output after imdct transform and windowing ///@} } AC3DecodeContext; /** * Parse the E-AC-3 frame header. 
* This parses both the bit stream info and audio frame header. */ int ff_eac3_parse_header(AC3DecodeContext *s); /** * Decode mantissas in a single channel for the entire frame. * This is used when AHT mode is enabled. */ void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch); void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len); /** * Apply spectral extension to each channel by copying lower frequency * coefficients to higher frequency bins and applying side information to * approximate the original high frequency signal. */ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s); #endif /* AVCODEC_AC3DEC_H */
123linslouis-android-video-cutter
jni/libavcodec/ac3dec.h
C
asf20
12,111
/* * VC-1 HW decode acceleration through VA API * * Copyright (C) 2008-2009 Splitted-Desktop Systems * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "vaapi_internal.h" #include "vc1.h" #include "vc1data.h" /** Translates FFmpeg MV modes to VA API */ static int get_VAMvModeVC1(enum MVModes mv_mode) { switch (mv_mode) { case MV_PMODE_1MV_HPEL_BILIN: return VAMvMode1MvHalfPelBilinear; case MV_PMODE_1MV: return VAMvMode1Mv; case MV_PMODE_1MV_HPEL: return VAMvMode1MvHalfPel; case MV_PMODE_MIXED_MV: return VAMvModeMixedMv; case MV_PMODE_INTENSITY_COMP: return VAMvModeIntensityCompensation; } return 0; } /** Checks whether the MVTYPEMB bitplane is present */ static inline int vc1_has_MVTYPEMB_bitplane(VC1Context *v) { if (v->mv_type_is_raw) return 0; return (v->s.pict_type == FF_P_TYPE && (v->mv_mode == MV_PMODE_MIXED_MV || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_MIXED_MV))); } /** Checks whether the SKIPMB bitplane is present */ static inline int vc1_has_SKIPMB_bitplane(VC1Context *v) { if (v->skip_is_raw) return 0; return (v->s.pict_type == FF_P_TYPE || (v->s.pict_type == FF_B_TYPE && !v->bi_type)); } /** Checks whether the DIRECTMB bitplane is present */ static inline int vc1_has_DIRECTMB_bitplane(VC1Context *v) { if (v->dmb_is_raw) return 0; return 
v->s.pict_type == FF_B_TYPE && !v->bi_type; } /** Checks whether the ACPRED bitplane is present */ static inline int vc1_has_ACPRED_bitplane(VC1Context *v) { if (v->acpred_is_raw) return 0; return (v->profile == PROFILE_ADVANCED && (v->s.pict_type == FF_I_TYPE || (v->s.pict_type == FF_B_TYPE && v->bi_type))); } /** Check whether the OVERFLAGS bitplane is present */ static inline int vc1_has_OVERFLAGS_bitplane(VC1Context *v) { if (v->overflg_is_raw) return 0; return (v->profile == PROFILE_ADVANCED && (v->s.pict_type == FF_I_TYPE || (v->s.pict_type == FF_B_TYPE && v->bi_type)) && (v->overlap && v->pq <= 8) && v->condover == CONDOVER_SELECT); } /** Reconstruct bitstream PTYPE (7.1.1.4, index into Table-35) */ static int vc1_get_PTYPE(VC1Context *v) { MpegEncContext * const s = &v->s; switch (s->pict_type) { case FF_I_TYPE: return 0; case FF_P_TYPE: return v->p_frame_skipped ? 4 : 1; case FF_B_TYPE: return v->bi_type ? 3 : 2; } return 0; } /** Reconstruct bitstream MVMODE (7.1.1.32) */ static inline VAMvModeVC1 vc1_get_MVMODE(VC1Context *v) { if (v->s.pict_type == FF_P_TYPE || (v->s.pict_type == FF_B_TYPE && !v->bi_type)) return get_VAMvModeVC1(v->mv_mode); return 0; } /** Reconstruct bitstream MVMODE2 (7.1.1.33) */ static inline VAMvModeVC1 vc1_get_MVMODE2(VC1Context *v) { if (v->s.pict_type == FF_P_TYPE && v->mv_mode == MV_PMODE_INTENSITY_COMP) return get_VAMvModeVC1(v->mv_mode2); return 0; } /** Pack FFmpeg bitplanes into a VABitPlaneBuffer element */ static inline void vc1_pack_bitplanes(uint8_t *bitplane, int n, const uint8_t *ff_bp[3], int x, int y, int stride) { const int bitplane_index = n / 2; const int ff_bp_index = y * stride + x; uint8_t v = 0; if (ff_bp[0]) v = ff_bp[0][ff_bp_index]; if (ff_bp[1]) v |= ff_bp[1][ff_bp_index] << 1; if (ff_bp[2]) v |= ff_bp[2][ff_bp_index] << 2; bitplane[bitplane_index] = (bitplane[bitplane_index] << 4) | v; } static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size) { 
VC1Context * const v = avctx->priv_data; MpegEncContext * const s = &v->s; struct vaapi_context * const vactx = avctx->hwaccel_context; VAPictureParameterBufferVC1 *pic_param; dprintf(avctx, "vaapi_vc1_start_frame()\n"); vactx->slice_param_size = sizeof(VASliceParameterBufferVC1); /* Fill in VAPictureParameterBufferVC1 */ pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferVC1)); if (!pic_param) return -1; pic_param->forward_reference_picture = VA_INVALID_ID; pic_param->backward_reference_picture = VA_INVALID_ID; pic_param->inloop_decoded_picture = VA_INVALID_ID; pic_param->sequence_fields.value = 0; /* reset all bits */ pic_param->sequence_fields.bits.pulldown = v->broadcast; pic_param->sequence_fields.bits.interlace = v->interlace; pic_param->sequence_fields.bits.tfcntrflag = v->tfcntrflag; pic_param->sequence_fields.bits.finterpflag = v->finterpflag; pic_param->sequence_fields.bits.psf = v->psf; pic_param->sequence_fields.bits.multires = v->multires; pic_param->sequence_fields.bits.overlap = v->overlap; pic_param->sequence_fields.bits.syncmarker = s->resync_marker; pic_param->sequence_fields.bits.rangered = v->rangered; pic_param->sequence_fields.bits.max_b_frames = s->avctx->max_b_frames; pic_param->coded_width = s->avctx->coded_width; pic_param->coded_height = s->avctx->coded_height; pic_param->entrypoint_fields.value = 0; /* reset all bits */ pic_param->entrypoint_fields.bits.broken_link = v->broken_link; pic_param->entrypoint_fields.bits.closed_entry = v->closed_entry; pic_param->entrypoint_fields.bits.panscan_flag = v->panscanflag; pic_param->entrypoint_fields.bits.loopfilter = s->loop_filter; pic_param->conditional_overlap_flag = v->condover; pic_param->fast_uvmc_flag = v->fastuvmc; pic_param->range_mapping_fields.value = 0; /* reset all bits */ pic_param->range_mapping_fields.bits.luma_flag = v->range_mapy_flag; pic_param->range_mapping_fields.bits.luma = v->range_mapy; pic_param->range_mapping_fields.bits.chroma_flag = 
v->range_mapuv_flag; pic_param->range_mapping_fields.bits.chroma = v->range_mapuv; pic_param->b_picture_fraction = v->bfraction_lut_index; pic_param->cbp_table = v->cbpcy_vlc ? v->cbpcy_vlc - ff_vc1_cbpcy_p_vlc : 0; pic_param->mb_mode_table = 0; /* XXX: interlaced frame */ pic_param->range_reduction_frame = v->rangeredfrm; pic_param->rounding_control = v->rnd; pic_param->post_processing = v->postproc; pic_param->picture_resolution_index = v->respic; pic_param->luma_scale = v->lumscale; pic_param->luma_shift = v->lumshift; pic_param->picture_fields.value = 0; /* reset all bits */ pic_param->picture_fields.bits.picture_type = vc1_get_PTYPE(v); pic_param->picture_fields.bits.frame_coding_mode = v->fcm; pic_param->picture_fields.bits.top_field_first = v->tff; pic_param->picture_fields.bits.is_first_field = v->fcm == 0; /* XXX: interlaced frame */ pic_param->picture_fields.bits.intensity_compensation = v->mv_mode == MV_PMODE_INTENSITY_COMP; pic_param->raw_coding.value = 0; /* reset all bits */ pic_param->raw_coding.flags.mv_type_mb = v->mv_type_is_raw; pic_param->raw_coding.flags.direct_mb = v->dmb_is_raw; pic_param->raw_coding.flags.skip_mb = v->skip_is_raw; pic_param->raw_coding.flags.field_tx = 0; /* XXX: interlaced frame */ pic_param->raw_coding.flags.forward_mb = 0; /* XXX: interlaced frame */ pic_param->raw_coding.flags.ac_pred = v->acpred_is_raw; pic_param->raw_coding.flags.overflags = v->overflg_is_raw; pic_param->bitplane_present.value = 0; /* reset all bits */ pic_param->bitplane_present.flags.bp_mv_type_mb = vc1_has_MVTYPEMB_bitplane(v); pic_param->bitplane_present.flags.bp_direct_mb = vc1_has_DIRECTMB_bitplane(v); pic_param->bitplane_present.flags.bp_skip_mb = vc1_has_SKIPMB_bitplane(v); pic_param->bitplane_present.flags.bp_field_tx = 0; /* XXX: interlaced frame */ pic_param->bitplane_present.flags.bp_forward_mb = 0; /* XXX: interlaced frame */ pic_param->bitplane_present.flags.bp_ac_pred = vc1_has_ACPRED_bitplane(v); 
pic_param->bitplane_present.flags.bp_overflags = vc1_has_OVERFLAGS_bitplane(v); pic_param->reference_fields.value = 0; /* reset all bits */ pic_param->reference_fields.bits.reference_distance_flag = v->refdist_flag; pic_param->reference_fields.bits.reference_distance = 0; /* XXX: interlaced frame */ pic_param->reference_fields.bits.num_reference_pictures = 0; /* XXX: interlaced frame */ pic_param->reference_fields.bits.reference_field_pic_indicator = 0; /* XXX: interlaced frame */ pic_param->mv_fields.value = 0; /* reset all bits */ pic_param->mv_fields.bits.mv_mode = vc1_get_MVMODE(v); pic_param->mv_fields.bits.mv_mode2 = vc1_get_MVMODE2(v); pic_param->mv_fields.bits.mv_table = s->mv_table_index; pic_param->mv_fields.bits.two_mv_block_pattern_table = 0; /* XXX: interlaced frame */ pic_param->mv_fields.bits.four_mv_switch = 0; /* XXX: interlaced frame */ pic_param->mv_fields.bits.four_mv_block_pattern_table = 0; /* XXX: interlaced frame */ pic_param->mv_fields.bits.extended_mv_flag = v->extended_mv; pic_param->mv_fields.bits.extended_mv_range = v->mvrange; pic_param->mv_fields.bits.extended_dmv_flag = v->extended_dmv; pic_param->mv_fields.bits.extended_dmv_range = 0; /* XXX: interlaced frame */ pic_param->pic_quantizer_fields.value = 0; /* reset all bits */ pic_param->pic_quantizer_fields.bits.dquant = v->dquant; pic_param->pic_quantizer_fields.bits.quantizer = v->quantizer_mode; pic_param->pic_quantizer_fields.bits.half_qp = v->halfpq; pic_param->pic_quantizer_fields.bits.pic_quantizer_scale = v->pq; pic_param->pic_quantizer_fields.bits.pic_quantizer_type = v->pquantizer; pic_param->pic_quantizer_fields.bits.dq_frame = v->dquantfrm; pic_param->pic_quantizer_fields.bits.dq_profile = v->dqprofile; pic_param->pic_quantizer_fields.bits.dq_sb_edge = v->dqprofile == DQPROFILE_SINGLE_EDGE ? v->dqsbedge : 0; pic_param->pic_quantizer_fields.bits.dq_db_edge = v->dqprofile == DQPROFILE_DOUBLE_EDGES ? 
v->dqsbedge : 0; pic_param->pic_quantizer_fields.bits.dq_binary_level = v->dqbilevel; pic_param->pic_quantizer_fields.bits.alt_pic_quantizer = v->altpq; pic_param->transform_fields.value = 0; /* reset all bits */ pic_param->transform_fields.bits.variable_sized_transform_flag = v->vstransform; pic_param->transform_fields.bits.mb_level_transform_type_flag = v->ttmbf; pic_param->transform_fields.bits.frame_level_transform_type = v->ttfrm; pic_param->transform_fields.bits.transform_ac_codingset_idx1 = v->c_ac_table_index; pic_param->transform_fields.bits.transform_ac_codingset_idx2 = v->y_ac_table_index; pic_param->transform_fields.bits.intra_transform_dc_table = v->s.dc_table_index; switch (s->pict_type) { case FF_B_TYPE: pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture); // fall-through case FF_P_TYPE: pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture); break; } if (pic_param->bitplane_present.value) { uint8_t *bitplane; const uint8_t *ff_bp[3]; int x, y, n; switch (s->pict_type) { case FF_P_TYPE: ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL; ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL; ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane : NULL; break; case FF_B_TYPE: if (!v->bi_type) { ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL; ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL; ff_bp[2] = NULL; /* XXX: interlaced frame (FORWARD plane) */ break; } /* fall-through (BI-type) */ case FF_I_TYPE: ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */ ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred ? v->acpred_plane : NULL; ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags ? 
v->over_flags_plane : NULL; break; default: ff_bp[0] = NULL; ff_bp[1] = NULL; ff_bp[2] = NULL; break; } bitplane = ff_vaapi_alloc_bitplane(vactx, (s->mb_width * s->mb_height + 1) / 2); if (!bitplane) return -1; n = 0; for (y = 0; y < s->mb_height; y++) for (x = 0; x < s->mb_width; x++, n++) vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride); if (n & 1) /* move last nibble to the high order */ bitplane[n/2] <<= 4; } return 0; } static int vaapi_vc1_end_frame(AVCodecContext *avctx) { VC1Context * const v = avctx->priv_data; return ff_vaapi_common_end_frame(&v->s); } static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { VC1Context * const v = avctx->priv_data; MpegEncContext * const s = &v->s; VASliceParameterBufferVC1 *slice_param; dprintf(avctx, "vaapi_vc1_decode_slice(): buffer %p, size %d\n", buffer, size); /* Current bit buffer is beyond any marker for VC-1, so skip it */ if (avctx->codec_id == CODEC_ID_VC1 && IS_MARKER(AV_RB32(buffer))) { buffer += 4; size -= 4; } /* Fill in VASliceParameterBufferVC1 */ slice_param = (VASliceParameterBufferVC1 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size); if (!slice_param) return -1; slice_param->macroblock_offset = get_bits_count(&s->gb); slice_param->slice_vertical_position = s->mb_y; return 0; } #if CONFIG_WMV3_VAAPI_HWACCEL AVHWAccel wmv3_vaapi_hwaccel = { .name = "wmv3_vaapi", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_WMV3, .pix_fmt = PIX_FMT_VAAPI_VLD, .capabilities = 0, .start_frame = vaapi_vc1_start_frame, .end_frame = vaapi_vc1_end_frame, .decode_slice = vaapi_vc1_decode_slice, .priv_data_size = 0, }; #endif AVHWAccel vc1_vaapi_hwaccel = { .name = "vc1_vaapi", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_VC1, .pix_fmt = PIX_FMT_VAAPI_VLD, .capabilities = 0, .start_frame = vaapi_vc1_start_frame, .end_frame = vaapi_vc1_end_frame, .decode_slice = vaapi_vc1_decode_slice, .priv_data_size = 0, };
123linslouis-android-video-cutter
jni/libavcodec/vaapi_vc1.c
C
asf20
17,323
/* * Quicktime Video (RPZA) Video Decoder * Copyright (C) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * QT RPZA Video Decoder by Roberto Togni * For more information about the RPZA format, visit: * http://www.pcisys.net/~melanson/codecs/ * * The RPZA decoder outputs RGB555 colorspace data. * * Note that this decoder reads big endian RGB555 pixel values from the * bytestream, arranges them in the host's endian order, and outputs * them to the final rendered map in the same host endian order. This is * intended behavior as the ffmpeg documentation states that RGB555 pixels * shall be stored in native CPU endianness. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" typedef struct RpzaContext { AVCodecContext *avctx; AVFrame frame; const unsigned char *buf; int size; } RpzaContext; #define ADVANCE_BLOCK() \ { \ pixel_ptr += 4; \ if (pixel_ptr >= width) \ { \ pixel_ptr = 0; \ row_ptr += stride * 4; \ } \ total_blocks--; \ if (total_blocks < 0) \ { \ av_log(s->avctx, AV_LOG_ERROR, "warning: block counter just went negative (this should not happen)\n"); \ return; \ } \ } static void rpza_decode_stream(RpzaContext *s) { int width = s->avctx->width; int stride = s->frame.linesize[0] / 2; int row_inc = stride - 4; int stream_ptr = 0; int chunk_size; unsigned char opcode; int n_blocks; unsigned short colorA = 0, colorB; unsigned short color4[4]; unsigned char index, idx; unsigned short ta, tb; unsigned short *pixels = (unsigned short *)s->frame.data[0]; int row_ptr = 0; int pixel_ptr = 0; int block_ptr; int pixel_x, pixel_y; int total_blocks; /* First byte is always 0xe1. Warn if it's different */ if (s->buf[stream_ptr] != 0xe1) av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n", s->buf[stream_ptr]); /* Get chunk size, ingnoring first byte */ chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF; stream_ptr += 4; /* If length mismatch use size from MOV file and try to decode anyway */ if (chunk_size != s->size) av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n"); chunk_size = s->size; /* Number of 4x4 blocks in frame. 
*/ total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4); /* Process chunk data */ while (stream_ptr < chunk_size) { opcode = s->buf[stream_ptr++]; /* Get opcode */ n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */ /* If opcode MSbit is 0, we need more data to decide what to do */ if ((opcode & 0x80) == 0) { colorA = (opcode << 8) | (s->buf[stream_ptr++]); opcode = 0; if ((s->buf[stream_ptr] & 0x80) != 0) { /* Must behave as opcode 110xxxxx, using colorA computed * above. Use fake opcode 0x20 to enter switch block at * the right place */ opcode = 0x20; n_blocks = 1; } } switch (opcode & 0xe0) { /* Skip blocks */ case 0x80: while (n_blocks--) { ADVANCE_BLOCK(); } break; /* Fill blocks with one color */ case 0xa0: colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; while (n_blocks--) { block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_x = 0; pixel_x < 4; pixel_x++){ pixels[block_ptr] = colorA; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); } break; /* Fill blocks with 4 colors */ case 0xc0: colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; case 0x20: colorB = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; /* sort out the colors */ color4[0] = colorB; color4[1] = 0; color4[2] = 0; color4[3] = colorA; /* red components */ ta = (colorA >> 10) & 0x1F; tb = (colorB >> 10) & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10; color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10; /* green components */ ta = (colorA >> 5) & 0x1F; tb = (colorB >> 5) & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5; color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5; /* blue components */ ta = colorA & 0x1F; tb = colorB & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5); color4[2] |= ((21 * ta + 11 * tb) >> 5); while (n_blocks--) { block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { index = s->buf[stream_ptr++]; for (pixel_x = 0; pixel_x < 4; pixel_x++){ idx = (index >> (2 * (3 - 
pixel_x))) & 0x03; pixels[block_ptr] = color4[idx]; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); } break; /* Fill block with 16 colors */ case 0x00: block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_x = 0; pixel_x < 4; pixel_x++){ /* We already have color of upper left pixel */ if ((pixel_y != 0) || (pixel_x !=0)) { colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; } pixels[block_ptr] = colorA; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); break; /* Unknown opcode */ default: av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk." " Skip remaining %d bytes of chunk data.\n", opcode, chunk_size - stream_ptr); return; } /* Opcode switch */ } } static av_cold int rpza_decode_init(AVCodecContext *avctx) { RpzaContext *s = avctx->priv_data; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_RGB555; s->frame.data[0] = NULL; return 0; } static int rpza_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; RpzaContext *s = avctx->priv_data; s->buf = buf; s->size = buf_size; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } rpza_decode_stream(s); *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* always report that the buffer was completely consumed */ return buf_size; } static av_cold int rpza_decode_end(AVCodecContext *avctx) { RpzaContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec rpza_decoder = { "rpza", AVMEDIA_TYPE_VIDEO, CODEC_ID_RPZA, sizeof(RpzaContext), rpza_decode_init, NULL, rpza_decode_end, rpza_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime video (RPZA)"), };
123linslouis-android-video-cutter
jni/libavcodec/rpza.c
C
asf20
8,784
/* * MPEG1/2 encoder * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MPEG1/2 encoder */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "mpeg12.h" #include "mpeg12data.h" #include "bytestream.h" static const uint8_t inv_non_linear_qscale[13] = { 0, 2, 4, 6, 8, 9,10,11,12,13,14,15,16, }; static const uint8_t svcd_scan_offset_placeholder[14] = { 0x10, 0x0E, 0x00, 0x80, 0x81, 0x00, 0x80, 0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; static void mpeg1_encode_block(MpegEncContext *s, DCTELEM *block, int component); static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1]; static uint8_t fcode_tab[MAX_MV*2+1]; static uint8_t uni_mpeg1_ac_vlc_len [64*64*2]; static uint8_t uni_mpeg2_ac_vlc_len [64*64*2]; /* simple include everything table for dc, first byte is bits number next 3 are code*/ static uint32_t mpeg1_lum_dc_uni[512]; static uint32_t mpeg1_chr_dc_uni[512]; static uint8_t mpeg1_index_run[2][64]; static int8_t mpeg1_max_level[2][64]; static void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len){ int i; for(i=0; i<128; i++){ int 
level= i-64; int run; for(run=0; run<64; run++){ int len, bits, code; int alevel= FFABS(level); int sign= (level>>31)&1; if (alevel > rl->max_level[0][run]) code= 111; /*rl->n*/ else code= rl->index_run[0][run] + alevel - 1; if (code < 111 /* rl->n */) { /* store the vlc & sign at once */ len= rl->table_vlc[code][1]+1; bits= (rl->table_vlc[code][0]<<1) + sign; } else { len= rl->table_vlc[111/*rl->n*/][1]+6; bits= rl->table_vlc[111/*rl->n*/][0]<<6; bits|= run; if (alevel < 128) { bits<<=8; len+=8; bits|= level & 0xff; } else { bits<<=16; len+=16; bits|= level & 0xff; if (level < 0) { bits|= 0x8001 + level + 255; } else { bits|= level & 0xffff; } } } uni_ac_vlc_len [UNI_AC_ENC_INDEX(run, i)]= len; } } } static int find_frame_rate_index(MpegEncContext *s){ int i; int64_t dmin= INT64_MAX; int64_t d; for(i=1;i<14;i++) { int64_t n0= 1001LL/ff_frame_rate_tab[i].den*ff_frame_rate_tab[i].num*s->avctx->time_base.num; int64_t n1= 1001LL*s->avctx->time_base.den; if(s->avctx->strict_std_compliance > FF_COMPLIANCE_INOFFICIAL && i>=9) break; d = FFABS(n0 - n1); if(d < dmin){ dmin=d; s->frame_rate_index= i; } } if(dmin) return -1; else return 0; } static av_cold int encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; if(MPV_encode_init(avctx) < 0) return -1; if(find_frame_rate_index(s) < 0){ if(s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){ av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num); return -1; }else{ av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num); } } if(avctx->profile == FF_PROFILE_UNKNOWN){ if(avctx->level != FF_LEVEL_UNKNOWN){ av_log(avctx, AV_LOG_ERROR, "Set profile and level\n"); return -1; } avctx->profile = s->chroma_format == CHROMA_420 ? 
4 : 0; /* Main or 4:2:2 */ } if(avctx->level == FF_LEVEL_UNKNOWN){ if(avctx->profile == 0){ /* 4:2:2 */ if(avctx->width <= 720 && avctx->height <= 608) avctx->level = 5; /* Main */ else avctx->level = 2; /* High */ }else{ if(avctx->profile != 1 && s->chroma_format != CHROMA_420){ av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n"); return -1; } if(avctx->width <= 720 && avctx->height <= 576) avctx->level = 8; /* Main */ else if(avctx->width <= 1440) avctx->level = 6; /* High 1440 */ else avctx->level = 4; /* High */ } } if((avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) && s->frame_rate_index != 4){ av_log(avctx, AV_LOG_ERROR, "Drop frame time code only allowed with 1001/30000 fps\n"); return -1; } return 0; } static void put_header(MpegEncContext *s, int header) { align_put_bits(&s->pb); put_bits(&s->pb, 16, header>>16); put_sbits(&s->pb, 16, header); } /* put sequence header if needed */ static void mpeg1_encode_sequence_header(MpegEncContext *s) { unsigned int vbv_buffer_size; unsigned int fps, v; int i; uint64_t time_code; float best_aspect_error= 1E10; float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio); int constraint_parameter_flag; if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA) if (s->current_picture.key_frame) { AVRational framerate= ff_frame_rate_tab[s->frame_rate_index]; /* mpeg1 header repeated every gop */ put_header(s, SEQ_START_CODE); put_sbits(&s->pb, 12, s->width ); put_sbits(&s->pb, 12, s->height); for(i=1; i<15; i++){ float error= aspect_ratio; if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1) error-= 1.0/ff_mpeg1_aspect[i]; else error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width; error= FFABS(error); if(error < best_aspect_error){ best_aspect_error= error; s->aspect_ratio_info= i; } } put_bits(&s->pb, 4, s->aspect_ratio_info); put_bits(&s->pb, 4, s->frame_rate_index); if(s->avctx->rc_max_rate){ v = (s->avctx->rc_max_rate + 399) / 400; if (v > 0x3ffff && s->codec_id == 
CODEC_ID_MPEG1VIDEO) v = 0x3ffff; }else{ v= 0x3FFFF; } if(s->avctx->rc_buffer_size) vbv_buffer_size = s->avctx->rc_buffer_size; else /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */ vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024; vbv_buffer_size= (vbv_buffer_size + 16383) / 16384; put_sbits(&s->pb, 18, v); put_bits(&s->pb, 1, 1); /* marker */ put_sbits(&s->pb, 10, vbv_buffer_size); constraint_parameter_flag= s->width <= 768 && s->height <= 576 && s->mb_width * s->mb_height <= 396 && s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 && framerate.num <= framerate.den*30 && s->avctx->me_range && s->avctx->me_range < 128 && vbv_buffer_size <= 20 && v <= 1856000/400 && s->codec_id == CODEC_ID_MPEG1VIDEO; put_bits(&s->pb, 1, constraint_parameter_flag); ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); if(s->codec_id == CODEC_ID_MPEG2VIDEO){ put_header(s, EXT_START_CODE); put_bits(&s->pb, 4, 1); //seq ext put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile */ put_bits(&s->pb, 3, s->avctx->profile); //profile put_bits(&s->pb, 4, s->avctx->level); //level put_bits(&s->pb, 1, s->progressive_sequence); put_bits(&s->pb, 2, s->chroma_format); put_bits(&s->pb, 2, s->width >>12); put_bits(&s->pb, 2, s->height>>12); put_bits(&s->pb, 12, v>>18); //bitrate ext put_bits(&s->pb, 1, 1); //marker put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext put_bits(&s->pb, 1, s->low_delay); put_bits(&s->pb, 2, 0); // frame_rate_ext_n put_bits(&s->pb, 5, 0); // frame_rate_ext_d } put_header(s, GOP_START_CODE); put_bits(&s->pb, 1, !!(s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE)); /* drop frame flag */ /* time code : we must convert from the real frame rate to a fake mpeg frame rate in case of low frame rate */ fps = (framerate.num + framerate.den/2)/ framerate.den; time_code = s->current_picture_ptr->coded_picture_number + 
s->avctx->timecode_frame_start; s->gop_picture_number = s->current_picture_ptr->coded_picture_number; if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) { /* only works for NTSC 29.97 */ int d = time_code / 17982; int m = time_code % 17982; //if (m < 2) m += 2; /* not needed since -2,-1 / 1798 in C returns 0 */ time_code += 18 * d + 2 * ((m - 2) / 1798); } put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60)); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); /* broken link */ } } static inline void encode_mb_skip_run(MpegEncContext *s, int run){ while (run >= 33) { put_bits(&s->pb, 11, 0x008); run -= 33; } put_bits(&s->pb, ff_mpeg12_mbAddrIncrTable[run][1], ff_mpeg12_mbAddrIncrTable[run][0]); } static av_always_inline void put_qscale(MpegEncContext *s) { if(s->q_scale_type){ assert(s->qscale>=1 && s->qscale <=12); put_bits(&s->pb, 5, inv_non_linear_qscale[s->qscale]); }else{ put_bits(&s->pb, 5, s->qscale); } } void ff_mpeg1_encode_slice_header(MpegEncContext *s){ if (s->height > 2800) { put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127)); put_bits(&s->pb, 3, s->mb_y >> 7); /* slice_vertical_position_extension */ } else { put_header(s, SLICE_MIN_START_CODE + s->mb_y); } put_qscale(s); put_bits(&s->pb, 1, 0); /* slice extra information */ } void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) { mpeg1_encode_sequence_header(s); /* mpeg1 picture header */ put_header(s, PICTURE_START_CODE); /* temporal reference */ // RAL: s->picture_number instead of s->fake_picture_number put_bits(&s->pb, 10, (s->picture_number - s->gop_picture_number) & 0x3ff); put_bits(&s->pb, 3, s->pict_type); s->vbv_delay_ptr= s->pb.buf + put_bits_count(&s->pb)/8; put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ // RAL: Forward f_code 
also needed for B frames if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ if(s->codec_id == CODEC_ID_MPEG1VIDEO) put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ else put_bits(&s->pb, 3, 7); /* forward_f_code */ } // RAL: Backward f_code necessary for B frames if (s->pict_type == FF_B_TYPE) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ if(s->codec_id == CODEC_ID_MPEG1VIDEO) put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ else put_bits(&s->pb, 3, 7); /* backward_f_code */ } put_bits(&s->pb, 1, 0); /* extra bit picture */ s->frame_pred_frame_dct = 1; if(s->codec_id == CODEC_ID_MPEG2VIDEO){ put_header(s, EXT_START_CODE); put_bits(&s->pb, 4, 8); //pic ext if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) { put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->f_code); }else{ put_bits(&s->pb, 8, 255); } if (s->pict_type == FF_B_TYPE) { put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->b_code); }else{ put_bits(&s->pb, 8, 255); } put_bits(&s->pb, 2, s->intra_dc_precision); assert(s->picture_structure == PICT_FRAME); put_bits(&s->pb, 2, s->picture_structure); if (s->progressive_sequence) { put_bits(&s->pb, 1, 0); /* no repeat */ } else { put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); } /* XXX: optimize the generation of this flag with entropy measures */ s->frame_pred_frame_dct = s->progressive_sequence; put_bits(&s->pb, 1, s->frame_pred_frame_dct); put_bits(&s->pb, 1, s->concealment_motion_vectors); put_bits(&s->pb, 1, s->q_scale_type); put_bits(&s->pb, 1, s->intra_vlc_format); put_bits(&s->pb, 1, s->alternate_scan); put_bits(&s->pb, 1, s->repeat_first_field); s->progressive_frame = s->progressive_sequence; put_bits(&s->pb, 1, s->chroma_format == CHROMA_420 ? 
s->progressive_frame : 0); /* chroma_420_type */ put_bits(&s->pb, 1, s->progressive_frame); put_bits(&s->pb, 1, 0); //composite_display_flag } if(s->flags & CODEC_FLAG_SVCD_SCAN_OFFSET){ int i; put_header(s, USER_START_CODE); for(i=0; i<sizeof(svcd_scan_offset_placeholder); i++){ put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]); } } s->mb_y=0; ff_mpeg1_encode_slice_header(s); } static inline void put_mb_modes(MpegEncContext *s, int n, int bits, int has_mv, int field_motion) { put_bits(&s->pb, n, bits); if (!s->frame_pred_frame_dct) { if (has_mv) put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */ put_bits(&s->pb, 1, s->interlaced_dct); } } static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y, int mb_block_count) { int i, cbp; const int mb_x = s->mb_x; const int mb_y = s->mb_y; const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y; /* compute cbp */ cbp = 0; for(i=0;i<mb_block_count;i++) { if (s->block_last_index[i] >= 0) cbp |= 1 << (mb_block_count - 1 - i); } if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && ((s->pict_type == FF_P_TYPE && (motion_x | motion_y) == 0) || (s->pict_type == FF_B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | ((s->mv_dir & MV_DIR_BACKWARD) ? 
((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { s->mb_skip_run++; s->qscale -= s->dquant; s->skip_count++; s->misc_bits++; s->last_bits++; if(s->pict_type == FF_P_TYPE){ s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; } } else { if(first_mb){ assert(s->mb_skip_run == 0); encode_mb_skip_run(s, s->mb_x); }else{ encode_mb_skip_run(s, s->mb_skip_run); } if (s->pict_type == FF_I_TYPE) { if(s->dquant && cbp){ put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ put_qscale(s); }else{ put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; } else if (s->mb_intra) { if(s->dquant && cbp){ put_mb_modes(s, 6, 0x01, 0, 0); put_qscale(s); }else{ put_mb_modes(s, 5, 0x03, 0, 0); s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; memset(s->last_mv, 0, sizeof(s->last_mv)); } else if (s->pict_type == FF_P_TYPE) { if(s->mv_type == MV_TYPE_16X16){ if (cbp != 0) { if ((motion_x|motion_y) == 0) { if(s->dquant){ put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */ put_qscale(s); }else{ put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */ } s->misc_bits+= get_bits_diff(s); } else { if(s->dquant){ put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */ put_qscale(s); }else{ put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */ } s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->mv_bits+= get_bits_diff(s); } } else { put_bits(&s->pb, 3, 1); /* motion only */ if (!s->frame_pred_frame_dct) put_bits(&s->pb, 2, 2); /* motion_type: frame */ s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y 
- s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->qscale -= s->dquant; s->mv_bits+= get_bits_diff(s); } s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; }else{ assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); if (cbp) { if(s->dquant){ put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */ put_qscale(s); }else{ put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */ } } else { put_bits(&s->pb, 3, 1); /* motion only */ put_bits(&s->pb, 2, 1); /* motion_type: field */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[0][i]); mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); s->last_mv[0][i][0]= s->mv[0][i][0]; s->last_mv[0][i][1]= 2*s->mv[0][i][1]; } s->mv_bits+= get_bits_diff(s); } if(cbp) { if (s->chroma_y_shift) { put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]); } else { put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp>>2][1], ff_mpeg12_mbPatTable[cbp>>2][0]); put_sbits(&s->pb, 2, cbp); } } s->f_count++; } else{ if(s->mv_type == MV_TYPE_16X16){ if (cbp){ // With coded bloc pattern if (s->dquant) { if(s->mv_dir == MV_DIR_FORWARD) put_mb_modes(s, 6, 3, 1, 0); else put_mb_modes(s, 8-s->mv_dir, 2, 1, 0); put_qscale(s); } else { put_mb_modes(s, 5-s->mv_dir, 3, 1, 0); } }else{ // No coded bloc pattern put_bits(&s->pb, 5-s->mv_dir, 2); if (!s->frame_pred_frame_dct) put_bits(&s->pb, 2, 2); /* motion_type: frame */ s->qscale -= s->dquant; } s->misc_bits += get_bits_diff(s); if (s->mv_dir&MV_DIR_FORWARD){ mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0]; s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1]; s->f_count++; } if (s->mv_dir&MV_DIR_BACKWARD){ 
mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0]; s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1]; s->b_count++; } }else{ assert(s->mv_type == MV_TYPE_FIELD); assert(!s->frame_pred_frame_dct); if (cbp){ // With coded bloc pattern if (s->dquant) { if(s->mv_dir == MV_DIR_FORWARD) put_mb_modes(s, 6, 3, 1, 1); else put_mb_modes(s, 8-s->mv_dir, 2, 1, 1); put_qscale(s); } else { put_mb_modes(s, 5-s->mv_dir, 3, 1, 1); } }else{ // No coded bloc pattern put_bits(&s->pb, 5-s->mv_dir, 2); put_bits(&s->pb, 2, 1); /* motion_type: field */ s->qscale -= s->dquant; } s->misc_bits += get_bits_diff(s); if (s->mv_dir&MV_DIR_FORWARD){ for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[0][i]); mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); s->last_mv[0][i][0]= s->mv[0][i][0]; s->last_mv[0][i][1]= 2*s->mv[0][i][1]; } s->f_count++; } if (s->mv_dir&MV_DIR_BACKWARD){ for(i=0; i<2; i++){ put_bits(&s->pb, 1, s->field_select[1][i]); mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code); mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code); s->last_mv[1][i][0]= s->mv[1][i][0]; s->last_mv[1][i][1]= 2*s->mv[1][i][1]; } s->b_count++; } } s->mv_bits += get_bits_diff(s); if(cbp) { if (s->chroma_y_shift) { put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]); } else { put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp>>2][1], ff_mpeg12_mbPatTable[cbp>>2][0]); put_sbits(&s->pb, 2, cbp); } } } for(i=0;i<mb_block_count;i++) { if (cbp & (1 << (mb_block_count - 1 - i))) { mpeg1_encode_block(s, block[i], i); } } s->mb_skip_run = 0; if(s->mb_intra) s->i_tex_bits+= get_bits_diff(s); else s->p_tex_bits+= get_bits_diff(s); } } void mpeg1_encode_mb(MpegEncContext *s, DCTELEM 
block[6][64], int motion_x, int motion_y) { if (s->chroma_format == CHROMA_420) mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6); else mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8); } // RAL: Parameter added: f_or_b_code static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code) { if (val == 0) { /* zero vector */ put_bits(&s->pb, ff_mpeg12_mbMotionVectorTable[0][1], ff_mpeg12_mbMotionVectorTable[0][0]); } else { int code, sign, bits; int bit_size = f_or_b_code - 1; int range = 1 << bit_size; /* modulo encoding */ int l= INT_BIT - 5 - bit_size; val= (val<<l)>>l; if (val >= 0) { val--; code = (val >> bit_size) + 1; bits = val & (range - 1); sign = 0; } else { val = -val; val--; code = (val >> bit_size) + 1; bits = val & (range - 1); sign = 1; } assert(code > 0 && code <= 16); put_bits(&s->pb, ff_mpeg12_mbMotionVectorTable[code][1], ff_mpeg12_mbMotionVectorTable[code][0]); put_bits(&s->pb, 1, sign); if (bit_size > 0) { put_bits(&s->pb, bit_size, bits); } } } void ff_mpeg1_encode_init(MpegEncContext *s) { static int done=0; ff_mpeg12_common_init(s); if(!done){ int f_code; int mv; int i; done=1; init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]); init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]); for(i=0; i<64; i++) { mpeg1_max_level[0][i]= ff_rl_mpeg1.max_level[0][i]; mpeg1_index_run[0][i]= ff_rl_mpeg1.index_run[0][i]; } init_uni_ac_vlc(&ff_rl_mpeg1, uni_mpeg1_ac_vlc_len); if(s->intra_vlc_format) init_uni_ac_vlc(&ff_rl_mpeg2, uni_mpeg2_ac_vlc_len); /* build unified dc encoding tables */ for(i=-255; i<256; i++) { int adiff, index; int bits, code; int diff=i; adiff = FFABS(diff); if(diff<0) diff--; index = av_log2(2*adiff); bits= ff_mpeg12_vlc_dc_lum_bits[index] + index; code= (ff_mpeg12_vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1)); mpeg1_lum_dc_uni[i+255]= bits + (code<<8); bits= ff_mpeg12_vlc_dc_chroma_bits[index] + index; code= (ff_mpeg12_vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 
1)); mpeg1_chr_dc_uni[i+255]= bits + (code<<8); } for(f_code=1; f_code<=MAX_FCODE; f_code++){ for(mv=-MAX_MV; mv<=MAX_MV; mv++){ int len; if(mv==0) len= ff_mpeg12_mbMotionVectorTable[0][1]; else{ int val, bit_size, range, code; bit_size = f_code - 1; range = 1 << bit_size; val=mv; if (val < 0) val = -val; val--; code = (val >> bit_size) + 1; if(code<17){ len= ff_mpeg12_mbMotionVectorTable[code][1] + 1 + bit_size; }else{ len= ff_mpeg12_mbMotionVectorTable[16][1] + 2 + bit_size; } } mv_penalty[f_code][mv+MAX_MV]= len; } } for(f_code=MAX_FCODE; f_code>0; f_code--){ for(mv=-(8<<f_code); mv<(8<<f_code); mv++){ fcode_tab[mv+MAX_MV]= f_code; } } } s->me.mv_penalty= mv_penalty; s->fcode_tab= fcode_tab; if(s->codec_id == CODEC_ID_MPEG1VIDEO){ s->min_qcoeff=-255; s->max_qcoeff= 255; }else{ s->min_qcoeff=-2047; s->max_qcoeff= 2047; } if (s->intra_vlc_format) { s->intra_ac_vlc_length= s->intra_ac_vlc_last_length= uni_mpeg2_ac_vlc_len; } else { s->intra_ac_vlc_length= s->intra_ac_vlc_last_length= uni_mpeg1_ac_vlc_len; } s->inter_ac_vlc_length= s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len; } static inline void encode_dc(MpegEncContext *s, int diff, int component) { if(((unsigned) (diff+255)) >= 511){ int index; if(diff<0){ index= av_log2_16bit(-2*diff); diff--; }else{ index= av_log2_16bit(2*diff); } if (component == 0) { put_bits( &s->pb, ff_mpeg12_vlc_dc_lum_bits[index] + index, (ff_mpeg12_vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1))); }else{ put_bits( &s->pb, ff_mpeg12_vlc_dc_chroma_bits[index] + index, (ff_mpeg12_vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 1))); } }else{ if (component == 0) { put_bits( &s->pb, mpeg1_lum_dc_uni[diff+255]&0xFF, mpeg1_lum_dc_uni[diff+255]>>8); } else { put_bits( &s->pb, mpeg1_chr_dc_uni[diff+255]&0xFF, mpeg1_chr_dc_uni[diff+255]>>8); } } } static void mpeg1_encode_block(MpegEncContext *s, DCTELEM *block, int n) { int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign; int code, component; 
const uint16_t (*table_vlc)[2] = ff_rl_mpeg1.table_vlc; last_index = s->block_last_index[n]; /* DC coef */ if (s->mb_intra) { component = (n <= 3 ? 0 : (n&1) + 1); dc = block[0]; /* overflow is impossible */ diff = dc - s->last_dc[component]; encode_dc(s, diff, component); s->last_dc[component] = dc; i = 1; if (s->intra_vlc_format) table_vlc = ff_rl_mpeg2.table_vlc; } else { /* encode the first coefficient : needs to be done here because it is handled slightly differently */ level = block[0]; if (abs(level) == 1) { code = ((uint32_t)level >> 31); /* the sign bit */ put_bits(&s->pb, 2, code | 0x02); i = 1; } else { i = 0; last_non_zero = -1; goto next_coef; } } /* now quantify & encode AC coefs */ last_non_zero = i - 1; for(;i<=last_index;i++) { j = s->intra_scantable.permutated[i]; level = block[j]; next_coef: #if 0 if (level != 0) dprintf(s->avctx, "level[%d]=%d\n", i, level); #endif /* encode using VLC */ if (level != 0) { run = i - last_non_zero - 1; alevel= level; MASK_ABS(sign, alevel) sign&=1; if (alevel <= mpeg1_max_level[0][run]){ code= mpeg1_index_run[0][run] + alevel - 1; /* store the vlc & sign at once */ put_bits(&s->pb, table_vlc[code][1]+1, (table_vlc[code][0]<<1) + sign); } else { /* escape seems to be pretty rare <5% so I do not optimize it */ put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]); /* escape: only clip in this case */ put_bits(&s->pb, 6, run); if(s->codec_id == CODEC_ID_MPEG1VIDEO){ if (alevel < 128) { put_sbits(&s->pb, 8, level); } else { if (level < 0) { put_bits(&s->pb, 16, 0x8001 + level + 255); } else { put_sbits(&s->pb, 16, level); } } }else{ put_sbits(&s->pb, 12, level); } } last_non_zero = i; } } /* end of block */ put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]); } AVCodec mpeg1video_encoder = { "mpeg1video", AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG1VIDEO, sizeof(MpegEncContext), encode_init, MPV_encode_picture, MPV_encode_end, .supported_framerates= ff_frame_rate_tab+1, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, 
PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY, .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), }; AVCodec mpeg2video_encoder = { "mpeg2video", AVMEDIA_TYPE_VIDEO, CODEC_ID_MPEG2VIDEO, sizeof(MpegEncContext), encode_init, MPV_encode_picture, MPV_encode_end, .supported_framerates= ff_frame_rate_tab+1, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY, .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), };
123linslouis-android-video-cutter
jni/libavcodec/mpeg12enc.c
C
asf20
35,270
/* * Copyright (c) 2002 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "mpegvideo.h" #include "msmpeg4data.h" #include "simple_idct.h" #include "wmv2.h" av_cold void ff_wmv2_common_init(Wmv2Context * w){ MpegEncContext * const s= &w->s; ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], wmv2_scantableA); ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB); } static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){ MpegEncContext * const s= &w->s; if (s->block_last_index[n] >= 0) { switch(w->abt_type_table[n]){ case 0: s->dsp.idct_add (dst, stride, block1); break; case 1: ff_simple_idct84_add(dst , stride, block1); ff_simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]); s->dsp.clear_block(w->abt_block2[n]); break; case 2: ff_simple_idct48_add(dst , stride, block1); ff_simple_idct48_add(dst + 4 , stride, w->abt_block2[n]); s->dsp.clear_block(w->abt_block2[n]); break; default: av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n"); } } } void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){ Wmv2Context * const w= (Wmv2Context*)s; wmv2_add_block(w, block1[0], dest_y , s->linesize, 
0); wmv2_add_block(w, block1[1], dest_y + 8 , s->linesize, 1); wmv2_add_block(w, block1[2], dest_y + 8*s->linesize, s->linesize, 2); wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3); if(s->flags&CODEC_FLAG_GRAY) return; wmv2_add_block(w, block1[4], dest_cb , s->uvlinesize, 4); wmv2_add_block(w, block1[5], dest_cr , s->uvlinesize, 5); } void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], int motion_x, int motion_y, int h) { Wmv2Context * const w= (Wmv2Context*)s; uint8_t *ptr; int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize; int emu=0; dxy = ((motion_y & 1) << 1) | (motion_x & 1); dxy = 2*dxy + w->hshift; src_x = s->mb_x * 16 + (motion_x >> 1); src_y = s->mb_y * 16 + (motion_y >> 1); /* WARNING: do no forget half pels */ v_edge_pos = s->v_edge_pos; src_x = av_clip(src_x, -16, s->width); src_y = av_clip(src_y, -16, s->height); if(src_x<=-16 || src_x >= s->width) dxy &= ~3; if(src_y<=-16 || src_y >= s->height) dxy &= ~4; linesize = s->linesize; uvlinesize = s->uvlinesize; ptr = ref_picture[0] + (src_y * linesize) + src_x; if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos || src_y + h+1 >= v_edge_pos){ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19, src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer + 1 + s->linesize; emu=1; } } s->dsp.put_mspel_pixels_tab[dxy](dest_y , ptr , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8 , ptr+8 , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y +8*linesize, ptr +8*linesize, linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize); if(s->flags&CODEC_FLAG_GRAY) return; if (s->out_format == FMT_H263) { dxy = 0; if ((motion_x & 3) != 0) dxy |= 1; if ((motion_y & 3) != 0) dxy |= 2; mx = motion_x >> 2; my = motion_y >> 2; } else { mx = motion_x / 2; my = motion_y / 
2; dxy = ((my & 1) << 1) | (mx & 1); mx >>= 1; my >>= 1; } src_x = s->mb_x * 8 + mx; src_y = s->mb_y * 8 + my; src_x = av_clip(src_x, -8, s->width >> 1); if (src_x == (s->width >> 1)) dxy &= ~1; src_y = av_clip(src_y, -8, s->height >> 1); if (src_y == (s->height >> 1)) dxy &= ~2; offset = (src_y * uvlinesize) + src_x; ptr = ref_picture[1] + offset; if(emu){ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1); ptr = ref_picture[2] + offset; if(emu){ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1); }
123linslouis-android-video-cutter
jni/libavcodec/wmv2.c
C
asf20
5,733
/* * JPEG-LS common code * Copyright (c) 2003 Michael Niedermayer * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * JPEG-LS common code. */ #ifndef AVCODEC_JPEGLS_H #define AVCODEC_JPEGLS_H #include "avcodec.h" typedef struct JpeglsContext{ AVCodecContext *avctx; AVFrame picture; }JpeglsContext; typedef struct JLSState{ int T1, T2, T3; int A[367], B[367], C[365], N[367]; int limit, reset, bpp, qbpp, maxval, range; int near, twonear; int run_index[3]; }JLSState; extern const uint8_t ff_log2_run[32]; /** * Calculate initial JPEG-LS parameters */ void ff_jpegls_init_state(JLSState *state); /** * Calculate quantized gradient value, used for context determination */ static inline int ff_jpegls_quantize(JLSState *s, int v){ //FIXME optimize if(v==0) return 0; if(v < 0){ if(v <= -s->T3) return -4; if(v <= -s->T2) return -3; if(v <= -s->T1) return -2; if(v < -s->near) return -1; return 0; }else{ if(v <= s->near) return 0; if(v < s->T1) return 1; if(v < s->T2) return 2; if(v < s->T3) return 3; return 4; } } /** * Calculate JPEG-LS codec values */ void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all); static inline void ff_jpegls_downscale_state(JLSState *state, int Q){ if(state->N[Q] == state->reset){ state->A[Q] >>=1; 
state->B[Q] >>=1; state->N[Q] >>=1; } state->N[Q]++; } static inline int ff_jpegls_update_state_regular(JLSState *state, int Q, int err){ state->A[Q] += FFABS(err); err *= state->twonear; state->B[Q] += err; ff_jpegls_downscale_state(state, Q); if(state->B[Q] <= -state->N[Q]) { state->B[Q]= FFMAX(state->B[Q] + state->N[Q], 1-state->N[Q]); if(state->C[Q] > -128) state->C[Q]--; }else if(state->B[Q] > 0){ state->B[Q]= FFMIN(state->B[Q] - state->N[Q], 0); if(state->C[Q] < 127) state->C[Q]++; } return err; } #define R(a, i ) (bits == 8 ? ((uint8_t*)(a))[i] : ((uint16_t*)(a))[i] ) #define W(a, i, v) (bits == 8 ? (((uint8_t*)(a))[i]=v) : (((uint16_t*)(a))[i]=v)) #endif /* AVCODEC_JPEGLS_H */
123linslouis-android-video-cutter
jni/libavcodec/jpegls.h
C
asf20
2,994
/* * H.263 decoder * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.263 decoder. */ #include "internal.h" #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "h263.h" #include "h263_parser.h" #include "mpeg4video_parser.h" #include "msmpeg4.h" #include "vdpau_internal.h" #include "flv.h" #include "mpeg4video.h" //#define DEBUG //#define PRINT_FRAME_TIME av_cold int ff_h263_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; s->avctx = avctx; s->out_format = FMT_H263; s->width = avctx->coded_width; s->height = avctx->coded_height; s->workaround_bugs= avctx->workaround_bugs; // set defaults MPV_decode_defaults(s); s->quant_precision=5; s->decode_mb= ff_h263_decode_mb; s->low_delay= 1; avctx->pix_fmt= avctx->get_format(avctx, avctx->codec->pix_fmts); s->unrestricted_mv= 1; /* select sub codec */ switch(avctx->codec->id) { case CODEC_ID_H263: s->unrestricted_mv= 0; avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; break; case CODEC_ID_MPEG4: break; case CODEC_ID_MSMPEG4V1: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=1; break; case CODEC_ID_MSMPEG4V2: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=2; 
break; case CODEC_ID_MSMPEG4V3: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=3; break; case CODEC_ID_WMV1: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=4; break; case CODEC_ID_WMV2: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=5; break; case CODEC_ID_VC1: case CODEC_ID_WMV3: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=6; avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break; case CODEC_ID_H263I: break; case CODEC_ID_FLV1: s->h263_flv = 1; break; default: return -1; } s->codec_id= avctx->codec->id; avctx->hwaccel= ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); /* for h263, we allocate the images after having read the header */ if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4) if (MPV_common_init(s) < 0) return -1; h263_decode_init_vlc(s); return 0; } av_cold int ff_h263_decode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; MPV_common_end(s); return 0; } /** * returns the number of bytes consumed for building the current frame */ static int get_consumed_bytes(MpegEncContext *s, int buf_size){ int pos= (get_bits_count(&s->gb)+7)>>3; if(s->divx_packed || s->avctx->hwaccel){ //we would have to scan through the whole buf to handle the weird reordering ... return buf_size; }else if(s->flags&CODEC_FLAG_TRUNCATED){ pos -= s->parse_context.last_index; if(pos<0) pos=0; // padding is not really read so this might be -1 return pos; }else{ if(pos==0) pos=1; //avoid infinite loops (i doubt that is needed but ...) if(pos+10>buf_size) pos=buf_size; // oops ;) return pos; } } static int decode_slice(MpegEncContext *s){ const int part_mask= s->partitioned_frame ? 
(AC_END|AC_ERROR) : 0x7F; const int mb_size= 16>>s->avctx->lowres; s->last_resync_gb= s->gb; s->first_slice_line= 1; s->resync_mb_x= s->mb_x; s->resync_mb_y= s->mb_y; ff_set_qscale(s, s->qscale); if (s->avctx->hwaccel) { const uint8_t *start= s->gb.buffer + get_bits_count(&s->gb)/8; const uint8_t *end = ff_h263_find_resync_marker(start + 1, s->gb.buffer_end); skip_bits_long(&s->gb, 8*(end - start)); return s->avctx->hwaccel->decode_slice(s->avctx, start, end - start); } if(s->partitioned_frame){ const int qscale= s->qscale; if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){ if(ff_mpeg4_decode_partitions(s) < 0) return -1; } /* restore variables which were modified */ s->first_slice_line=1; s->mb_x= s->resync_mb_x; s->mb_y= s->resync_mb_y; ff_set_qscale(s, qscale); } for(; s->mb_y < s->mb_height; s->mb_y++) { /* per-row end of slice checks */ if(s->msmpeg4_version){ if(s->resync_mb_y + s->slice_height == s->mb_y){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); return 0; } } if(s->msmpeg4_version==1){ s->last_dc[0]= s->last_dc[1]= s->last_dc[2]= 128; } ff_init_block_index(s); for(; s->mb_x < s->mb_width; s->mb_x++) { int ret; ff_update_block_index(s); if(s->resync_mb_x == s->mb_x && s->resync_mb_y+1 == s->mb_y){ s->first_slice_line=0; } /* DCT & quantize */ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; // s->mb_skipped = 0; //printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24)); ret= s->decode_mb(s, s->block); if (s->pict_type!=FF_B_TYPE) ff_h263_update_motion_val(s); if(ret<0){ const int xy= s->mb_x + s->mb_y*s->mb_stride; if(ret==SLICE_END){ MPV_decode_mb(s, s->block); if(s->loop_filter) ff_h263_loop_filter(s); //printf("%d %d %d %06X\n", s->mb_x, s->mb_y, s->gb.size*8 - get_bits_count(&s->gb), show_bits(&s->gb, 24)); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); s->padding_bug_score--; if(++s->mb_x >= s->mb_width){ 
s->mb_x=0; ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size); s->mb_y++; } return 0; }else if(ret==SLICE_NOEND){ av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return -1; } av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } MPV_decode_mb(s, s->block); if(s->loop_filter) ff_h263_loop_filter(s); } ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size); s->mb_x= 0; } assert(s->mb_x==0 && s->mb_y==s->mb_height); /* try to detect the padding bug */ if( s->codec_id==CODEC_ID_MPEG4 && (s->workaround_bugs&FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >=0 && get_bits_left(&s->gb) < 48 // && !s->resync_marker && !s->data_partitioning){ const int bits_count= get_bits_count(&s->gb); const int bits_left = s->gb.size_in_bits - bits_count; if(bits_left==0){ s->padding_bug_score+=16; } else if(bits_left != 1){ int v= show_bits(&s->gb, 8); v|= 0x7F >> (7-(bits_count&7)); if(v==0x7F && bits_left<=8) s->padding_bug_score--; else if(v==0x7F && ((get_bits_count(&s->gb)+8)&8) && bits_left<=16) s->padding_bug_score+= 4; else s->padding_bug_score++; } } if(s->workaround_bugs&FF_BUG_AUTODETECT){ if(s->padding_bug_score > -2 && !s->data_partitioning /*&& (s->divx_version>=0 || !s->resync_marker)*/) s->workaround_bugs |= FF_BUG_NO_PADDING; else s->workaround_bugs &= ~FF_BUG_NO_PADDING; } // handle formats which don't have unique end markers if(s->msmpeg4_version || (s->workaround_bugs&FF_BUG_NO_PADDING)){ //FIXME perhaps solve this more cleanly int left= get_bits_left(&s->gb); int max_extra=7; /* no markers in M$ crap */ if(s->msmpeg4_version && s->pict_type==FF_I_TYPE) max_extra+= 17; /* buggy padding but the frame should still end approximately at the bitstream end */ if((s->workaround_bugs&FF_BUG_NO_PADDING) && s->error_recognition>=3) max_extra+= 
48; else if((s->workaround_bugs&FF_BUG_NO_PADDING)) max_extra+= 256*256*256*64; if(left>max_extra){ av_log(s->avctx, AV_LOG_ERROR, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24)); } else if(left<0){ av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left); }else ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); return 0; } av_log(s->avctx, AV_LOG_ERROR, "slice end not reached but screenspace end (%d left %06X, score= %d)\n", get_bits_left(&s->gb), show_bits(&s->gb, 24), s->padding_bug_score); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return -1; } int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s = avctx->priv_data; int ret; AVFrame *pict = data; #ifdef PRINT_FRAME_TIME uint64_t time= rdtsc(); #endif s->flags= avctx->flags; s->flags2= avctx->flags2; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay==0 && s->next_picture_ptr) { *pict= *(AVFrame*)s->next_picture_ptr; s->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next; if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){ next= ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size); }else if(CONFIG_H263_DECODER && s->codec_id==CODEC_ID_H263){ next= ff_h263_find_frame_end(&s->parse_context, buf, buf_size); }else{ av_log(s->avctx, AV_LOG_ERROR, "this codec does not support truncated bitstreams\n"); return -1; } if( ff_combine_frame(&s->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 ) return buf_size; } retry: if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8); }else init_get_bits(&s->gb, buf, 
buf_size*8); s->bitstream_buffer_size=0; if (!s->context_initialized) { if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix return -1; } /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anyting in there */ if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } /* let's go :-) */ if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5) { ret= ff_wmv2_decode_picture_header(s); } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) { ret = msmpeg4_decode_picture_header(s); } else if (CONFIG_MPEG4_DECODER && s->h263_pred) { if(s->avctx->extradata_size && s->picture_number==0){ GetBitContext gb; init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8); ret = ff_mpeg4_decode_picture_header(s, &gb); } ret = ff_mpeg4_decode_picture_header(s, &s->gb); } else if (CONFIG_H263I_DECODER && s->codec_id == CODEC_ID_H263I) { ret = ff_intel_h263_decode_picture_header(s); } else if (CONFIG_FLV_DECODER && s->h263_flv) { ret = ff_flv_decode_picture_header(s); } else { ret = h263_decode_picture_header(s); } if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size); /* skip if the header was thrashed */ if (ret < 0){ av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return -1; } avctx->has_b_frames= !s->low_delay; if(s->xvid_build==-1 && s->divx_version==-1 && s->lavc_build==-1){ if(s->stream_codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVIX") || s->codec_tag == AV_RL32("RMP4") || s->codec_tag == AV_RL32("SIPP") ) s->xvid_build= 0; #if 0 if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && s->vol_control_parameters==1 && s->padding_bug_score > 0 && s->low_delay) // XVID with modified fourcc s->xvid_build= 0; #endif } if(s->xvid_build==-1 && s->divx_version==-1 && s->lavc_build==-1){ if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && 
s->vol_control_parameters==0) s->divx_version= 400; //divx 4 } if(s->xvid_build>=0 && s->divx_version>=0){ s->divx_version= s->divx_build= -1; } if(s->workaround_bugs&FF_BUG_AUTODETECT){ if(s->codec_tag == AV_RL32("XVIX")) s->workaround_bugs|= FF_BUG_XVID_ILACE; if(s->codec_tag == AV_RL32("UMP4")){ s->workaround_bugs|= FF_BUG_UMP4; } if(s->divx_version>=500 && s->divx_build<1814){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA; } if(s->divx_version>502 && s->divx_build<1814){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA2; } if(s->xvid_build<=3U) s->padding_bug_score= 256*256*256*64; if(s->xvid_build<=1U) s->workaround_bugs|= FF_BUG_QPEL_CHROMA; if(s->xvid_build<=12U) s->workaround_bugs|= FF_BUG_EDGE; if(s->xvid_build<=32U) s->workaround_bugs|= FF_BUG_DC_CLIP; #define SET_QPEL_FUNC(postfix1, postfix2) \ s->dsp.put_ ## postfix1 = ff_put_ ## postfix2;\ s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;\ s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if(s->lavc_build<4653U) s->workaround_bugs|= FF_BUG_STD_QPEL; if(s->lavc_build<4655U) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; if(s->lavc_build<4670U){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->lavc_build<=4712U) s->workaround_bugs|= FF_BUG_DC_CLIP; if(s->divx_version>=0) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; //printf("padding_bug_score: %d\n", s->padding_bug_score); if(s->divx_version==501 && s->divx_build==20020416) s->padding_bug_score= 256*256*256*64; if(s->divx_version<500U){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->divx_version>=0) s->workaround_bugs|= FF_BUG_HPEL_CHROMA; #if 0 if(s->divx_version==500) s->padding_bug_score= 256*256*256*64; /* very ugly XVID padding bug detection FIXME/XXX solve this differently * Let us hope this at least works. 
*/ if( s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==-1 && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0) s->workaround_bugs|= FF_BUG_NO_PADDING; if(s->lavc_build<4609U) //FIXME not sure about the version num but a 4609 file seems ok s->workaround_bugs|= FF_BUG_NO_PADDING; #endif } if(s->workaround_bugs& FF_BUG_STD_QPEL){ SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c) } if(avctx->debug & FF_DEBUG_BUGS) av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", s->workaround_bugs, s->lavc_build, s->xvid_build, s->divx_version, s->divx_build, s->divx_packed ? "p" : ""); #if 0 // dump bits per frame / qp / complexity { static FILE *f=NULL; if(!f) f=fopen("rate_qp_cplx.txt", "w"); fprintf(f, "%d %d %f\n", buf_size, s->qscale, buf_size*(double)s->qscale); } #endif #if HAVE_MMX if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (mm_flags & FF_MM_MMX)){ avctx->idct_algo= FF_IDCT_XVIDMMX; avctx->coded_width= 0; // force reinit // dsputil_init(&s->dsp, avctx); s->picture_number=0; } #endif /* After H263 & mpeg4 header decode we have the height, width,*/ /* and other parameters. 
So then we could init the picture */ /* FIXME: By the way H263 decoder is evolving it should have */ /* an H263EncContext */ if ( s->width != avctx->coded_width || s->height != avctx->coded_height) { /* H.263 could change picture size any time */ ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat s->parse_context.buffer=0; MPV_common_end(s); s->parse_context= pc; } if (!s->context_initialized) { avcodec_set_dimensions(avctx, s->width, s->height); goto retry; } if((s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P || s->codec_id == CODEC_ID_H263I)) s->gob_index = ff_h263_get_gob_height(s); // for hurry_up==5 s->current_picture.pict_type= s->pict_type; s->current_picture.key_frame= s->pict_type == FF_I_TYPE; /* skip B-frames if we don't have reference frames */ if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size); /* skip b frames if we are in a hurry */ if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size); if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE) || avctx->skip_frame >= AVDISCARD_ALL) return get_consumed_bytes(s, buf_size); /* skip everything if we are in a hurry>=5 */ if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size); if(s->next_p_frame_damaged){ if(s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size); else s->next_p_frame_damaged=0; } if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab; }else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){ s->me.qpel_put= s->dsp.put_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; }else{ s->me.qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; } if(MPV_frame_start(s, avctx) < 0) return -1; if (CONFIG_MPEG4_VDPAU_DECODER 
&& (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) { ff_vdpau_mpeg4_decode_picture(s, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); goto frame_end; } if (avctx->hwaccel) { if (avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer) < 0) return -1; } ff_er_frame_start(s); //the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type //which is not available before MPV_frame_start() if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){ ret = ff_wmv2_decode_secondary_picture_header(s); if(ret<0) return ret; if(ret==1) goto intrax8_decoded; } /* decode each macroblock */ s->mb_x=0; s->mb_y=0; decode_slice(s); while(s->mb_y<s->mb_height){ if(s->msmpeg4_version){ if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits) break; }else{ if(ff_h263_resync(s)<0) break; } if(s->msmpeg4_version<4 && s->h263_pred) ff_mpeg4_clean_buffers(s); decode_slice(s); } if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==FF_I_TYPE) if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){ s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR; } assert(s->bitstream_buffer_size==0); frame_end: /* divx 5.01+ bistream reorder stuff */ if(s->codec_id==CODEC_ID_MPEG4 && s->divx_packed){ int current_pos= get_bits_count(&s->gb)>>3; int startcode_found=0; if(buf_size - current_pos > 5){ int i; for(i=current_pos; i<buf_size-3; i++){ if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){ startcode_found=1; break; } } } if(s->gb.buffer == s->bitstream_buffer && buf_size>7 && s->xvid_build>=0){ //xvid style startcode_found=1; current_pos=0; } if(startcode_found){ av_fast_malloc( &s->bitstream_buffer, &s->allocated_bitstream_buffer_size, buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE); if (!s->bitstream_buffer) return AVERROR(ENOMEM); memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); 
s->bitstream_buffer_size= buf_size - current_pos; } } intrax8_decoded: ff_er_frame_end(s); if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) return -1; } MPV_frame_end(s); assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); assert(s->current_picture.pict_type == s->pict_type); if (s->pict_type == FF_B_TYPE || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; } else if (s->last_picture_ptr != NULL) { *pict= *(AVFrame*)s->last_picture_ptr; } if(s->last_picture_ptr || s->low_delay){ *data_size = sizeof(AVFrame); ff_print_debug_info(s, pict); } #ifdef PRINT_FRAME_TIME av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time); #endif return get_consumed_bytes(s, buf_size); } AVCodec h263_decoder = { "h263", AVMEDIA_TYPE_VIDEO, CODEC_ID_H263, sizeof(MpegEncContext), ff_h263_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, .flush= ff_mpeg_flush, .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"), .pix_fmts= ff_hwaccel_pixfmt_list_420, };
123linslouis-android-video-cutter
jni/libavcodec/h263dec.c
C
asf20
24,896
/* * Atrac 1 compatible decoder * Copyright (c) 2009 Maxim Poliakovski * Copyright (c) 2009 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Atrac 1 compatible decoder. * This decoder handles raw ATRAC1 data and probably SDDS data. */ /* Many thanks to Tim Craig for all the help! 
*/ #include <math.h> #include <stddef.h> #include <stdio.h> #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "fft.h" #include "atrac.h" #include "atrac1data.h" #define AT1_MAX_BFU 52 ///< max number of block floating units in a sound unit #define AT1_SU_SIZE 212 ///< number of bytes in a sound unit #define AT1_SU_SAMPLES 512 ///< number of samples in a sound unit #define AT1_FRAME_SIZE AT1_SU_SIZE * 2 #define AT1_SU_MAX_BITS AT1_SU_SIZE * 8 #define AT1_MAX_CHANNELS 2 #define AT1_QMF_BANDS 3 #define IDX_LOW_BAND 0 #define IDX_MID_BAND 1 #define IDX_HIGH_BAND 2 /** * Sound unit struct, one unit is used per channel */ typedef struct { int log2_block_count[AT1_QMF_BANDS]; ///< log2 number of blocks in a band int num_bfus; ///< number of Block Floating Units float* spectrum[2]; DECLARE_ALIGNED(16, float, spec1)[AT1_SU_SAMPLES]; ///< mdct buffer DECLARE_ALIGNED(16, float, spec2)[AT1_SU_SAMPLES]; ///< mdct buffer DECLARE_ALIGNED(16, float, fst_qmf_delay)[46]; ///< delay line for the 1st stacked QMF filter DECLARE_ALIGNED(16, float, snd_qmf_delay)[46]; ///< delay line for the 2nd stacked QMF filter DECLARE_ALIGNED(16, float, last_qmf_delay)[256+23]; ///< delay line for the last stacked QMF filter } AT1SUCtx; /** * The atrac1 context, holds all needed parameters for decoding */ typedef struct { AT1SUCtx SUs[AT1_MAX_CHANNELS]; ///< channel sound unit DECLARE_ALIGNED(16, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer DECLARE_ALIGNED(16, float, low)[256]; DECLARE_ALIGNED(16, float, mid)[256]; DECLARE_ALIGNED(16, float, high)[512]; float* bands[3]; DECLARE_ALIGNED(16, float, out_samples)[AT1_MAX_CHANNELS][AT1_SU_SAMPLES]; FFTContext mdct_ctx[3]; int channels; DSPContext dsp; } AT1Ctx; /** size of the transform in samples in the long mode for each QMF band */ static const uint16_t samples_per_band[3] = {128, 128, 256}; static const uint8_t mdct_long_nbits[3] = {7, 7, 8}; static void at1_imdct(AT1Ctx *q, float *spec, float *out, int nbits, int 
rev_spec) { FFTContext* mdct_context = &q->mdct_ctx[nbits - 5 - (nbits > 6)]; int transf_size = 1 << nbits; if (rev_spec) { int i; for (i = 0; i < transf_size / 2; i++) FFSWAP(float, spec[i], spec[transf_size - 1 - i]); } ff_imdct_half(mdct_context, out, spec); } static int at1_imdct_block(AT1SUCtx* su, AT1Ctx *q) { int band_num, band_samples, log2_block_count, nbits, num_blocks, block_size; unsigned int start_pos, ref_pos = 0, pos = 0; for (band_num = 0; band_num < AT1_QMF_BANDS; band_num++) { float *prev_buf; int j; band_samples = samples_per_band[band_num]; log2_block_count = su->log2_block_count[band_num]; /* number of mdct blocks in the current QMF band: 1 - for long mode */ /* 4 for short mode(low/middle bands) and 8 for short mode(high band)*/ num_blocks = 1 << log2_block_count; if (num_blocks == 1) { /* mdct block size in samples: 128 (long mode, low & mid bands), */ /* 256 (long mode, high band) and 32 (short mode, all bands) */ block_size = band_samples >> log2_block_count; /* calc transform size in bits according to the block_size_mode */ nbits = mdct_long_nbits[band_num] - log2_block_count; if (nbits != 5 && nbits != 7 && nbits != 8) return -1; } else { block_size = 32; nbits = 5; } start_pos = 0; prev_buf = &su->spectrum[1][ref_pos + band_samples - 16]; for (j=0; j < num_blocks; j++) { at1_imdct(q, &q->spec[pos], &su->spectrum[0][ref_pos + start_pos], nbits, band_num); /* overlap and window */ q->dsp.vector_fmul_window(&q->bands[band_num][start_pos], prev_buf, &su->spectrum[0][ref_pos + start_pos], ff_sine_32, 0, 16); prev_buf = &su->spectrum[0][ref_pos+start_pos + 16]; start_pos += block_size; pos += block_size; } if (num_blocks == 1) memcpy(q->bands[band_num] + 32, &su->spectrum[0][ref_pos + 16], 240 * sizeof(float)); ref_pos += band_samples; } /* Swap buffers so the mdct overlap works */ FFSWAP(float*, su->spectrum[0], su->spectrum[1]); return 0; } /** * Parse the block size mode byte */ static int at1_parse_bsm(GetBitContext* gb, int 
log2_block_cnt[AT1_QMF_BANDS]) { int log2_block_count_tmp, i; for (i = 0; i < 2; i++) { /* low and mid band */ log2_block_count_tmp = get_bits(gb, 2); if (log2_block_count_tmp & 1) return -1; log2_block_cnt[i] = 2 - log2_block_count_tmp; } /* high band */ log2_block_count_tmp = get_bits(gb, 2); if (log2_block_count_tmp != 0 && log2_block_count_tmp != 3) return -1; log2_block_cnt[IDX_HIGH_BAND] = 3 - log2_block_count_tmp; skip_bits(gb, 2); return 0; } static int at1_unpack_dequant(GetBitContext* gb, AT1SUCtx* su, float spec[AT1_SU_SAMPLES]) { int bits_used, band_num, bfu_num, i; uint8_t idwls[AT1_MAX_BFU]; ///< the word length indexes for each BFU uint8_t idsfs[AT1_MAX_BFU]; ///< the scalefactor indexes for each BFU /* parse the info byte (2nd byte) telling how much BFUs were coded */ su->num_bfus = bfu_amount_tab1[get_bits(gb, 3)]; /* calc number of consumed bits: num_BFUs * (idwl(4bits) + idsf(6bits)) + log2_block_count(8bits) + info_byte(8bits) + info_byte_copy(8bits) + log2_block_count_copy(8bits) */ bits_used = su->num_bfus * 10 + 32 + bfu_amount_tab2[get_bits(gb, 2)] + (bfu_amount_tab3[get_bits(gb, 3)] << 1); /* get word length index (idwl) for each BFU */ for (i = 0; i < su->num_bfus; i++) idwls[i] = get_bits(gb, 4); /* get scalefactor index (idsf) for each BFU */ for (i = 0; i < su->num_bfus; i++) idsfs[i] = get_bits(gb, 6); /* zero idwl/idsf for empty BFUs */ for (i = su->num_bfus; i < AT1_MAX_BFU; i++) idwls[i] = idsfs[i] = 0; /* read in the spectral data and reconstruct MDCT spectrum of this channel */ for (band_num = 0; band_num < AT1_QMF_BANDS; band_num++) { for (bfu_num = bfu_bands_t[band_num]; bfu_num < bfu_bands_t[band_num+1]; bfu_num++) { int pos; int num_specs = specs_per_bfu[bfu_num]; int word_len = !!idwls[bfu_num] + idwls[bfu_num]; float scale_factor = sf_table[idsfs[bfu_num]]; bits_used += word_len * num_specs; /* add number of bits consumed by current BFU */ /* check for bitstream overflow */ if (bits_used > AT1_SU_MAX_BITS) return -1; /* get 
the position of the 1st spec according to the block size mode */ pos = su->log2_block_count[band_num] ? bfu_start_short[bfu_num] : bfu_start_long[bfu_num]; if (word_len) { float max_quant = 1.0 / (float)((1 << (word_len - 1)) - 1); for (i = 0; i < num_specs; i++) { /* read in a quantized spec and convert it to * signed int and then inverse quantization */ spec[pos+i] = get_sbits(gb, word_len) * scale_factor * max_quant; } } else { /* word_len = 0 -> empty BFU, zero all specs in the emty BFU */ memset(&spec[pos], 0, num_specs * sizeof(float)); } } } return 0; } static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut) { float temp[256]; float iqmf_temp[512 + 46]; /* combine low and middle bands */ atrac_iqmf(q->bands[0], q->bands[1], 128, temp, su->fst_qmf_delay, iqmf_temp); /* delay the signal of the high band by 23 samples */ memcpy( su->last_qmf_delay, &su->last_qmf_delay[256], sizeof(float) * 23); memcpy(&su->last_qmf_delay[23], q->bands[2], sizeof(float) * 256); /* combine (low + middle) and high bands */ atrac_iqmf(temp, su->last_qmf_delay, 256, pOut, su->snd_qmf_delay, iqmf_temp); } static int atrac1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AT1Ctx *q = avctx->priv_data; int ch, ret, i; GetBitContext gb; float* samples = data; if (buf_size < 212 * q->channels) { av_log(q,AV_LOG_ERROR,"Not enought data to decode!\n"); return -1; } for (ch = 0; ch < q->channels; ch++) { AT1SUCtx* su = &q->SUs[ch]; init_get_bits(&gb, &buf[212 * ch], 212 * 8); /* parse block_size_mode, 1st byte */ ret = at1_parse_bsm(&gb, su->log2_block_count); if (ret < 0) return ret; ret = at1_unpack_dequant(&gb, su, q->spec); if (ret < 0) return ret; ret = at1_imdct_block(su, q); if (ret < 0) return ret; at1_subband_synthesis(q, su, q->out_samples[ch]); } /* interleave; FIXME, should create/use a DSP function */ if (q->channels == 1) { /* mono */ memcpy(samples, 
q->out_samples[0], AT1_SU_SAMPLES * 4); } else { /* stereo */ for (i = 0; i < AT1_SU_SAMPLES; i++) { samples[i * 2] = q->out_samples[0][i]; samples[i * 2 + 1] = q->out_samples[1][i]; } } *data_size = q->channels * AT1_SU_SAMPLES * sizeof(*samples); return avctx->block_align; } static av_cold int atrac1_decode_init(AVCodecContext *avctx) { AT1Ctx *q = avctx->priv_data; avctx->sample_fmt = SAMPLE_FMT_FLT; q->channels = avctx->channels; /* Init the mdct transforms */ ff_mdct_init(&q->mdct_ctx[0], 6, 1, -1.0/ (1 << 15)); ff_mdct_init(&q->mdct_ctx[1], 8, 1, -1.0/ (1 << 15)); ff_mdct_init(&q->mdct_ctx[2], 9, 1, -1.0/ (1 << 15)); ff_init_ff_sine_windows(5); atrac_generate_tables(); dsputil_init(&q->dsp, avctx); q->bands[0] = q->low; q->bands[1] = q->mid; q->bands[2] = q->high; /* Prepare the mdct overlap buffers */ q->SUs[0].spectrum[0] = q->SUs[0].spec1; q->SUs[0].spectrum[1] = q->SUs[0].spec2; q->SUs[1].spectrum[0] = q->SUs[1].spec1; q->SUs[1].spectrum[1] = q->SUs[1].spec2; return 0; } static av_cold int atrac1_decode_end(AVCodecContext * avctx) { AT1Ctx *q = avctx->priv_data; ff_mdct_end(&q->mdct_ctx[0]); ff_mdct_end(&q->mdct_ctx[1]); ff_mdct_end(&q->mdct_ctx[2]); return 0; } AVCodec atrac1_decoder = { .name = "atrac1", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_ATRAC1, .priv_data_size = sizeof(AT1Ctx), .init = atrac1_decode_init, .close = atrac1_decode_end, .decode = atrac1_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"), };
123linslouis-android-video-cutter
jni/libavcodec/atrac1.c
C
asf20
12,482
/*
 * Copyright (c) 2007-2008 Ian Caulfield
 *               2009 Ramiro Polla
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/mlp.h"
#include "dsputil.h"

/**
 * Run the MLP FIR+IIR prediction filter over one channel.
 *
 * The two filter histories share a single state array: the FIR history
 * starts at @p state and the IIR history MAX_BLOCKSIZE + MAX_FIR_ORDER
 * entries later; likewise the IIR coefficients follow the FIR ones at
 * offset MAX_FIR_ORDER inside @p coeff.  Both histories grow downwards
 * (new entries are pushed with a pre-decrement store).
 *
 * @param state         combined FIR/IIR history buffer
 * @param coeff         combined FIR/IIR coefficient array
 * @param firorder      number of FIR taps actually used
 * @param iirorder      number of IIR taps actually used
 * @param filter_shift  right shift applied to the accumulated prediction
 * @param mask          output mask clearing bits below the quantization level
 * @param blocksize     number of samples to filter
 * @param sample_buffer in/out samples, interleaved with stride MAX_CHANNELS
 */
static void ff_mlp_filter_channel(int32_t *state, const int32_t *coeff,
                                  int firorder, int iirorder,
                                  unsigned int filter_shift, int32_t mask,
                                  int blocksize, int32_t *sample_buffer)
{
    int32_t *fir_hist = state;
    int32_t *iir_hist = state + MAX_BLOCKSIZE + MAX_FIR_ORDER;
    const int32_t *fir_taps = coeff;
    const int32_t *iir_taps = coeff + MAX_FIR_ORDER;
    int remaining = blocksize;

    while (remaining-- > 0) {
        int32_t input = *sample_buffer;
        unsigned int tap;
        int64_t acc = 0;
        int32_t filtered;

        /* 64-bit accumulation: products of two 24-bit-ish values summed
         * over up to MAX_FIR_ORDER + MAX_IIR_ORDER taps */
        for (tap = 0; tap < firorder; tap++)
            acc += (int64_t) fir_hist[tap] * fir_taps[tap];
        for (tap = 0; tap < iirorder; tap++)
            acc += (int64_t) iir_hist[tap] * iir_taps[tap];

        acc >>= filter_shift;
        filtered = (acc + input) & mask;

        /* push the new sample onto both histories (pre-decrement:
         * the histories run backwards through the state buffer) */
        *--fir_hist = filtered;
        *--iir_hist = filtered - acc;

        *sample_buffer = filtered;
        sample_buffer += MAX_CHANNELS;
    }
}

/**
 * Install the C implementation of the MLP filter, then let the x86
 * backend override it where available.
 */
void ff_mlp_init(DSPContext* c, AVCodecContext *avctx)
{
    c->mlp_filter_channel = ff_mlp_filter_channel;
    if (ARCH_X86)
        ff_mlp_init_x86(c, avctx);
}
123linslouis-android-video-cutter
jni/libavcodec/mlpdsp.c
C
asf20
2,155
/*
 * H261 common code
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2004 Maarten Daniels
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * h261codec.
 */

#include "dsputil.h"
#include "avcodec.h"
#include "h261.h"

#define IS_FIL(a)    ((a)&MB_TYPE_H261_FIL)

uint8_t ff_h261_rl_table_store[2][2*MAX_RUN + MAX_LEVEL + 3];

/**
 * Apply the H.261 in-loop deblocking filter to the current macroblock.
 * The filter runs only on macroblocks whose type carries the FIL flag;
 * the 16x16 luma block is filtered as four 8x8 quadrants, chroma as one
 * 8x8 block per plane.
 */
void ff_h261_loop_filter(MpegEncContext *s){
    H261Context *h = (H261Context*)s;
    const int stride_y = s->linesize;
    const int stride_c = s->uvlinesize;
    uint8_t *luma = s->dest[0];
    uint8_t *cb   = s->dest[1];
    uint8_t *cr   = s->dest[2];

    if (!IS_FIL(h->mtype))
        return;

    /* four 8x8 luma quadrants: top-left, top-right, bottom-left, bottom-right */
    s->dsp.h261_loop_filter(luma,                    stride_y);
    s->dsp.h261_loop_filter(luma + 8,                stride_y);
    s->dsp.h261_loop_filter(luma + 8 * stride_y,     stride_y);
    s->dsp.h261_loop_filter(luma + 8 * stride_y + 8, stride_y);
    s->dsp.h261_loop_filter(cb, stride_c);
    s->dsp.h261_loop_filter(cr, stride_c);
}
123linslouis-android-video-cutter
jni/libavcodec/h261.c
C
asf20
1,771
/*
 * Tiertex Limited SEQ Video Decoder
 * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Tiertex Limited SEQ video decoder
 */

#include "avcodec.h"
/* SEQ bitstreams are little-endian; select the LE bitstream reader
 * before including get_bits.h */
#define ALT_BITSTREAM_READER_LE
#include "get_bits.h"

typedef struct SeqVideoContext {
    AVCodecContext *avctx;  /* owning codec context, used for logging */
    AVFrame frame;          /* 256x128 PAL8 frame, reused across calls */
} SeqVideoContext;

/**
 * Unpack one RLE-coded 8x8 block.
 *
 * First reads up to 64 signed 4-bit run codes from src (stopping early once
 * the absolute run lengths cover dst_size), then replays them: a negative
 * code means "repeat the next src byte -code times", a non-negative code
 * means "copy code literal bytes from src".
 *
 * @return pointer to the first src byte after the consumed data
 */
static const unsigned char *seq_unpack_rle_block(const unsigned char *src, unsigned char *dst, int dst_size)
{
    int i, len, sz;
    GetBitContext gb;
    int code_table[64];

    /* get the rle codes (at most 64 bytes) */
    init_get_bits(&gb, src, 64 * 8);
    for (i = 0, sz = 0; i < 64 && sz < dst_size; i++) {
        code_table[i] = get_sbits(&gb, 4);
        sz += FFABS(code_table[i]);
    }
    /* round the consumed bit count up to a whole byte */
    src += (get_bits_count(&gb) + 7) / 8;

    /* do the rle unpacking */
    for (i = 0; i < 64 && dst_size > 0; i++) {
        len = code_table[i];
        if (len < 0) {
            len = -len;
            /* run: replicate one source byte (write clamped to dst_size) */
            memset(dst, *src++, FFMIN(len, dst_size));
        } else {
            /* literal copy (write clamped to dst_size) */
            memcpy(dst, src, FFMIN(len, dst_size));
            src += len;
        }
        /* NOTE(review): dst advances by the unclamped len; dst_size going
         * negative then terminates the loop — verify no later write relies
         * on dst staying inside the 64-byte block */
        dst += len;
        dst_size -= len;
    }
    return src;
}

/**
 * Decode opcode 1: a fully coded 8x8 block.
 *
 * If the length byte has bit 7 set, the block is RLE-packed and stored
 * either row-wise (subcode 1) or column-wise (subcode 2).  Otherwise the
 * block is palette-indexed: len colors are listed, followed by 8 bytes of
 * indices per row at the minimum bit width for len entries.
 *
 * @return pointer to the first src byte after the consumed data
 */
static const unsigned char *seq_decode_op1(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
{
    const unsigned char *color_table;
    int b, i, len, bits;
    GetBitContext gb;
    unsigned char block[8 * 8];

    len = *src++;
    if (len & 0x80) {
        switch (len & 3) {
        case 1:
            /* row-major: copy the unpacked block a row at a time */
            src = seq_unpack_rle_block(src, block, sizeof(block));
            for (b = 0; b < 8; b++) {
                memcpy(dst, &block[b * 8], 8);
                dst += seq->frame.linesize[0];
            }
            break;
        case 2:
            /* column-major: transpose the unpacked block into the frame */
            src = seq_unpack_rle_block(src, block, sizeof(block));
            for (i = 0; i < 8; i++) {
                for (b = 0; b < 8; b++)
                    dst[b * seq->frame.linesize[0]] = block[i * 8 + b];
                ++dst;
            }
            break;
        }
    } else {
        /* palette-indexed block: len colors, then ceil(log2(len))-bit indices */
        color_table = src;
        src += len;
        /* NOTE(review): len == 0 would read ff_log2_tab[-1] — presumably
         * never produced by valid streams, but unchecked against corrupt
         * input; confirm upstream hardening */
        bits = ff_log2_tab[len - 1] + 1;
        init_get_bits(&gb, src, bits * 8 * 8);
        src += bits * 8;
        for (b = 0; b < 8; b++) {
            for (i = 0; i < 8; i++)
                dst[i] = color_table[get_bits(&gb, bits)];
            dst += seq->frame.linesize[0];
        }
    }
    return src;
}

/**
 * Decode opcode 2: a raw (uncompressed) 8x8 block, 64 bytes row by row.
 * @return pointer to the first src byte after the consumed data
 */
static const unsigned char *seq_decode_op2(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
{
    int i;

    for (i = 0; i < 8; i++) {
        memcpy(dst, src, 8);
        src += 8;
        dst += seq->frame.linesize[0];
    }
    return src;
}

/**
 * Decode opcode 3: a sparse update — (position, value) pairs where the
 * position byte packs y in bits 3..5 and x in bits 0..2; bit 7 marks the
 * final pair.
 * @return pointer to the first src byte after the consumed data
 */
static const unsigned char *seq_decode_op3(SeqVideoContext *seq, const unsigned char *src, unsigned char *dst)
{
    int pos, offset;

    do {
        pos = *src++;
        offset = ((pos >> 3) & 7) * seq->frame.linesize[0] + (pos & 7);
        dst[offset] = *src++;
    } while (!(pos & 0x80));
    return src;
}

/**
 * Decode one SEQ video chunk into seq->frame.
 *
 * Flags byte: bit 0 = a full 256-entry palette follows (6-bit components
 * expanded to 8 bits); bit 1 = a 128-byte opcode map follows (2 bits per
 * 8x8 block of the 256x128 frame) plus the per-block payloads.
 * Opcode 0 means "block unchanged".
 */
static void seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int data_size)
{
    GetBitContext gb;
    int flags, i, j, x, y, op;
    unsigned char c[3];
    unsigned char *dst;
    uint32_t *palette;

    flags = *data++;

    if (flags & 1) {
        palette = (uint32_t *)seq->frame.data[1];
        for (i = 0; i < 256; i++) {
            /* expand 6-bit palette components to 8 bits, replicating the
             * top bits into the bottom */
            for (j = 0; j < 3; j++, data++)
                c[j] = (*data << 2) | (*data >> 4);
            palette[i] = AV_RB24(c);
        }
        seq->frame.palette_has_changed = 1;
    }

    if (flags & 2) {
        /* 2-bit opcode per 8x8 block, 32x16 blocks = 128 bytes */
        init_get_bits(&gb, data, 128 * 8);
        data += 128;
        for (y = 0; y < 128; y += 8)
            for (x = 0; x < 256; x += 8) {
                dst = &seq->frame.data[0][y * seq->frame.linesize[0] + x];
                op = get_bits(&gb, 2);
                switch (op) {
                case 1:
                    data = seq_decode_op1(seq, data, dst);
                    break;
                case 2:
                    data = seq_decode_op2(seq, data, dst);
                    break;
                case 3:
                    data = seq_decode_op3(seq, data, dst);
                    break;
                }
            }
    }
}

static av_cold int seqvideo_decode_init(AVCodecContext *avctx)
{
    SeqVideoContext *seq = avctx->priv_data;

    seq->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;

    seq->frame.data[0] = NULL;

    return 0;
}

/**
 * Decode one packet.  The frame buffer is re-gotten with PRESERVE/REUSABLE
 * hints because SEQ only ever updates changed blocks of the previous frame.
 */
static int seqvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SeqVideoContext *seq = avctx->priv_data;

    seq->frame.reference = 1;
    seq->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &seq->frame)) {
        av_log(seq->avctx, AV_LOG_ERROR, "tiertexseqvideo: reget_buffer() failed\n");
        return -1;
    }

    seqvideo_decode(seq, buf, buf_size);

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = seq->frame;

    return buf_size;
}

static av_cold int seqvideo_decode_end(AVCodecContext *avctx)
{
    SeqVideoContext *seq = avctx->priv_data;

    if (seq->frame.data[0])
        avctx->release_buffer(avctx, &seq->frame);

    return 0;
}

AVCodec tiertexseqvideo_decoder = {
    "tiertexseqvideo",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_TIERTEXSEQVIDEO,
    sizeof(SeqVideoContext),
    seqvideo_decode_init,
    NULL,
    seqvideo_decode_end,
    seqvideo_decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("Tiertex Limited SEQ video"),
};
123linslouis-android-video-cutter
jni/libavcodec/tiertexseqv.c
C
asf20
6,415
/*
 * Header file for hardcoded motionpixels RGB to YUV table
 *
 * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef MOTIONPIXELS_TABLEGEN_H
#define MOTIONPIXELS_TABLEGEN_H

#include <stdint.h>

/* One packed YUV triple; components are 5-bit-range values stored in int8_t. */
typedef struct YuvPixel {
    int8_t y, v, u;
} YuvPixel;

/**
 * Convert one 5-bit-range YUV triple to packed RGB.
 *
 * With clip_rgb set, returns RGB555 with components clamped through
 * ff_cropTbl.  Otherwise returns the unclamped 5:5:5 packing when all
 * components fit in 5 bits, or the sentinel 1 << 15 when any component
 * is out of range (used by the table builder to skip the entry).
 */
static int mp_yuv_to_rgb(int y, int v, int u, int clip_rgb) {
    static const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int r, g, b;

    /* fixed-point BT.601-style coefficients scaled by 1000 */
    r = (1000 * y + 701 * v) / 1000;
    g = (1000 * y - 357 * v - 172 * u) / 1000;
    b = (1000 * y + 886 * u) / 1000;

    if (clip_rgb)
        /* *8 rescales 5-bit values into the crop table's 8-bit domain */
        return ((cm[r * 8] & 0xF8) << 7) | ((cm[g * 8] & 0xF8) << 2) | (cm[b * 8] >> 3);

    if ((unsigned)r < 32 && (unsigned)g < 32 && (unsigned)b < 32)
        return (r << 10) | (g << 5) | b;

    return 1 << 15;
}

#if CONFIG_HARDCODED_TABLES
#define motionpixels_tableinit()
#include "libavcodec/motionpixels_tables.h"
#else
/* Reverse lookup: RGB555 -> nearest representable YuvPixel, built lazily. */
static YuvPixel mp_rgb_yuv_table[1 << 15];

/**
 * Fill the holes in one 32-entry run of the table: entries whose y, u and
 * v are all still zero inherit the value of a filled neighbor, sweeping
 * down from above then up from below.
 */
static void mp_set_zero_yuv(YuvPixel *p)
{
    int i, j;

    for (i = 0; i < 31; ++i) {
        for (j = 31; j > i; --j)
            if (!(p[j].u | p[j].v | p[j].y))
                p[j] = p[j - 1];
        for (j = 0; j < 31 - i; ++j)
            if (!(p[j].u | p[j].v | p[j].y))
                p[j] = p[j + 1];
    }
}

/**
 * Build the RGB555 -> YUV table by forward-mapping every in-range YUV
 * triple (first writer wins), then filling the remaining holes per
 * 32-entry blue run.
 */
static void mp_build_rgb_yuv_table(YuvPixel *p)
{
    int y, v, u, i;

    for (y = 0; y <= 31; ++y)
        for (v = -31; v <= 31; ++v)
            for (u = -31; u <= 31; ++u) {
                i = mp_yuv_to_rgb(y, v, u, 0);
                /* skip out-of-range results (sentinel) and already-set slots */
                if (i < (1 << 15) && !(p[i].u | p[i].v | p[i].y)) {
                    p[i].y = y;
                    p[i].v = v;
                    p[i].u = u;
                }
            }

    for (i = 0; i < 1024; ++i)
        mp_set_zero_yuv(p + i * 32);
}

/* Lazy one-time init; entry 0's u component doubles as the "built" flag. */
static void motionpixels_tableinit(void)
{
    if (!mp_rgb_yuv_table[0].u)
        mp_build_rgb_yuv_table(mp_rgb_yuv_table);
}
#endif /* CONFIG_HARDCODED_TABLES */

#endif /* MOTIONPIXELS_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/motionpixels_tablegen.h
C
asf20
2,711
/*
 * Musepack decoder
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Musepack decoder
 * MPEG Audio Layer 1/2 -like codec with frames of 1152 samples
 * divided into 32 subbands.
 */

#ifndef AVCODEC_MPC_H
#define AVCODEC_MPC_H

#include "libavutil/lfg.h"
#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
#include "mpegaudio.h"
#include "mpcdata.h"

#define BANDS            32
#define SAMPLES_PER_BAND 36
#define MPC_FRAME_SIZE   (BANDS * SAMPLES_PER_BAND)

/** Subband structure - hold all variables for each subband */
typedef struct {
    int msf;           ///< mid-stereo flag
    int res[2];        ///< resolution per channel
    int scfi[2];       ///< scalefactor coding mode per channel
    int scf_idx[2][3]; ///< scalefactor indexes (three per channel)
    int Q[2];          ///< quantizer selector per channel
}Band;

typedef struct {
    DSPContext dsp;
    GetBitContext gb;
    int IS, MSS, gapless;       ///< intensity stereo, mid/side stereo, gapless flags
    int lastframelen;
    int maxbands, last_max_band;
    int last_bits_used;
    int oldDSCF[2][BANDS];      ///< previous scalefactors, for differential decoding
    Band bands[BANDS];
    int Q[2][MPC_FRAME_SIZE];   ///< dequantized spectrum
    int cur_frame, frames;
    uint8_t *bits;
    int buf_size;
    AVLFG rnd;                  ///< noise generator for intensity stereo
    int frames_to_skip;
    /* for synthesis */
    DECLARE_ALIGNED(16, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512*2];
    int synth_buf_offset[MPA_MAX_CHANNELS];
    DECLARE_ALIGNED(16, int32_t, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
} MPCContext;

/** Initialize the shared MPC decoder tables (call once before decoding). */
void ff_mpc_init(void);
/** Dequantize c->Q and run the MPEG-audio synthesis filter into dst. */
void ff_mpc_dequantize_and_synth(MPCContext *c, int maxband, void *dst);

#endif /* AVCODEC_MPC_H */
123linslouis-android-video-cutter
jni/libavcodec/mpc.h
C
asf20
2,145
/*
 * AC-3 parser prototypes
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_AC3_PARSER_H
#define AVCODEC_AC3_PARSER_H

#include "ac3.h"
#include "get_bits.h"

/**
 * Parses AC-3 frame header.
 * Parses the header up to the lfeon element, which is the first 52 or 54 bits
 * depending on the audio coding mode.
 * @param[in]  gbc BitContext containing the first 54 bits of the frame.
 * @param[out] hdr Pointer to struct where header info is written.
 * @return Returns 0 on success, -1 if there is a sync word mismatch,
 * -2 if the bsid (version) element is invalid, -3 if the fscod (sample rate)
 * element is invalid, or -4 if the frmsizecod (bit rate) element is invalid.
 */
int ff_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr);

/**
 * Parses AC-3 frame header and sets channel_map.
 * Parses the header up to the lfeon (channel_map in E-AC-3)
 * element, which is the first 52, 54 or 104 bits depending
 * on the audio coding mode.
 * @param[in]  gbc BitContext containing the first 54 bits of the frame.
 * @param[out] hdr Pointer to struct where header info is written.
 * @return value returned by ff_ac3_parse_header
 */
int ff_ac3_parse_header_full(GetBitContext *gbc, AC3HeaderInfo *hdr);

#endif /* AVCODEC_AC3_PARSER_H */
123linslouis-android-video-cutter
jni/libavcodec/ac3_parser.h
C
asf20
2,058
/*
 * (I)RDFT transforms
 * Copyright (c) 2009 Alex Converse <alex dot converse at gmail dot com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <math.h>
#include "libavutil/mathematics.h"
#include "fft.h"

/**
 * @file
 * (Inverse) Real Discrete Fourier Transforms.
 */

/* sin(2*pi*x/n) for 0<=x<n/4, followed by n/2<=x<3n/4 */
#if !CONFIG_HARDCODED_TABLES
SINTABLE(16);
SINTABLE(32);
SINTABLE(64);
SINTABLE(128);
SINTABLE(256);
SINTABLE(512);
SINTABLE(1024);
SINTABLE(2048);
SINTABLE(4096);
SINTABLE(8192);
SINTABLE(16384);
SINTABLE(32768);
SINTABLE(65536);
#endif
/* Indexed by log2(table size); entries below 16 points are unused. */
SINTABLE_CONST FFTSample * const ff_sin_tabs[] = {
    NULL, NULL, NULL, NULL,
    ff_sin_16, ff_sin_32, ff_sin_64, ff_sin_128, ff_sin_256, ff_sin_512, ff_sin_1024,
    ff_sin_2048, ff_sin_4096, ff_sin_8192, ff_sin_16384, ff_sin_32768, ff_sin_65536,
};

/** Map one real FFT into two parallel real even and odd FFTs. Then interleave
 * the two real FFTs into one complex FFT. Unmangle the results.
 * ref: http://www.engineeringproductivitytools.com/stuff/T0001/PT10.HTM
 */
static void ff_rdft_calc_c(RDFTContext* s, FFTSample* data)
{
    int i, i1, i2;
    FFTComplex ev, od;
    const int n = 1 << s->nbits;
    const float k1 = 0.5;
    /* k2 is +0.5 for the forward transform, -0.5 for the inverse */
    const float k2 = 0.5 - s->inverse;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;

    /* forward: run the half-size complex FFT first, then unmangle below;
     * inverse: mangle first, then run the complex FFT at the end */
    if (!s->inverse) {
        ff_fft_permute(&s->fft, (FFTComplex*)data);
        ff_fft_calc(&s->fft, (FFTComplex*)data);
    }
    /* i=0 is a special case because of packing, the DC term is real, so we
       are going to throw the N/2 term (also real) in with it. */
    ev.re = data[0];
    data[0] = ev.re+data[1];
    data[1] = ev.re-data[1];
    for (i = 1; i < (n>>2); i++) {
        i1 = 2*i;
        i2 = n-i1;
        /* Separate even and odd FFTs */
        ev.re =  k1*(data[i1  ]+data[i2  ]);
        od.im = -k2*(data[i1  ]-data[i2  ]);
        ev.im =  k1*(data[i1+1]-data[i2+1]);
        od.re =  k2*(data[i1+1]+data[i2+1]);
        /* Apply twiddle factors to the odd FFT and add to the even FFT */
        data[i1  ] =  ev.re + od.re*tcos[i] - od.im*tsin[i];
        data[i1+1] =  ev.im + od.im*tcos[i] + od.re*tsin[i];
        data[i2  ] =  ev.re - od.re*tcos[i] + od.im*tsin[i];
        data[i2+1] = -ev.im + od.im*tcos[i] + od.re*tsin[i];
    }
    /* the middle (N/4) bin only needs its sign adjusted; i == n>>2 here */
    data[2*i+1]=s->sign_convention*data[2*i+1];
    if (s->inverse) {
        data[0] *= k1;
        data[1] *= k1;
        ff_fft_permute(&s->fft, (FFTComplex*)data);
        ff_fft_calc(&s->fft, (FFTComplex*)data);
    }
}

/**
 * Set up an RDFT of 2^nbits points (4 <= nbits <= 16).
 * @return 0 on success, -1 on invalid size or FFT init failure
 */
av_cold int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans)
{
    int n = 1 << nbits;
    int i;
    const double theta = (trans == DFT_R2C || trans == DFT_C2R ? -1 : 1)*2*M_PI/n;

    s->nbits           = nbits;
    s->inverse         = trans == IDFT_C2R || trans == DFT_C2R;
    s->sign_convention = trans == IDFT_R2C || trans == DFT_C2R ? 1 : -1;

    if (nbits < 4 || nbits > 16)
        return -1;

    /* the RDFT is built on a complex FFT of half the size */
    if (ff_fft_init(&s->fft, nbits-1, trans == IDFT_C2R || trans == IDFT_R2C) < 0)
        return -1;

    ff_init_ff_cos_tabs(nbits);
    s->tcos = ff_cos_tabs[nbits];
    /* the second table half holds the n/2..3n/4 quarter used by C2R/R2C */
    s->tsin = ff_sin_tabs[nbits]+(trans == DFT_R2C || trans == DFT_C2R)*(n>>2);
#if !CONFIG_HARDCODED_TABLES
    /* NOTE(review): this writes into the shared ff_sin_tabs storage with a
     * sign that depends on the transform type — presumably contexts of
     * mixed directions at the same size would clobber each other; confirm */
    for (i = 0; i < (n>>2); i++) {
        s->tsin[i] = sin(i*theta);
    }
#endif
    s->rdft_calc = ff_rdft_calc_c;

    if (ARCH_ARM) ff_rdft_init_arm(s);

    return 0;
}

av_cold void ff_rdft_end(RDFTContext *s)
{
    ff_fft_end(&s->fft);
}
123linslouis-android-video-cutter
jni/libavcodec/rdft.c
C
asf20
4,211
/* * MSMPEG4 backend for ffmpeg encoder and decoder * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MSMPEG4 backend for ffmpeg encoder and decoder. */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "msmpeg4.h" #include "libavutil/x86_cpu.h" #include "h263.h" #include "mpeg4video.h" /* * You can also call this codec : MPEG4 with a twist ! 
* * TODO: * - (encoding) select best mv table (two choices) * - (encoding) select best vlc/dc table */ //#define DEBUG #define DC_VLC_BITS 9 #define V2_INTRA_CBPC_VLC_BITS 3 #define V2_MB_TYPE_VLC_BITS 7 #define MV_VLC_BITS 9 #define V2_MV_VLC_BITS 9 #define TEX_VLC_BITS 9 #define II_BITRATE 128*1024 #define MBAC_BITRATE 50*1024 #define DEFAULT_INTER_INDEX 3 static uint32_t v2_dc_lum_table[512][2]; static uint32_t v2_dc_chroma_table[512][2]; /* vc1 externs */ extern const uint8_t wmv3_dc_scale_table[32]; #ifdef DEBUG int frame_count = 0; #endif #include "msmpeg4data.h" #if CONFIG_ENCODERS //strangely gcc includes this even if it is not referenced static uint8_t rl_length[NB_RL_TABLES][MAX_LEVEL+1][MAX_RUN+1][2]; #endif //CONFIG_ENCODERS static uint8_t static_rl_table_store[NB_RL_TABLES][2][2*MAX_RUN + MAX_LEVEL + 3]; /* This table is practically identical to the one from h263 * except that it is inverted. */ static av_cold void init_h263_dc_for_msmpeg4(void) { int level, uni_code, uni_len; for(level=-256; level<256; level++){ int size, v, l; /* find number of bits */ size = 0; v = abs(level); while (v) { v >>= 1; size++; } if (level < 0) l= (-level) ^ ((1 << size) - 1); else l= level; /* luminance h263 */ uni_code= ff_mpeg4_DCtab_lum[size][0]; uni_len = ff_mpeg4_DCtab_lum[size][1]; uni_code ^= (1<<uni_len)-1; //M$ does not like compatibility if (size > 0) { uni_code<<=size; uni_code|=l; uni_len+=size; if (size > 8){ uni_code<<=1; uni_code|=1; uni_len++; } } v2_dc_lum_table[level+256][0]= uni_code; v2_dc_lum_table[level+256][1]= uni_len; /* chrominance h263 */ uni_code= ff_mpeg4_DCtab_chrom[size][0]; uni_len = ff_mpeg4_DCtab_chrom[size][1]; uni_code ^= (1<<uni_len)-1; //M$ does not like compatibility if (size > 0) { uni_code<<=size; uni_code|=l; uni_len+=size; if (size > 8){ uni_code<<=1; uni_code|=1; uni_len++; } } v2_dc_chroma_table[level+256][0]= uni_code; v2_dc_chroma_table[level+256][1]= uni_len; } } static av_cold void common_init(MpegEncContext * s) { static 
int initialized=0; switch(s->msmpeg4_version){ case 1: case 2: s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; break; case 3: if(s->workaround_bugs){ s->y_dc_scale_table= old_ff_y_dc_scale_table; s->c_dc_scale_table= wmv1_c_dc_scale_table; } else{ s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; } break; case 4: case 5: s->y_dc_scale_table= wmv1_y_dc_scale_table; s->c_dc_scale_table= wmv1_c_dc_scale_table; break; #if CONFIG_VC1_DECODER case 6: s->y_dc_scale_table= wmv3_dc_scale_table; s->c_dc_scale_table= wmv3_dc_scale_table; break; #endif } if(s->msmpeg4_version>=4){ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , wmv1_scantable[1]); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, wmv1_scantable[2]); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, wmv1_scantable[3]); ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , wmv1_scantable[0]); } //Note the default tables are set in common_init in mpegvideo.c if(!initialized){ initialized=1; init_h263_dc_for_msmpeg4(); } } #if CONFIG_ENCODERS /* build the table which associate a (x,y) motion vector to a vlc */ static void init_mv_table(MVTable *tab) { int i, x, y; tab->table_mv_index = av_malloc(sizeof(uint16_t) * 4096); /* mark all entries as not used */ for(i=0;i<4096;i++) tab->table_mv_index[i] = tab->n; for(i=0;i<tab->n;i++) { x = tab->table_mvx[i]; y = tab->table_mvy[i]; tab->table_mv_index[(x << 6) | y] = i; } } void ff_msmpeg4_code012(PutBitContext *pb, int n) { if (n == 0) { put_bits(pb, 1, 0); } else { put_bits(pb, 1, 1); put_bits(pb, 1, (n >= 2)); } } static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra){ int size=0; int code; int run_diff= intra ? 
0 : 1; code = get_rl_index(rl, last, run, level); size+= rl->table_vlc[code][1]; if (code == rl->n) { int level1, run1; level1 = level - rl->max_level[last][run]; if (level1 < 1) goto esc2; code = get_rl_index(rl, last, run, level1); if (code == rl->n) { esc2: size++; if (level > MAX_LEVEL) goto esc3; run1 = run - rl->max_run[last][level] - run_diff; if (run1 < 0) goto esc3; code = get_rl_index(rl, last, run1, level); if (code == rl->n) { esc3: /* third escape */ size+=1+1+6+8; } else { /* second escape */ size+= 1+1+ rl->table_vlc[code][1]; } } else { /* first escape */ size+= 1+1+ rl->table_vlc[code][1]; } } else { size++; } return size; } av_cold void ff_msmpeg4_encode_init(MpegEncContext *s) { static int init_done=0; int i; common_init(s); if(s->msmpeg4_version>=4){ s->min_qcoeff= -255; s->max_qcoeff= 255; } if (!init_done) { /* init various encoding tables */ init_done = 1; init_mv_table(&mv_tables[0]); init_mv_table(&mv_tables[1]); for(i=0;i<NB_RL_TABLES;i++) init_rl(&rl_table[i], static_rl_table_store[i]); for(i=0; i<NB_RL_TABLES; i++){ int level; for(level=0; level<=MAX_LEVEL; level++){ int run; for(run=0; run<=MAX_RUN; run++){ int last; for(last=0; last<2; last++){ rl_length[i][level][run][last]= get_size_of_code(s, &rl_table[ i], last, run, level, 0); } } } } } } static void find_best_tables(MpegEncContext * s) { int i; int best =-1, best_size =9999999; int chroma_best=-1, best_chroma_size=9999999; for(i=0; i<3; i++){ int level; int chroma_size=0; int size=0; if(i>0){// ;) size++; chroma_size++; } for(level=0; level<=MAX_LEVEL; level++){ int run; for(run=0; run<=MAX_RUN; run++){ int last; const int last_size= size + chroma_size; for(last=0; last<2; last++){ int inter_count = s->ac_stats[0][0][level][run][last] + s->ac_stats[0][1][level][run][last]; int intra_luma_count = s->ac_stats[1][0][level][run][last]; int intra_chroma_count= s->ac_stats[1][1][level][run][last]; if(s->pict_type==FF_I_TYPE){ size += intra_luma_count *rl_length[i ][level][run][last]; 
chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last]; }else{ size+= intra_luma_count *rl_length[i ][level][run][last] +intra_chroma_count*rl_length[i+3][level][run][last] +inter_count *rl_length[i+3][level][run][last]; } } if(last_size == size+chroma_size) break; } } if(size<best_size){ best_size= size; best= i; } if(chroma_size<best_chroma_size){ best_chroma_size= chroma_size; chroma_best= i; } } // printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n", // s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size); if(s->pict_type==FF_P_TYPE) chroma_best= best; memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2); s->rl_table_index = best; s->rl_chroma_table_index= chroma_best; if(s->pict_type != s->last_non_b_pict_type){ s->rl_table_index= 2; if(s->pict_type==FF_I_TYPE) s->rl_chroma_table_index= 1; else s->rl_chroma_table_index= 2; } } /* write MSMPEG4 compatible frame header */ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number) { find_best_tables(s); align_put_bits(&s->pb); put_bits(&s->pb, 2, s->pict_type - 1); put_bits(&s->pb, 5, s->qscale); if(s->msmpeg4_version<=2){ s->rl_table_index = 2; s->rl_chroma_table_index = 2; } s->dc_table_index = 1; s->mv_table_index = 1; /* only if P frame */ s->use_skip_mb_code = 1; /* only if P frame */ s->per_mb_rl_table = 0; if(s->msmpeg4_version==4) s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==FF_P_TYPE); //printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height); if (s->pict_type == FF_I_TYPE) { s->slice_height= s->mb_height/1; put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height); if(s->msmpeg4_version==4){ msmpeg4_encode_ext_header(s); if(s->bit_rate>MBAC_BITRATE) put_bits(&s->pb, 1, s->per_mb_rl_table); } if(s->msmpeg4_version>2){ if(!s->per_mb_rl_table){ ff_msmpeg4_code012(&s->pb, s->rl_chroma_table_index); ff_msmpeg4_code012(&s->pb, s->rl_table_index); 
            }
            put_bits(&s->pb, 1, s->dc_table_index);
        }
    } else {
        /* P frame: skip-MB flag usage, optional per-MB RL table, then
         * the table indices (v3+ only) */
        put_bits(&s->pb, 1, s->use_skip_mb_code);

        if(s->msmpeg4_version==4 && s->bit_rate>MBAC_BITRATE)
            put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(s->msmpeg4_version>2){
            if(!s->per_mb_rl_table)
                ff_msmpeg4_code012(&s->pb, s->rl_table_index);

            put_bits(&s->pb, 1, s->dc_table_index);

            put_bits(&s->pb, 1, s->mv_table_index);
        }
    }

    /* escape-3 code lengths are (re)negotiated per picture */
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;
}

/* Write the extension header: 5-bit frame rate, 11-bit bitrate in
 * kbit/s (clipped to 2047), and for version >= 3 the flipflop
 * rounding flag. */
void msmpeg4_encode_ext_header(MpegEncContext * s)
{
    put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29

    put_bits(&s->pb, 11, FFMIN(s->bit_rate/1024, 2047));

    if(s->msmpeg4_version>=3)
        put_bits(&s->pb, 1, s->flipflop_rounding);
    else
        assert(s->flipflop_rounding==0);
}

#endif //CONFIG_ENCODERS

/* predict coded block */
/* Predict the coded-block flag of block n from its neighbours and
 * return the prediction; *coded_block_ptr is set to the slot the
 * caller must update with the actual value.
 * Prediction rule: if top-left (B) == top (C), use left (A),
 * otherwise use top (C). */
int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
{
    int xy, wrap, pred, a, b, c;

    xy = s->block_index[n];
    wrap = s->b8_stride;

    /* B C
     * A X
     */
    a = s->coded_block[xy - 1       ];
    b = s->coded_block[xy - 1 - wrap];
    c = s->coded_block[xy     - wrap];

    if (b == c) {
        pred = a;
    } else {
        pred = c;
    }

    /* store value */
    *coded_block_ptr = &s->coded_block[xy];

    return pred;
}

#if CONFIG_ENCODERS

/* Encode one motion vector (both components, each pre-offset by +32
 * after modulo wrapping) using the currently selected MV table;
 * out-of-table vectors are escaped as two raw 6-bit fields. */
void ff_msmpeg4_encode_motion(MpegEncContext * s,
                              int mx, int my)
{
    int code;
    MVTable *mv;

    /* modulo encoding */
    /* WARNING : you cannot reach all the MVs even with the modulo
       encoding. This is a somewhat strange compromise they took !!!
*/ if (mx <= -64) mx += 64; else if (mx >= 64) mx -= 64; if (my <= -64) my += 64; else if (my >= 64) my -= 64; mx += 32; my += 32; #if 0 if ((unsigned)mx >= 64 || (unsigned)my >= 64) av_log(s->avctx, AV_LOG_ERROR, "error mx=%d my=%d\n", mx, my); #endif mv = &mv_tables[s->mv_table_index]; code = mv->table_mv_index[(mx << 6) | my]; put_bits(&s->pb, mv->table_mv_bits[code], mv->table_mv_code[code]); if (code == mv->n) { /* escape : code literally */ put_bits(&s->pb, 6, mx); put_bits(&s->pb, 6, my); } } void ff_msmpeg4_handle_slices(MpegEncContext *s){ if (s->mb_x == 0) { if (s->slice_height && (s->mb_y % s->slice_height) == 0) { if(s->msmpeg4_version < 4){ ff_mpeg4_clean_buffers(s); } s->first_slice_line = 1; } else { s->first_slice_line = 0; } } } static void msmpeg4v2_encode_motion(MpegEncContext * s, int val) { int range, bit_size, sign, code, bits; if (val == 0) { /* zero vector */ code = 0; put_bits(&s->pb, mvtab[code][1], mvtab[code][0]); } else { bit_size = s->f_code - 1; range = 1 << bit_size; if (val <= -64) val += 64; else if (val >= 64) val -= 64; if (val >= 0) { sign = 0; } else { val = -val; sign = 1; } val--; code = (val >> bit_size) + 1; bits = val & (range - 1); put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign); if (bit_size > 0) { put_bits(&s->pb, bit_size, bits); } } } void msmpeg4_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y) { int cbp, coded_cbp, i; int pred_x, pred_y; uint8_t *coded_block; ff_msmpeg4_handle_slices(s); if (!s->mb_intra) { /* compute cbp */ cbp = 0; for (i = 0; i < 6; i++) { if (s->block_last_index[i] >= 0) cbp |= 1 << (5 - i); } if (s->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) { /* skip macroblock */ put_bits(&s->pb, 1, 1); s->last_bits++; s->misc_bits++; s->skip_count++; return; } if (s->use_skip_mb_code) put_bits(&s->pb, 1, 0); /* mb coded */ if(s->msmpeg4_version<=2){ put_bits(&s->pb, v2_mb_type[cbp&3][1], v2_mb_type[cbp&3][0]); if((cbp&3) != 3) coded_cbp= cbp ^ 
0x3C; else coded_cbp= cbp; put_bits(&s->pb, ff_h263_cbpy_tab[coded_cbp>>2][1], ff_h263_cbpy_tab[coded_cbp>>2][0]); s->misc_bits += get_bits_diff(s); h263_pred_motion(s, 0, 0, &pred_x, &pred_y); msmpeg4v2_encode_motion(s, motion_x - pred_x); msmpeg4v2_encode_motion(s, motion_y - pred_y); }else{ put_bits(&s->pb, table_mb_non_intra[cbp + 64][1], table_mb_non_intra[cbp + 64][0]); s->misc_bits += get_bits_diff(s); /* motion vector */ h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_msmpeg4_encode_motion(s, motion_x - pred_x, motion_y - pred_y); } s->mv_bits += get_bits_diff(s); for (i = 0; i < 6; i++) { ff_msmpeg4_encode_block(s, block[i], i); } s->p_tex_bits += get_bits_diff(s); } else { /* compute cbp */ cbp = 0; coded_cbp = 0; for (i = 0; i < 6; i++) { int val, pred; val = (s->block_last_index[i] >= 1); cbp |= val << (5 - i); if (i < 4) { /* predict value for close blocks only for luma */ pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block); *coded_block = val; val = val ^ pred; } coded_cbp |= val << (5 - i); } #if 0 if (coded_cbp) printf("cbp=%x %x\n", cbp, coded_cbp); #endif if(s->msmpeg4_version<=2){ if (s->pict_type == FF_I_TYPE) { put_bits(&s->pb, v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]); } else { if (s->use_skip_mb_code) put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, v2_mb_type[(cbp&3) + 4][1], v2_mb_type[(cbp&3) + 4][0]); } put_bits(&s->pb, 1, 0); /* no AC prediction yet */ put_bits(&s->pb, ff_h263_cbpy_tab[cbp>>2][1], ff_h263_cbpy_tab[cbp>>2][0]); }else{ if (s->pict_type == FF_I_TYPE) { put_bits(&s->pb, ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]); } else { if (s->use_skip_mb_code) put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, table_mb_non_intra[cbp][1], table_mb_non_intra[cbp][0]); } put_bits(&s->pb, 1, 0); /* no AC prediction yet */ if(s->inter_intra_pred){ s->h263_aic_dir=0; put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]); } } s->misc_bits += 
        get_bits_diff(s);
        /* intra: encode all 6 blocks and account the texture bits */
        for (i = 0; i < 6; i++) {
            ff_msmpeg4_encode_block(s, block[i], i);
        }
        s->i_tex_bits += get_bits_diff(s);
        s->i_count++;
    }
}

#endif //CONFIG_ENCODERS

/* v1 DC prediction: simply the last coded DC of the same component
 * (0 = luma, 1/2 = chroma); *dc_val_ptr receives the slot to update. */
static inline int msmpeg4v1_pred_dc(MpegEncContext * s, int n,
                                    int32_t **dc_val_ptr)
{
    int i;

    if (n < 4) {
        i= 0;
    } else {
        i= n-3;
    }
    *dc_val_ptr= &s->last_dc[i];
    return s->last_dc[i];
}

/* Sum the 64 pixels of an 8x8 block at src (row pitch = stride) and
 * return the rounded quotient sum/scale via FASTDIV. */
static int get_dc(uint8_t *src, int stride, int scale)
{
    int y;
    int sum=0;
    for(y=0; y<8; y++){
        int x;
        for(x=0; x<8; x++){
            sum+=src[x + y*stride];
        }
    }
    return FASTDIV((sum + (scale>>1)), scale);
}

/* dir = 0: left, dir = 1: top prediction */
/* Predict the DC coefficient of block n from the left (A), top-left
 * (B) and top (C) neighbours, returning the prediction and setting
 * *dir_ptr to the chosen direction; *dc_val_ptr receives the slot the
 * caller must update. The stored neighbours are scaled DC values, so
 * each is divided by scale first (reciprocal-multiply asm on x86). */
static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
                                  int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred, scale;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        scale = s->y_dc_scale;
    } else {
        scale = s->c_dc_scale;
    }

    wrap = s->block_wrap[n];
    dc_val= s->dc_val[0] + s->block_index[n];

    /* B C
     * A X
     */
    a = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    c = dc_val[ - wrap];

    /* on the first slice line (versions < 4) the top neighbours lie in
     * another slice: substitute the reset value 1024 */
    if(s->first_slice_line && (n&2)==0 && s->msmpeg4_version<4){
        b=c=1024;
    }

    /* XXX: the following solution consumes divisions, but it does not
       necessitate to modify mpegvideo.c. The problem comes from the
       fact they decided to store the quantized DC (which would lead
       to problems if Q could vary !) */
#if ARCH_X86 && HAVE_7REGS && HAVE_EBX_AVAILABLE
    /* compute (x + scale/2) / scale for a, b, c by multiplying with
     * the precomputed reciprocal ff_inverse[scale] and keeping the
     * high 32 bits */
    __asm__ volatile(
        "movl %3, %%eax \n\t"
        "shrl $1, %%eax \n\t"
        "addl %%eax, %2 \n\t"
        "addl %%eax, %1 \n\t"
        "addl %0, %%eax \n\t"
        "mull %4 \n\t"
        "movl %%edx, %0 \n\t"
        "movl %1, %%eax \n\t"
        "mull %4 \n\t"
        "movl %%edx, %1 \n\t"
        "movl %2, %%eax \n\t"
        "mull %4 \n\t"
        "movl %%edx, %2 \n\t"
        : "+b" (a), "+c" (b), "+D" (c)
        : "g" (scale), "S" (ff_inverse[scale])
        : "%eax", "%edx"
    );
#else
    /* #elif ARCH_ALPHA */
    /* Divisions are extremely costly on Alpha; optimize the most
       common case. But they are costly everywhere...
*/ if (scale == 8) { a = (a + (8 >> 1)) / 8; b = (b + (8 >> 1)) / 8; c = (c + (8 >> 1)) / 8; } else { a = FASTDIV((a + (scale >> 1)), scale); b = FASTDIV((b + (scale >> 1)), scale); c = FASTDIV((c + (scale >> 1)), scale); } #endif /* XXX: WARNING: they did not choose the same test as MPEG4. This is very important ! */ if(s->msmpeg4_version>3){ if(s->inter_intra_pred){ uint8_t *dest; int wrap; if(n==1){ pred=a; *dir_ptr = 0; }else if(n==2){ pred=c; *dir_ptr = 1; }else if(n==3){ if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } }else{ if(n<4){ wrap= s->linesize; dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8; }else{ wrap= s->uvlinesize; dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; } if(s->mb_x==0) a= (1024 + (scale>>1))/scale; else a= get_dc(dest-8, wrap, scale*8); if(s->mb_y==0) c= (1024 + (scale>>1))/scale; else c= get_dc(dest-8*wrap, wrap, scale*8); if (s->h263_aic_dir==0) { pred= a; *dir_ptr = 0; }else if (s->h263_aic_dir==1) { if(n==0){ pred= c; *dir_ptr = 1; }else{ pred= a; *dir_ptr = 0; } }else if (s->h263_aic_dir==2) { if(n==0){ pred= a; *dir_ptr = 0; }else{ pred= c; *dir_ptr = 1; } } else { pred= c; *dir_ptr = 1; } } }else{ if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } } }else{ if (abs(a - b) <= abs(b - c)) { pred = c; *dir_ptr = 1; } else { pred = a; *dir_ptr = 0; } } /* update predictor */ *dc_val_ptr = &dc_val[0]; return pred; } #define DC_MAX 119 static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr) { int sign, code; int pred, extquant; int extrabits = 0; if(s->msmpeg4_version==1){ int32_t *dc_val; pred = msmpeg4v1_pred_dc(s, n, &dc_val); /* update predictor */ *dc_val= level; }else{ int16_t *dc_val; pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr); /* update predictor */ if (n < 4) { *dc_val = level * s->y_dc_scale; } else { *dc_val = level * s->c_dc_scale; } } /* do the 
prediction */ level -= pred; if(s->msmpeg4_version<=2){ if (n < 4) { put_bits(&s->pb, v2_dc_lum_table[level+256][1], v2_dc_lum_table[level+256][0]); }else{ put_bits(&s->pb, v2_dc_chroma_table[level+256][1], v2_dc_chroma_table[level+256][0]); } }else{ sign = 0; if (level < 0) { level = -level; sign = 1; } code = level; if (code > DC_MAX) code = DC_MAX; else if( s->msmpeg4_version>=6 ) { if( s->qscale == 1 ) { extquant = (level + 3) & 0x3; code = ((level+3)>>2); } else if( s->qscale == 2 ) { extquant = (level + 1) & 0x1; code = ((level+1)>>1); } } if (s->dc_table_index == 0) { if (n < 4) { put_bits(&s->pb, ff_table0_dc_lum[code][1], ff_table0_dc_lum[code][0]); } else { put_bits(&s->pb, ff_table0_dc_chroma[code][1], ff_table0_dc_chroma[code][0]); } } else { if (n < 4) { put_bits(&s->pb, ff_table1_dc_lum[code][1], ff_table1_dc_lum[code][0]); } else { put_bits(&s->pb, ff_table1_dc_chroma[code][1], ff_table1_dc_chroma[code][0]); } } if(s->msmpeg4_version>=6 && s->qscale<=2) extrabits = 3 - s->qscale; if (code == DC_MAX) put_bits(&s->pb, 8 + extrabits, level); else if(extrabits > 0)//== VC1 && s->qscale<=2 put_bits(&s->pb, extrabits, extquant); if (level != 0) { put_bits(&s->pb, 1, sign); } } } /* Encoding of a block. Very similar to MPEG4 except for a different escape coding (same as H263) and more vlc tables. 
*/ void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n) { int level, run, last, i, j, last_index; int last_non_zero, sign, slevel; int code, run_diff, dc_pred_dir; const RLTable *rl; const uint8_t *scantable; if (s->mb_intra) { msmpeg4_encode_dc(s, block[0], n, &dc_pred_dir); i = 1; if (n < 4) { rl = &rl_table[s->rl_table_index]; } else { rl = &rl_table[3 + s->rl_chroma_table_index]; } run_diff = s->msmpeg4_version>=4; scantable= s->intra_scantable.permutated; } else { i = 0; rl = &rl_table[3 + s->rl_table_index]; if(s->msmpeg4_version<=2) run_diff = 0; else run_diff = 1; scantable= s->inter_scantable.permutated; } /* recalculate block_last_index for M$ wmv1 */ if(s->msmpeg4_version>=4 && s->msmpeg4_version<6 && s->block_last_index[n]>0){ for(last_index=63; last_index>=0; last_index--){ if(block[scantable[last_index]]) break; } s->block_last_index[n]= last_index; }else last_index = s->block_last_index[n]; /* AC coefs */ last_non_zero = i - 1; for (; i <= last_index; i++) { j = scantable[i]; level = block[j]; if (level) { run = i - last_non_zero - 1; last = (i == last_index); sign = 0; slevel = level; if (level < 0) { sign = 1; level = -level; } if(level<=MAX_LEVEL && run<=MAX_RUN){ s->ac_stats[s->mb_intra][n>3][level][run][last]++; } #if 0 else s->ac_stats[s->mb_intra][n>3][40][63][0]++; //esc3 like #endif code = get_rl_index(rl, last, run, level); put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); if (code == rl->n) { int level1, run1; level1 = level - rl->max_level[last][run]; if (level1 < 1) goto esc2; code = get_rl_index(rl, last, run, level1); if (code == rl->n) { esc2: put_bits(&s->pb, 1, 0); if (level > MAX_LEVEL) goto esc3; run1 = run - rl->max_run[last][level] - run_diff; if (run1 < 0) goto esc3; code = get_rl_index(rl, last, run1+1, level); if (s->msmpeg4_version == 4 && code == rl->n) goto esc3; code = get_rl_index(rl, last, run1, level); if (code == rl->n) { esc3: /* third escape */ put_bits(&s->pb, 1, 0); 
put_bits(&s->pb, 1, last); if(s->msmpeg4_version>=4){ if(s->esc3_level_length==0){ s->esc3_level_length=8; s->esc3_run_length= 6; //ESCLVLSZ + ESCRUNSZ if(s->qscale<8) put_bits(&s->pb, 6 + (s->msmpeg4_version>=6), 3); else put_bits(&s->pb, 8, 3); } put_bits(&s->pb, s->esc3_run_length, run); put_bits(&s->pb, 1, sign); put_bits(&s->pb, s->esc3_level_length, level); }else{ put_bits(&s->pb, 6, run); put_sbits(&s->pb, 8, slevel); } } else { /* second escape */ put_bits(&s->pb, 1, 1); put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(&s->pb, 1, sign); } } else { /* first escape */ put_bits(&s->pb, 1, 1); put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(&s->pb, 1, sign); } } else { put_bits(&s->pb, 1, sign); } last_non_zero = i; } } } /****************************************/ /* decoding stuff */ VLC ff_mb_non_intra_vlc[4]; static VLC v2_dc_lum_vlc; static VLC v2_dc_chroma_vlc; static VLC v2_intra_cbpc_vlc; static VLC v2_mb_type_vlc; static VLC v2_mv_vlc; VLC ff_inter_intra_vlc; /* This is identical to h263 except that its range is multiplied by 2. 
 */
/* Decode one motion-vector component for v1/v2 streams and add it to
 * pred, wrapping the result back into the modulo range.
 * NOTE: returns the in-band sentinel 0xffff on an invalid VLC code
 * (callers treat any such value as part of an invalid vector). */
static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
{
    int code, val, sign, shift;

    code = get_vlc2(&s->gb, v2_mv_vlc.table, V2_MV_VLC_BITS, 2);
//     printf("MV code %d at %d %d pred: %d\n", code, s->mb_x,s->mb_y, pred);
    if (code < 0)
        return 0xffff;

    if (code == 0)
        return pred;
    sign = get_bits1(&s->gb);
    shift = f_code - 1;
    val = code;
    if (shift) {
        val = (val - 1) << shift;
        val |= get_bits(&s->gb, shift);
        val++;
    }
    if (sign)
        val = -val;

    val += pred;
    if (val <= -64)
        val += 64;
    else if (val >= 64)
        val -= 64;

    return val;
}

/* Decode one macroblock for MSMPEG4 version 1/2 streams: skip flag,
 * MB type / CBP, then motion (inter) or AC prediction flag (intra),
 * followed by the 6 blocks. Returns 0 on success, -1 on error. */
static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    int cbp, code, i;

    if (s->pict_type == FF_P_TYPE) {
        if (s->use_skip_mb_code) {
            if (get_bits1(&s->gb)) {
                /* skip mb */
                s->mb_intra = 0;
                for(i=0;i<6;i++)
                    s->block_last_index[i] = -1;
                s->mv_dir = MV_DIR_FORWARD;
                s->mv_type = MV_TYPE_16X16;
                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                s->mb_skipped = 1;
                return 0;
            }
        }

        if(s->msmpeg4_version==2)
            code = get_vlc2(&s->gb, v2_mb_type_vlc.table, V2_MB_TYPE_VLC_BITS, 1);
        else
            code = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
        if(code<0 || code>7){
            av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", code, s->mb_x, s->mb_y);
            return -1;
        }

        /* bit 2 of the MB type = intra flag, low two bits = chroma CBP */
        s->mb_intra = code >>2;

        cbp = code & 0x3;
    } else {
        s->mb_intra = 1;
        if(s->msmpeg4_version==2)
            cbp= get_vlc2(&s->gb, v2_intra_cbpc_vlc.table, V2_INTRA_CBPC_VLC_BITS, 1);
        else
            cbp= get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 1);
        if(cbp<0 || cbp>3){
            av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y);
            return -1;
        }
    }

    if (!s->mb_intra) {
        int mx, my, cbpy;

        cbpy= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
        if(cbpy<0){
            av_log(s->avctx, AV_LOG_ERROR, "cbpy %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y);
            return -1;
        }

        cbp|= cbpy<<2;
        /* mirror of the encoder-side CBP XOR */
        if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;

        h263_pred_motion(s, 0, 0, &mx, &my);
        mx= msmpeg4v2_decode_motion(s, mx, 1);
        my= msmpeg4v2_decode_motion(s,
my, 1); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else { if(s->msmpeg4_version==2){ s->ac_pred = get_bits1(&s->gb); cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors } else{ s->ac_pred = 0; cbp|= get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors if(s->pict_type==FF_P_TYPE) cbp^=0x3C; } } s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } return 0; } static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbp, code, i; uint8_t *coded_val; uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]; if (s->pict_type == FF_P_TYPE) { if (s->use_skip_mb_code) { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; *mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; return 0; } } code = get_vlc2(&s->gb, ff_mb_non_intra_vlc[DEFAULT_INTER_INDEX].table, MB_NON_INTRA_VLC_BITS, 3); if (code < 0) return -1; //s->mb_intra = (code & 0x40) ? 
0 : 1; s->mb_intra = (~code & 0x40) >> 6; cbp = code & 0x3f; } else { s->mb_intra = 1; code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2); if (code < 0) return -1; /* predict coded block pattern */ cbp = 0; for(i=0;i<6;i++) { int val = ((code >> (5 - i)) & 1); if (i < 4) { int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_val); val = val ^ pred; *coded_val = val; } cbp |= val << (5 - i); } } if (!s->mb_intra) { int mx, my; //printf("P at %d %d\n", s->mb_x, s->mb_y); if(s->per_mb_rl_table && cbp){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } h263_pred_motion(s, 0, 0, &mx, &my); if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0) return -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; *mb_type_ptr = MB_TYPE_L0 | MB_TYPE_16x16; } else { //printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24)); s->ac_pred = get_bits1(&s->gb); *mb_type_ptr = MB_TYPE_INTRA; if(s->inter_intra_pred){ s->h263_aic_dir= get_vlc2(&s->gb, ff_inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1); // printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y); } if(s->per_mb_rl_table && cbp){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } } s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } return 0; } /* init all vlc decoding tables */ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; static int done = 0; int i; MVTable *mv; ff_h263_decode_init(avctx); common_init(s); if (!done) { done = 1; for(i=0;i<NB_RL_TABLES;i++) { init_rl(&rl_table[i], static_rl_table_store[i]); } INIT_VLC_RL(rl_table[0], 642); INIT_VLC_RL(rl_table[1], 1104); 
INIT_VLC_RL(rl_table[2], 554); INIT_VLC_RL(rl_table[3], 940); INIT_VLC_RL(rl_table[4], 962); INIT_VLC_RL(rl_table[5], 554); mv = &mv_tables[0]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 3714); mv = &mv_tables[1]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 2694); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_lum[0][1], 8, 4, &ff_table0_dc_lum[0][0], 8, 4, 1158); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_chroma[0][1], 8, 4, &ff_table0_dc_chroma[0][0], 8, 4, 1118); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_lum[0][1], 8, 4, &ff_table1_dc_lum[0][0], 8, 4, 1476); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_chroma[0][1], 8, 4, &ff_table1_dc_chroma[0][0], 8, 4, 1216); INIT_VLC_STATIC(&v2_dc_lum_vlc, DC_VLC_BITS, 512, &v2_dc_lum_table[0][1], 8, 4, &v2_dc_lum_table[0][0], 8, 4, 1472); INIT_VLC_STATIC(&v2_dc_chroma_vlc, DC_VLC_BITS, 512, &v2_dc_chroma_table[0][1], 8, 4, &v2_dc_chroma_table[0][0], 8, 4, 1506); INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4, &v2_intra_cbpc[0][1], 2, 1, &v2_intra_cbpc[0][0], 2, 1, 8); INIT_VLC_STATIC(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8, &v2_mb_type[0][1], 2, 1, &v2_mb_type[0][0], 2, 1, 128); INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33, &mvtab[0][1], 2, 1, &mvtab[0][0], 2, 1, 538); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[0][0][1], 8, 4, &wmv2_inter_table[0][0][0], 8, 4, 1636); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[1], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[1][0][1], 8, 4, &wmv2_inter_table[1][0][0], 8, 4, 2648); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[2], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[2][0][1], 8, 4, &wmv2_inter_table[2][0][0], 8, 4, 1532); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[3], MB_NON_INTRA_VLC_BITS, 128, 
&wmv2_inter_table[3][0][1], 8, 4, &wmv2_inter_table[3][0][0], 8, 4, 2488); INIT_VLC_STATIC(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64, &ff_msmp4_mb_i_table[0][1], 4, 2, &ff_msmp4_mb_i_table[0][0], 4, 2, 536); INIT_VLC_STATIC(&ff_inter_intra_vlc, INTER_INTRA_VLC_BITS, 4, &table_inter_intra[0][1], 2, 1, &table_inter_intra[0][0], 2, 1, 8); } switch(s->msmpeg4_version){ case 1: case 2: s->decode_mb= msmpeg4v12_decode_mb; break; case 3: case 4: s->decode_mb= msmpeg4v34_decode_mb; break; case 5: if (CONFIG_WMV2_DECODER) s->decode_mb= ff_wmv2_decode_mb; case 6: //FIXME + TODO VC1 decode mb break; } s->slice_height= s->mb_height; //to avoid 1/0 if the first frame is not a keyframe return 0; } int msmpeg4_decode_picture_header(MpegEncContext * s) { int code; #if 0 { int i; for(i=0; i<s->gb.size_in_bits; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb)); // get_bits1(&s->gb); av_log(s->avctx, AV_LOG_DEBUG, "END\n"); return -1; } #endif if(s->msmpeg4_version==1){ int start_code; start_code = (get_bits(&s->gb, 16)<<16) | get_bits(&s->gb, 16); if(start_code!=0x00000100){ av_log(s->avctx, AV_LOG_ERROR, "invalid startcode\n"); return -1; } skip_bits(&s->gb, 5); // frame number */ } s->pict_type = get_bits(&s->gb, 2) + 1; if (s->pict_type != FF_I_TYPE && s->pict_type != FF_P_TYPE){ av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n"); return -1; } #if 0 { static int had_i=0; if(s->pict_type == FF_I_TYPE) had_i=1; if(!had_i) return -1; } #endif s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); if(s->qscale==0){ av_log(s->avctx, AV_LOG_ERROR, "invalid qscale\n"); return -1; } if (s->pict_type == FF_I_TYPE) { code = get_bits(&s->gb, 5); if(s->msmpeg4_version==1){ if(code==0 || code>s->mb_height){ av_log(s->avctx, AV_LOG_ERROR, "invalid slice height %d\n", code); return -1; } s->slice_height = code; }else{ /* 0x17: one slice, 0x18: two slices, ... 
*/ if (code < 0x17){ av_log(s->avctx, AV_LOG_ERROR, "error, slice code was %X\n", code); return -1; } s->slice_height = s->mb_height / (code - 0x16); } switch(s->msmpeg4_version){ case 1: case 2: s->rl_chroma_table_index = 2; s->rl_table_index = 2; s->dc_table_index = 0; //not used break; case 3: s->rl_chroma_table_index = decode012(&s->gb); s->rl_table_index = decode012(&s->gb); s->dc_table_index = get_bits1(&s->gb); break; case 4: msmpeg4_decode_ext_header(s, (2+5+5+17+7)/8); if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb); else s->per_mb_rl_table= 0; if(!s->per_mb_rl_table){ s->rl_chroma_table_index = decode012(&s->gb); s->rl_table_index = decode012(&s->gb); } s->dc_table_index = get_bits1(&s->gb); s->inter_intra_pred= 0; break; } s->no_rounding = 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d slice:%d \n", s->qscale, s->rl_chroma_table_index, s->rl_table_index, s->dc_table_index, s->per_mb_rl_table, s->slice_height); } else { switch(s->msmpeg4_version){ case 1: case 2: if(s->msmpeg4_version==1) s->use_skip_mb_code = 1; else s->use_skip_mb_code = get_bits1(&s->gb); s->rl_table_index = 2; s->rl_chroma_table_index = s->rl_table_index; s->dc_table_index = 0; //not used s->mv_table_index = 0; break; case 3: s->use_skip_mb_code = get_bits1(&s->gb); s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; s->dc_table_index = get_bits1(&s->gb); s->mv_table_index = get_bits1(&s->gb); break; case 4: s->use_skip_mb_code = get_bits1(&s->gb); if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb); else s->per_mb_rl_table= 0; if(!s->per_mb_rl_table){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } s->dc_table_index = get_bits1(&s->gb); s->mv_table_index = get_bits1(&s->gb); s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE); break; } if(s->avctx->debug&FF_DEBUG_PICT_INFO) 
            av_log(s->avctx, AV_LOG_DEBUG, "skip:%d rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d \n",
                s->use_skip_mb_code, s->rl_table_index, s->rl_chroma_table_index, s->dc_table_index,
                s->mv_table_index, s->per_mb_rl_table, s->qscale);

        /* flipflop rounding: alternate the rounding mode between
         * consecutive P frames */
        if(s->flipflop_rounding){
            s->no_rounding ^= 1;
        }else{
            s->no_rounding = 0;
        }
    }
//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);

    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    return 0;
}

/* Parse the extension header (5-bit fps, 11-bit bitrate in kbit/s,
 * and for version >= 3 the flipflop rounding bit) if exactly one such
 * header fits in the remaining bits; otherwise log and skip it.
 * Always returns 0. */
int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
{
    int left= buf_size*8 - get_bits_count(&s->gb);
    int length= s->msmpeg4_version>=3 ? 17 : 16;
    /* the alt_bitstream reader could read over the end so we need to check it */
    if(left>=length && left<length+8)
    {
        int fps;

        fps= get_bits(&s->gb, 5);
        s->bit_rate= get_bits(&s->gb, 11)*1024;
        if(s->msmpeg4_version>=3)
            s->flipflop_rounding= get_bits1(&s->gb);
        else
            s->flipflop_rounding= 0;

//        printf("fps:%2d bps:%2d roundingType:%1d\n", fps, s->bit_rate/1024, s->flipflop_rounding);
    }
    else if(left<length+8)
    {
        s->flipflop_rounding= 0;
        if(s->msmpeg4_version != 2)
            av_log(s->avctx, AV_LOG_ERROR, "ext header missing, %d left\n", left);
    }
    else
    {
        av_log(s->avctx, AV_LOG_ERROR, "I frame too long, ignoring ext header\n");
    }

    return 0;
}

/* Decode one DC coefficient of block n: read the level VLC (v1/v2 use
 * the inverted-bit tables, v3+ the table selected by dc_table_index),
 * handle the DC_MAX escape, then add the DC prediction and update the
 * predictor. Returns the reconstructed level or -1 on error. */
static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
{
    int level, pred;

    if(s->msmpeg4_version<=2){
        if (n < 4) {
            level = get_vlc2(&s->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
        } else {
            level = get_vlc2(&s->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
        }
        if (level < 0)
            return -1;
        level-=256;  /* tables are indexed by level+256 */
    }else{  //FIXME optimize use unified tables & index
        if (n < 4) {
            level = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        } else {
            level = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        }
        if (level < 0){
            av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
            return -1;
        }

        if (level == DC_MAX) {
            /* escape: raw 8-bit magnitude followed by a sign bit */
            level = get_bits(&s->gb, 8);
            if (get_bits1(&s->gb))
                level = -level;
        } else if (level !=
0) { if (get_bits1(&s->gb)) level = -level; } } if(s->msmpeg4_version==1){ int32_t *dc_val; pred = msmpeg4v1_pred_dc(s, n, &dc_val); level += pred; /* update predictor */ *dc_val= level; }else{ int16_t *dc_val; pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr); level += pred; /* update predictor */ if (n < 4) { *dc_val = level * s->y_dc_scale; } else { *dc_val = level * s->c_dc_scale; } } return level; } //#define ERROR_DETAILS int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, int n, int coded, const uint8_t *scan_table) { int level, i, last, run, run_diff; int av_uninit(dc_pred_dir); RLTable *rl; RL_VLC_ELEM *rl_vlc; int qmul, qadd; if (s->mb_intra) { qmul=1; qadd=0; /* DC coef */ level = msmpeg4_decode_dc(s, n, &dc_pred_dir); if (level < 0){ av_log(s->avctx, AV_LOG_ERROR, "dc overflow- block: %d qscale: %d//\n", n, s->qscale); if(s->inter_intra_pred) level=0; else return -1; } if (n < 4) { rl = &rl_table[s->rl_table_index]; if(level > 256*s->y_dc_scale){ av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ L qscale: %d//\n", s->qscale); if(!s->inter_intra_pred) return -1; } } else { rl = &rl_table[3 + s->rl_chroma_table_index]; if(level > 256*s->c_dc_scale){ av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ C qscale: %d//\n", s->qscale); if(!s->inter_intra_pred) return -1; } } block[0] = level; run_diff = s->msmpeg4_version >= 4; i = 0; if (!coded) { goto not_coded; } if (s->ac_pred) { if (dc_pred_dir == 0) scan_table = s->intra_v_scantable.permutated; /* left */ else scan_table = s->intra_h_scantable.permutated; /* top */ } else { scan_table = s->intra_scantable.permutated; } rl_vlc= rl->rl_vlc[0]; } else { qmul = s->qscale << 1; qadd = (s->qscale - 1) | 1; i = -1; rl = &rl_table[3 + s->rl_table_index]; if(s->msmpeg4_version==2) run_diff = 0; else run_diff = 1; if (!coded) { s->block_last_index[n] = i; return 0; } if(!scan_table) scan_table = s->inter_scantable.permutated; rl_vlc= rl->rl_vlc[s->qscale]; } { OPEN_READER(re, &s->gb); for(;;) { UPDATE_CACHE(re, 
&s->gb); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0); if (level==0) { int cache; cache= GET_CACHE(re, &s->gb); /* escape */ if (s->msmpeg4_version==1 || (cache&0x80000000)==0) { if (s->msmpeg4_version==1 || (cache&0x40000000)==0) { /* third escape */ if(s->msmpeg4_version!=1) LAST_SKIP_BITS(re, &s->gb, 2); UPDATE_CACHE(re, &s->gb); if(s->msmpeg4_version<=3){ last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run= SHOW_UBITS(re, &s->gb, 6); SKIP_CACHE(re, &s->gb, 6); level= SHOW_SBITS(re, &s->gb, 8); LAST_SKIP_CACHE(re, &s->gb, 8); SKIP_COUNTER(re, &s->gb, 1+6+8); }else{ int sign; last= SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); if(!s->esc3_level_length){ int ll; //printf("ESC-3 %X at %d %d\n", show_bits(&s->gb, 24), s->mb_x, s->mb_y); if(s->qscale<8){ ll= SHOW_UBITS(re, &s->gb, 3); SKIP_BITS(re, &s->gb, 3); if(ll==0){ ll= 8+SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); } }else{ ll=2; while(ll<8 && SHOW_UBITS(re, &s->gb, 1)==0){ ll++; SKIP_BITS(re, &s->gb, 1); } if(ll<8) SKIP_BITS(re, &s->gb, 1); } s->esc3_level_length= ll; s->esc3_run_length= SHOW_UBITS(re, &s->gb, 2) + 3; SKIP_BITS(re, &s->gb, 2); //printf("level length:%d, run length: %d\n", ll, s->esc3_run_length); UPDATE_CACHE(re, &s->gb); } run= SHOW_UBITS(re, &s->gb, s->esc3_run_length); SKIP_BITS(re, &s->gb, s->esc3_run_length); sign= SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); level= SHOW_UBITS(re, &s->gb, s->esc3_level_length); SKIP_BITS(re, &s->gb, s->esc3_level_length); if(sign) level= -level; } //printf("level: %d, run: %d at %d %d\n", level, run, s->mb_x, s->mb_y); #if 0 // waste of time / this will detect very few errors { const int abs_level= FFABS(level); const int run1= run - rl->max_run[last][abs_level] - run_diff; if(abs_level<=MAX_LEVEL && run<=MAX_RUN){ if(abs_level <= rl->max_level[last][run]){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. 
esc, vlc encoding possible\n"); return DECODING_AC_LOST; } if(abs_level <= rl->max_level[last][run]*2){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n"); return DECODING_AC_LOST; } if(run1>=0 && abs_level <= rl->max_level[last][run1]){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n"); return DECODING_AC_LOST; } } } #endif //level = level * qmul + (level>0) * qadd - (level<=0) * qadd ; if (level>0) level= level * qmul + qadd; else level= level * qmul - qadd; #if 0 // waste of time too :( if(level>2048 || level<-2048){ av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc\n"); return DECODING_AC_LOST; } #endif i+= run + 1; if(last) i+=192; #ifdef ERROR_DETAILS if(run==66) av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC3 level=%d\n", level); else if((i>62 && i<192) || i>192+63) av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC3 i=%d run=%d level=%d\n", i, run, level); #endif } else { /* second escape */ #if MIN_CACHE_BITS < 23 LAST_SKIP_BITS(re, &s->gb, 2); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 2); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i+= run + rl->max_run[run>>7][level/qmul] + run_diff; //FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); #ifdef ERROR_DETAILS if(run==66) av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC2 level=%d\n", level); else if((i>62 && i<192) || i>192+63) av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC2 i=%d run=%d level=%d\n", i, run, level); #endif } } else { /* first escape */ #if MIN_CACHE_BITS < 22 LAST_SKIP_BITS(re, &s->gb, 1); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 1); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i+= run; level = level + rl->max_level[run>>7][(run-1)&63] * qmul;//FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); 
#ifdef ERROR_DETAILS if(run==66) av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code in ESC1 level=%d\n", level); else if((i>62 && i<192) || i>192+63) av_log(s->avctx, AV_LOG_ERROR, "run overflow in ESC1 i=%d run=%d level=%d\n", i, run, level); #endif } } else { i+= run; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); #ifdef ERROR_DETAILS if(run==66) av_log(s->avctx, AV_LOG_ERROR, "illegal vlc code level=%d\n", level); else if((i>62 && i<192) || i>192+63) av_log(s->avctx, AV_LOG_ERROR, "run overflow i=%d run=%d level=%d\n", i, run, level); #endif } if (i > 62){ i-= 192; if(i&(~63)){ const int left= get_bits_left(&s->gb); if(((i+192 == 64 && level/qmul==-1) || s->error_recognition<=1) && left>=0){ av_log(s->avctx, AV_LOG_ERROR, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y); break; }else{ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } } block[scan_table[i]] = level; break; } block[scan_table[i]] = level; } CLOSE_READER(re, &s->gb); } not_coded: if (s->mb_intra) { mpeg4_pred_ac(s, block, n, dc_pred_dir); if (s->ac_pred) { i = 63; /* XXX: not optimal */ } } if(s->msmpeg4_version>=4 && i>0) i=63; //FIXME/XXX optimize s->block_last_index[n] = i; return 0; } int ff_msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr) { MVTable *mv; int code, mx, my; mv = &mv_tables[s->mv_table_index]; code = get_vlc2(&s->gb, mv->vlc.table, MV_VLC_BITS, 2); if (code < 0){ av_log(s->avctx, AV_LOG_ERROR, "illegal MV code at %d %d\n", s->mb_x, s->mb_y); return -1; } if (code == mv->n) { //printf("MV ESC %X at %d %d\n", show_bits(&s->gb, 24), s->mb_x, s->mb_y); mx = get_bits(&s->gb, 6); my = get_bits(&s->gb, 6); } else { mx = mv->table_mvx[code]; my = mv->table_mvy[code]; } mx += *mx_ptr - 32; my += *my_ptr - 32; /* WARNING : they do not do exactly modulo encoding */ if (mx <= -64) mx += 64; else if (mx >= 64) mx -= 64; if (my <= -64) my += 64; else if (my >= 64) my -= 64; 
*mx_ptr = mx; *my_ptr = my; return 0; } AVCodec msmpeg4v1_decoder = { "msmpeg4v1", AVMEDIA_TYPE_VIDEO, CODEC_ID_MSMPEG4V1, sizeof(MpegEncContext), ff_msmpeg4_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec msmpeg4v2_decoder = { "msmpeg4v2", AVMEDIA_TYPE_VIDEO, CODEC_ID_MSMPEG4V2, sizeof(MpegEncContext), ff_msmpeg4_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec msmpeg4v3_decoder = { "msmpeg4", AVMEDIA_TYPE_VIDEO, CODEC_ID_MSMPEG4V3, sizeof(MpegEncContext), ff_msmpeg4_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), .pix_fmts= ff_pixfmt_list_420, }; AVCodec wmv1_decoder = { "wmv1", AVMEDIA_TYPE_VIDEO, CODEC_ID_WMV1, sizeof(MpegEncContext), ff_msmpeg4_decode_init, NULL, ff_h263_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"), .pix_fmts= ff_pixfmt_list_420, };
123linslouis-android-video-cutter
jni/libavcodec/msmpeg4.c
C
asf20
62,709
/* * RV40 decoder * Copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * RV40 decoder */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "golomb.h" #include "rv34.h" #include "rv40vlc2.h" #include "rv40data.h" static VLC aic_top_vlc; static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM]; static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS]; static const int16_t mode2_offs[] = { 0, 614, 1222, 1794, 2410, 3014, 3586, 4202, 4792, 5382, 5966, 6542, 7138, 7716, 8292, 8864, 9444, 10030, 10642, 11212, 11814 }; /** * Initialize all tables. 
*/ static av_cold void rv40_init_tables(void) { int i; static VLC_TYPE aic_table[1 << AIC_TOP_BITS][2]; static VLC_TYPE aic_mode1_table[AIC_MODE1_NUM << AIC_MODE1_BITS][2]; static VLC_TYPE aic_mode2_table[11814][2]; static VLC_TYPE ptype_table[NUM_PTYPE_VLCS << PTYPE_VLC_BITS][2]; static VLC_TYPE btype_table[NUM_BTYPE_VLCS << BTYPE_VLC_BITS][2]; aic_top_vlc.table = aic_table; aic_top_vlc.table_allocated = 1 << AIC_TOP_BITS; init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE, rv40_aic_top_vlc_bits, 1, 1, rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); for(i = 0; i < AIC_MODE1_NUM; i++){ // Every tenth VLC table is empty if((i % 10) == 9) continue; aic_mode1_vlc[i].table = &aic_mode1_table[i << AIC_MODE1_BITS]; aic_mode1_vlc[i].table_allocated = 1 << AIC_MODE1_BITS; init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE, aic_mode1_vlc_bits[i], 1, 1, aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } for(i = 0; i < AIC_MODE2_NUM; i++){ aic_mode2_vlc[i].table = &aic_mode2_table[mode2_offs[i]]; aic_mode2_vlc[i].table_allocated = mode2_offs[i + 1] - mode2_offs[i]; init_vlc(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE, aic_mode2_vlc_bits[i], 1, 1, aic_mode2_vlc_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC); } for(i = 0; i < NUM_PTYPE_VLCS; i++){ ptype_vlc[i].table = &ptype_table[i << PTYPE_VLC_BITS]; ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS; init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE, ptype_vlc_bits[i], 1, 1, ptype_vlc_codes[i], 1, 1, ptype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); } for(i = 0; i < NUM_BTYPE_VLCS; i++){ btype_vlc[i].table = &btype_table[i << BTYPE_VLC_BITS]; btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS; init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE, btype_vlc_bits[i], 1, 1, btype_vlc_codes[i], 1, 1, btype_vlc_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); } } /** * Get stored dimension from bitstream. * * If the width/height is the standard one then it's coded as a 3-bit index. 
* Otherwise it is coded as escaped 8-bit portions. */ static int get_dimension(GetBitContext *gb, const int *dim) { int t = get_bits(gb, 3); int val = dim[t]; if(val < 0) val = dim[get_bits1(gb) - val]; if(!val){ do{ t = get_bits(gb, 8); val += t << 2; }while(t == 0xFF); } return val; } /** * Get encoded picture size - usually this is called from rv40_parse_slice_header. */ static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h) { *w = get_dimension(gb, rv40_standard_widths); *h = get_dimension(gb, rv40_standard_heights); } static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si) { int mb_bits; int w = r->s.width, h = r->s.height; int mb_size; memset(si, 0, sizeof(SliceInfo)); if(get_bits1(gb)) return -1; si->type = get_bits(gb, 2); if(si->type == 1) si->type = 0; si->quant = get_bits(gb, 5); if(get_bits(gb, 2)) return -1; si->vlc_set = get_bits(gb, 2); skip_bits1(gb); si->pts = get_bits(gb, 13); if(!si->type || !get_bits1(gb)) rv40_parse_picture_size(gb, &w, &h); if(avcodec_check_dimensions(r->s.avctx, w, h) < 0) return -1; si->width = w; si->height = h; mb_size = ((w + 15) >> 4) * ((h + 15) >> 4); mb_bits = ff_rv34_get_start_offset(gb, mb_size); si->start = get_bits(gb, mb_bits); return 0; } /** * Decode 4x4 intra types array. 
*/ static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst) { MpegEncContext *s = &r->s; int i, j, k, v; int A, B, C; int pattern; int8_t *ptr; for(i = 0; i < 4; i++, dst += r->intra_types_stride){ if(!i && s->first_slice_line){ pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1); dst[0] = (pattern >> 2) & 2; dst[1] = (pattern >> 1) & 2; dst[2] = pattern & 2; dst[3] = (pattern << 1) & 2; continue; } ptr = dst; for(j = 0; j < 4; j++){ /* Coefficients are read using VLC chosen by the prediction pattern * The first one (used for retrieving a pair of coefficients) is * constructed from the top, top right and left coefficients * The second one (used for retrieving only one coefficient) is * top + 10 * left. */ A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row B = ptr[-r->intra_types_stride]; C = ptr[-1]; pattern = A + (B << 4) + (C << 8); for(k = 0; k < MODE2_PATTERNS_NUM; k++) if(pattern == rv40_aic_table_index[k]) break; if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2); *ptr++ = v/9; *ptr++ = v%9; j++; }else{ if(B != -1 && C != -1) v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1); else{ // tricky decoding v = 0; switch(C){ case -1: // code 0 -> 1, 1 -> 0 if(B < 2) v = get_bits1(gb) ^ 1; break; case 0: case 2: // code 0 -> 2, 1 -> 0 v = (get_bits1(gb) ^ 1) << 1; break; } } *ptr++ = v; } } } return 0; } /** * Decode macroblock information. 
*/ static int rv40_decode_mb_info(RV34DecContext *r) { MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int q, i; int prev_type = 0; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; int blocks[RV34_MB_TYPES] = {0}; int count = 0; if(!r->s.mb_skip_run) r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1; if(--r->s.mb_skip_run) return RV34_MB_SKIP; if(r->avail_cache[6-1]) blocks[r->mb_type[mb_pos - 1]]++; if(r->avail_cache[6-4]){ blocks[r->mb_type[mb_pos - s->mb_stride]]++; if(r->avail_cache[6-2]) blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++; if(r->avail_cache[6-5]) blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++; } for(i = 0; i < RV34_MB_TYPES; i++){ if(blocks[i] > count){ count = blocks[i]; prev_type = i; } } if(s->pict_type == FF_P_TYPE){ prev_type = block_num_to_ptype_vlc_num[prev_type]; q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1); if(q < PBTYPE_ESCAPE) return q; q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1); av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n"); }else{ prev_type = block_num_to_btype_vlc_num[prev_type]; q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1); if(q < PBTYPE_ESCAPE) return q; q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1); av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n"); } return 0; } #define CLIP_SYMM(a, b) av_clip(a, -(b), b) /** * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1 */ static inline void rv40_weak_loop_filter(uint8_t *src, const int step, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1, const int diff_p1p0, const int diff_q1q0, const int diff_p1p2, const int diff_q1q2) { uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int t, u, diff; t = src[0*step] - src[-1*step]; if(!t) return; u = (alpha * FFABS(t)) >> 7; if(u > 3 - (filter_p1 && filter_q1)) return; t <<= 2; if(filter_p1 && filter_q1) t += src[-2*step] - src[1*step]; diff = CLIP_SYMM((t + 4) 
>> 3, lim_p0q0); src[-1*step] = cm[src[-1*step] + diff]; src[ 0*step] = cm[src[ 0*step] - diff]; if(FFABS(diff_p1p2) <= beta && filter_p1){ t = (diff_p1p0 + diff_p1p2 - diff) >> 1; src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)]; } if(FFABS(diff_q1q2) <= beta && filter_q1){ t = (diff_q1q0 + diff_q1q2 + diff) >> 1; src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)]; } } static inline void rv40_adaptive_loop_filter(uint8_t *src, const int step, const int stride, const int dmode, const int lim_q1, const int lim_p1, const int alpha, const int beta, const int beta2, const int chroma, const int edge) { int diff_p1p0[4], diff_q1q0[4], diff_p1p2[4], diff_q1q2[4]; int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0; uint8_t *ptr; int flag_strong0 = 1, flag_strong1 = 1; int filter_p1, filter_q1; int i; int lims; for(i = 0, ptr = src; i < 4; i++, ptr += stride){ diff_p1p0[i] = ptr[-2*step] - ptr[-1*step]; diff_q1q0[i] = ptr[ 1*step] - ptr[ 0*step]; sum_p1p0 += diff_p1p0[i]; sum_q1q0 += diff_q1q0[i]; } filter_p1 = FFABS(sum_p1p0) < (beta<<2); filter_q1 = FFABS(sum_q1q0) < (beta<<2); if(!filter_p1 && !filter_q1) return; for(i = 0, ptr = src; i < 4; i++, ptr += stride){ diff_p1p2[i] = ptr[-2*step] - ptr[-3*step]; diff_q1q2[i] = ptr[ 1*step] - ptr[ 2*step]; sum_p1p2 += diff_p1p2[i]; sum_q1q2 += diff_q1q2[i]; } if(edge){ flag_strong0 = filter_p1 && (FFABS(sum_p1p2) < beta2); flag_strong1 = filter_q1 && (FFABS(sum_q1q2) < beta2); }else{ flag_strong0 = flag_strong1 = 0; } lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1; if(flag_strong0 && flag_strong1){ /* strong filtering */ for(i = 0; i < 4; i++, src += stride){ int sflag, p0, q0, p1, q1; int t = src[0*step] - src[-1*step]; if(!t) continue; sflag = (alpha * FFABS(t)) >> 7; if(sflag > 1) continue; p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] + 25*src[ 1*step] + rv40_dither_l[dmode + i]) >> 7; q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] + 26*src[ 1*step] + 
25*src[ 2*step] + rv40_dither_r[dmode + i]) >> 7; if(sflag){ p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims); q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims); } p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 + 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7; q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] + 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7; if(sflag){ p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims); q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims); } src[-2*step] = p1; src[-1*step] = p0; src[ 0*step] = q0; src[ 1*step] = q1; if(!chroma){ src[-3*step] = (25*src[-1*step] + 26*src[-2*step] + 51*src[-3*step] + 26*src[-4*step] + 64) >> 7; src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] + 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7; } } }else if(filter_p1 && filter_q1){ for(i = 0; i < 4; i++, src += stride) rv40_weak_loop_filter(src, step, 1, 1, alpha, beta, lims, lim_q1, lim_p1, diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]); }else{ for(i = 0; i < 4; i++, src += stride) rv40_weak_loop_filter(src, step, filter_p1, filter_q1, alpha, beta, lims>>1, lim_q1>>1, lim_p1>>1, diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]); } } static void rv40_v_loop_filter(uint8_t *src, int stride, int dmode, int lim_q1, int lim_p1, int alpha, int beta, int beta2, int chroma, int edge){ rv40_adaptive_loop_filter(src, 1, stride, dmode, lim_q1, lim_p1, alpha, beta, beta2, chroma, edge); } static void rv40_h_loop_filter(uint8_t *src, int stride, int dmode, int lim_q1, int lim_p1, int alpha, int beta, int beta2, int chroma, int edge){ rv40_adaptive_loop_filter(src, stride, 1, dmode, lim_q1, lim_p1, alpha, beta, beta2, chroma, edge); } enum RV40BlockPos{ POS_CUR, POS_TOP, POS_LEFT, POS_BOTTOM, }; #define MASK_CUR 0x0001 #define MASK_RIGHT 0x0008 #define MASK_BOTTOM 0x0010 #define MASK_TOP 0x1000 #define MASK_Y_TOP_ROW 0x000F #define MASK_Y_LAST_ROW 0xF000 #define MASK_Y_LEFT_COL 
0x1111 #define MASK_Y_RIGHT_COL 0x8888 #define MASK_C_TOP_ROW 0x0003 #define MASK_C_LAST_ROW 0x000C #define MASK_C_LEFT_COL 0x0005 #define MASK_C_RIGHT_COL 0x000A static const int neighbour_offs_x[4] = { 0, 0, -1, 0 }; static const int neighbour_offs_y[4] = { 0, -1, 0, 1 }; /** * RV40 loop filtering function */ static void rv40_loop_filter(RV34DecContext *r, int row) { MpegEncContext *s = &r->s; int mb_pos, mb_x; int i, j, k; uint8_t *Y, *C; int alpha, beta, betaY, betaC; int q; int mbtype[4]; ///< current macroblock and its neighbours types /** * flags indicating that macroblock can be filtered with strong filter * it is set only for intra coded MB and MB with DCs coded separately */ int mb_strong[4]; int clip[4]; ///< MB filter clipping value calculated from filtering strength /** * coded block patterns for luma part of current macroblock and its neighbours * Format: * LSB corresponds to the top left block, * each nibble represents one row of subblocks. */ int cbp[4]; /** * coded block patterns for chroma part of current macroblock and its neighbours * Format is the same as for luma with two subblocks in a row. 
*/ int uvcbp[4][2]; /** * This mask represents the pattern of luma subblocks that should be filtered * in addition to the coded ones because because they lie at the edge of * 8x8 block with different enough motion vectors */ int mvmasks[4]; mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ int mbtype = s->current_picture_ptr->mb_type[mb_pos]; if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype)) r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF; if(IS_INTRA(mbtype)) r->cbp_chroma[mb_pos] = 0xFF; } mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ int y_h_deblock, y_v_deblock; int c_v_deblock[2], c_h_deblock[2]; int clip_left; int avail[4]; int y_to_deblock, c_to_deblock[2]; q = s->current_picture_ptr->qscale_table[mb_pos]; alpha = rv40_alpha_tab[q]; beta = rv40_beta_tab [q]; betaY = betaC = beta * 3; if(s->width * s->height <= 176*144) betaY += beta; avail[0] = 1; avail[1] = row; avail[2] = mb_x; avail[3] = row < s->mb_height - 1; for(i = 0; i < 4; i++){ if(avail[i]){ int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride; mvmasks[i] = r->deblock_coefs[pos]; mbtype [i] = s->current_picture_ptr->mb_type[pos]; cbp [i] = r->cbp_luma[pos]; uvcbp[i][0] = r->cbp_chroma[pos] & 0xF; uvcbp[i][1] = r->cbp_chroma[pos] >> 4; }else{ mvmasks[i] = 0; mbtype [i] = mbtype[0]; cbp [i] = 0; uvcbp[i][0] = uvcbp[i][1] = 0; } mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]); clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q]; } y_to_deblock = mvmasks[POS_CUR] | (mvmasks[POS_BOTTOM] << 16); /* This pattern contains bits signalling that horizontal edges of * the current block can be filtered. * That happens when either of adjacent subblocks is coded or lies on * the edge of 8x8 blocks with motion vectors differing by more than * 3/4 pel in any component (any edge orientation for some reason). 
*/ y_h_deblock = y_to_deblock | ((cbp[POS_CUR] << 4) & ~MASK_Y_TOP_ROW) | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12); /* This pattern contains bits signalling that vertical edges of * the current block can be filtered. * That happens when either of adjacent subblocks is coded or lies on * the edge of 8x8 blocks with motion vectors differing by more than * 3/4 pel in any component (any edge orientation for some reason). */ y_v_deblock = y_to_deblock | ((cbp[POS_CUR] << 1) & ~MASK_Y_LEFT_COL) | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3); if(!mb_x) y_v_deblock &= ~MASK_Y_LEFT_COL; if(!row) y_h_deblock &= ~MASK_Y_TOP_ROW; if(row == s->mb_height - 1 || (mb_strong[POS_CUR] || mb_strong[POS_BOTTOM])) y_h_deblock &= ~(MASK_Y_TOP_ROW << 16); /* Calculating chroma patterns is similar and easier since there is * no motion vector pattern for them. */ for(i = 0; i < 2; i++){ c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i]; c_v_deblock[i] = c_to_deblock[i] | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL) | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1); c_h_deblock[i] = c_to_deblock[i] | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2) | (uvcbp[POS_CUR][i] << 2); if(!mb_x) c_v_deblock[i] &= ~MASK_C_LEFT_COL; if(!row) c_h_deblock[i] &= ~MASK_C_TOP_ROW; if(row == s->mb_height - 1 || mb_strong[POS_CUR] || mb_strong[POS_BOTTOM]) c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4); } for(j = 0; j < 16; j += 4){ Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize; for(i = 0; i < 4; i++, Y += 4){ int ij = i + j; int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0; int dither = j ? ij : i*4; // if bottom block is coded then we can filter its top edge // (or bottom edge of this block, which is the same) if(y_h_deblock & (MASK_BOTTOM << ij)){ rv40_h_loop_filter(Y+4*s->linesize, s->linesize, dither, y_to_deblock & (MASK_BOTTOM << ij) ? 
clip[POS_CUR] : 0, clip_cur, alpha, beta, betaY, 0, 0); } // filter left block edge in ordinary mode (with low filtering strength) if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){ if(!i) clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0; else clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0; rv40_v_loop_filter(Y, s->linesize, dither, clip_cur, clip_left, alpha, beta, betaY, 0, 0); } // filter top edge of the current macroblock when filtering strength is high if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){ rv40_h_loop_filter(Y, s->linesize, dither, clip_cur, mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0, alpha, beta, betaY, 0, 1); } // filter left block edge in edge mode (with high filtering strength) if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){ clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0; rv40_v_loop_filter(Y, s->linesize, dither, clip_cur, clip_left, alpha, beta, betaY, 0, 1); } } } for(k = 0; k < 2; k++){ for(j = 0; j < 2; j++){ C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize; for(i = 0; i < 2; i++, C += 4){ int ij = i + j*2; int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0; if(c_h_deblock[k] & (MASK_CUR << (ij+2))){ int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0; rv40_h_loop_filter(C+4*s->uvlinesize, s->uvlinesize, i*8, clip_bot, clip_cur, alpha, beta, betaC, 1, 0); } if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){ if(!i) clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0; else clip_left = c_to_deblock[k] & (MASK_CUR << (ij-1)) ? 
clip[POS_CUR] : 0; rv40_v_loop_filter(C, s->uvlinesize, j*8, clip_cur, clip_left, alpha, beta, betaC, 1, 0); } if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){ int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0; rv40_h_loop_filter(C, s->uvlinesize, i*8, clip_cur, clip_top, alpha, beta, betaC, 1, 1); } if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){ clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0; rv40_v_loop_filter(C, s->uvlinesize, j*8, clip_cur, clip_left, alpha, beta, betaC, 1, 1); } } } } } } /** * Initialize decoder. */ static av_cold int rv40_decode_init(AVCodecContext *avctx) { RV34DecContext *r = avctx->priv_data; r->rv30 = 0; ff_rv34_decode_init(avctx); if(!aic_top_vlc.bits) rv40_init_tables(); r->parse_slice_header = rv40_parse_slice_header; r->decode_intra_types = rv40_decode_intra_types; r->decode_mb_info = rv40_decode_mb_info; r->loop_filter = rv40_loop_filter; r->luma_dc_quant_i = rv40_luma_dc_quant[0]; r->luma_dc_quant_p = rv40_luma_dc_quant[1]; return 0; } AVCodec rv40_decoder = { "rv40", AVMEDIA_TYPE_VIDEO, CODEC_ID_RV40, sizeof(RV34DecContext), rv40_decode_init, NULL, ff_rv34_decode_end, ff_rv34_decode_frame, CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush = ff_mpeg_flush, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 4.0"), .pix_fmts= ff_pixfmt_list_420, };
123linslouis-android-video-cutter
jni/libavcodec/rv40.c
C
asf20
27,135
/* * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 prediction functions. * @author Michael Niedermayer <michaelni@gmx.at> */ #ifndef AVCODEC_H264PRED_H #define AVCODEC_H264PRED_H #include "libavutil/common.h" #include "dsputil.h" /** * Prediction types */ //@{ #define VERT_PRED 0 #define HOR_PRED 1 #define DC_PRED 2 #define DIAG_DOWN_LEFT_PRED 3 #define DIAG_DOWN_RIGHT_PRED 4 #define VERT_RIGHT_PRED 5 #define HOR_DOWN_PRED 6 #define VERT_LEFT_PRED 7 #define HOR_UP_PRED 8 #define LEFT_DC_PRED 9 #define TOP_DC_PRED 10 #define DC_128_PRED 11 #define DIAG_DOWN_LEFT_PRED_RV40_NODOWN 12 #define HOR_UP_PRED_RV40_NODOWN 13 #define VERT_LEFT_PRED_RV40_NODOWN 14 #define DC_PRED8x8 0 #define HOR_PRED8x8 1 #define VERT_PRED8x8 2 #define PLANE_PRED8x8 3 #define LEFT_DC_PRED8x8 4 #define TOP_DC_PRED8x8 5 #define DC_128_PRED8x8 6 #define ALZHEIMER_DC_L0T_PRED8x8 7 #define ALZHEIMER_DC_0LT_PRED8x8 8 #define ALZHEIMER_DC_L00_PRED8x8 9 #define ALZHEIMER_DC_0L0_PRED8x8 10 //@} /** * Context for storing H.264 prediction functions */ typedef struct H264PredContext{ void (*pred4x4 [9+3+3])(uint8_t *src, uint8_t *topright, int stride);//FIXME move to dsp? 
void (*pred8x8l [9+3])(uint8_t *src, int topleft, int topright, int stride); void (*pred8x8 [4+3+4])(uint8_t *src, int stride); void (*pred16x16[4+3])(uint8_t *src, int stride); void (*pred4x4_add [2])(uint8_t *pix/*align 4*/, const DCTELEM *block/*align 16*/, int stride); void (*pred8x8l_add [2])(uint8_t *pix/*align 8*/, const DCTELEM *block/*align 16*/, int stride); void (*pred8x8_add [3])(uint8_t *pix/*align 8*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride); void (*pred16x16_add[3])(uint8_t *pix/*align 16*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride); }H264PredContext; void ff_h264_pred_init(H264PredContext *h, int codec_id); void ff_h264_pred_init_arm(H264PredContext *h, int codec_id); #endif /* AVCODEC_H264PRED_H */
123linslouis-android-video-cutter
jni/libavcodec/h264pred.h
C
asf20
3,037
/* * SIPR / ACELP.NET decoder * * Copyright (c) 2008 Vladimir Voroshilov * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_SIPRDATA_H #define AVCODEC_SIPRDATA_H static const float mean_lsf[10] = { 0.297151, 0.452308, 0.765443, 1.134803, 1.421125, 1.773822, 2.049173, 2.375914, 2.585097, 0.075756 }; static const float lsf_cb1[64][2] = { { 0.007587, -0.005843}, { 0.042163, -0.028048}, {-0.017147, -0.060705}, { 0.013773, -0.038108}, {-0.041563, -0.078571}, {-0.076928, -0.119343}, { 0.012654, 0.176005}, { 0.059737, 0.156869}, { 0.171767, 0.231837}, { 0.114030, 0.242047}, { 0.168977, 0.283101}, { 0.146210, 0.397961}, { 0.249446, 0.268421}, { 0.137074, 0.186724}, {-0.057736, -0.135638}, {-0.109664, -0.124611}, {-0.021234, -0.031174}, {-0.013990, -0.091819}, {-0.040046, -0.111426}, {-0.016830, 0.055361}, { 0.057815, 0.071606}, { 0.060670, 0.114436}, { 0.106695, 0.140838}, { 0.093601, 0.092793}, { 0.039593, 0.006142}, {-0.066589, -0.092463}, {-0.102589, -0.171380}, {-0.059621, -0.050671}, { 0.166131, 0.139773}, { 0.213069, 0.190761}, { 0.061820, 0.037661}, { 0.136471, 0.090823}, {-0.019789, 0.013515}, { 0.022280, 0.079473}, { 0.215281, 0.461959}, { 0.206497, 0.340077}, { 0.012249, -0.065596}, { 0.091345, 0.190871}, { 0.019506, 0.037266}, 
{-0.050702, -0.013223}, {-0.057334, 0.028943}, { 0.291512, 0.371415}, {-0.053467, 0.084160}, { 0.025372, 0.375310}, { 0.269995, 0.566520}, {-0.095259, -0.012353}, { 0.050479, 0.212893}, { 0.101219, 0.049320}, { 0.072426, 0.283362}, {-0.084116, -0.150542}, {-0.031485, 0.144922}, { 0.012714, 0.256910}, {-0.009528, 0.102768}, {-0.039572, 0.204967}, {-0.098800, 0.055038}, { 0.020719, 0.128387}, {-0.045559, -0.178373}, {-0.082338, 0.136933}, {-0.058270, 0.292806}, { 0.084242, 0.505112}, { 0.121825, 0.326386}, {-0.102658, -0.069341}, { 0.071675, 0.004744}, {-0.117763, -0.202608} }; static const float lsf_cb2[128][2] = { { 0.025412, 0.006095}, {-0.069803, 0.010650}, {-0.175957, -0.185800}, {-0.139298, -0.048013}, {-0.156150, -0.129688}, {-0.160523, 0.068022}, { 0.199683, 0.259982}, { 0.258038, 0.236147}, { 0.367089, 0.304716}, { 0.251764, 0.305853}, { 0.394314, 0.382153}, { 0.448579, 0.337438}, { 0.323286, 0.425563}, { 0.015369, 0.123820}, {-0.026770, 0.083881}, {-0.112161, -0.097993}, {-0.221847, -0.161311}, {-0.050014, -0.092862}, {-0.214960, -0.398498}, {-0.114062, -0.241381}, { 0.137950, 0.138852}, { 0.031529, 0.065719}, { 0.208734, 0.084760}, { 0.157862, 0.057535}, { 0.124750, 0.011922}, {-0.035227, -0.154397}, {-0.105523, -0.291427}, {-0.073488, -0.201948}, {-0.224184, -0.273290}, {-0.168019, -0.240297}, {-0.271591, -0.384682}, {-0.124784, 0.014253}, { 0.004210, -0.110418}, { 0.074270, -0.014272}, { 0.053058, -0.068672}, {-0.090098, -0.145019}, { 0.303214, 0.210323}, { 0.413443, 0.272002}, { 0.356904, 0.230646}, {-0.035186, -0.028579}, {-0.117558, 0.115105}, {-0.159225, 0.218385}, {-0.230178, 0.172901}, {-0.216148, -0.110195}, { 0.309444, 0.101508}, { 0.250489, 0.118338}, { 0.293324, 0.151205}, {-0.023634, 0.033084}, { 0.076708, 0.114024}, { 0.123119, 0.087704}, {-0.060265, 0.126543}, {-0.223766, -0.021903}, {-0.241987, -0.328089}, { 0.205598, 0.147925}, {-0.087010, 0.064601}, {-0.287892, -0.286099}, {-0.179451, -0.350781}, {-0.219572, 0.043816}, {-0.217263, 
0.245550}, {-0.286743, -0.180981}, { 0.172659, 0.112620}, {-0.105422, 0.176856}, { 0.006176, -0.051491}, { 0.099802, 0.176322}, {-0.186620, -0.068980}, { 0.164689, 0.185018}, { 0.519877, 0.376111}, { 0.521941, 0.533731}, { 0.473375, 0.439534}, { 0.214235, 0.202476}, { 0.579215, 0.466969}, { 0.310414, 0.271057}, { 0.257450, 0.058939}, { 0.023936, -0.169464}, {-0.268817, -0.064531}, {-0.174182, -0.000198}, {-0.268405, -0.234529}, {-0.296522, 0.247140}, { 0.115950, -0.072194}, {-0.303666, 0.149084}, {-0.347762, -0.011002}, {-0.223829, -0.214137}, {-0.278958, -0.457975}, { 0.135500, 0.238466}, { 0.312730, 0.342760}, { 0.071754, -0.125912}, { 0.485938, 0.260429}, { 0.037536, 0.179771}, { 0.391493, 0.156938}, { 0.397320, 0.484446}, {-0.308630, -0.342418}, {-0.269599, -0.128453}, {-0.086683, -0.043863}, { 0.421115, 0.213521}, { 0.082417, 0.049006}, {-0.087873, 0.238126}, { 0.338899, 0.166131}, {-0.166988, 0.147105}, {-0.167214, -0.294075}, { 0.588706, 0.328303}, { 0.207270, 0.017671}, {-0.141658, 0.291147}, {-0.140850, 0.374321}, { 0.028180, 0.322510}, {-0.229858, 0.328036}, {-0.060743, -0.260916}, {-0.011131, 0.246442}, {-0.058151, 0.310760}, {-0.127536, -0.186432}, {-0.128523, -0.334884}, {-0.283899, 0.077729}, {-0.031595, 0.181015}, {-0.329330, -0.108630}, {-0.215739, 0.107458}, { 0.175734, 0.327134}, { 0.255801, 0.176077}, { 0.228265, 0.396859}, {-0.370909, -0.185081}, {-0.355138, -0.300405}, { 0.061669, 0.242616}, { 0.104489, 0.307995}, {-0.320021, -0.234002}, { 0.077349, 0.416286}, {-0.339471, -0.407609}, {-0.019384, -0.215111}, { 0.168229, -0.032453}, {-0.040140, 0.399658}, {-0.275141, 0.008218} }; static const float lsf_cb3[128][2] = { { 0.024608, 0.006198}, {-0.216616, -0.398169}, {-0.089601, -0.201370}, {-0.121878, -0.305281}, { 0.037913, 0.059320}, { 0.245126, 0.244089}, { 0.266853, 0.182476}, { 0.319362, 0.203481}, { 0.349945, 0.252644}, { 0.393849, 0.279272}, { 0.445707, 0.258063}, { 0.387321, 0.200855}, {-0.038818, 0.129603}, {-0.009510, 0.076441}, 
{-0.023892, -0.028199}, {-0.117134, -0.145990}, {-0.186585, -0.052886}, {-0.034250, -0.084547}, {-0.087443, -0.095426}, {-0.453322, -0.174493}, {-0.363975, -0.148186}, {-0.334413, -0.202479}, {-0.221313, -0.181320}, {-0.131146, -0.050611}, {-0.104706, 0.115139}, { 0.192765, 0.275417}, { 0.014184, 0.194251}, { 0.154215, 0.226949}, { 0.084031, 0.221759}, { 0.189438, 0.164566}, { 0.130737, 0.170962}, {-0.066815, 0.062954}, {-0.177176, -0.145167}, {-0.247608, -0.129767}, {-0.187886, -0.293720}, {-0.244036, -0.344655}, {-0.203063, -0.234947}, {-0.292715, -0.158421}, { 0.064990, -0.028164}, { 0.147664, 0.085995}, { 0.107977, 0.002253}, { 0.071286, 0.027533}, { 0.021017, -0.049807}, {-0.272056, -0.217857}, {-0.065596, 0.008375}, {-0.150818, -0.195514}, {-0.012767, -0.150787}, { 0.238541, 0.136606}, { 0.291741, 0.114024}, { 0.202677, 0.103701}, { 0.140985, 0.037759}, {-0.257347, -0.442383}, {-0.320666, -0.319742}, {-0.488725, -0.603660}, {-0.319170, -0.469806}, { 0.014970, -0.101074}, { 0.102209, 0.066790}, {-0.076202, -0.044884}, { 0.073868, 0.152565}, { 0.070755, -0.091358}, {-0.016751, 0.027216}, { 0.071201, 0.096981}, {-0.060975, -0.145638}, { 0.114156, 0.117587}, {-0.284757, -0.029101}, {-0.253005, -0.073645}, {-0.204028, -0.098492}, {-0.114508, 0.001219}, {-0.225284, -0.011998}, {-0.235670, 0.084330}, { 0.161921, 0.128334}, { 0.025717, 0.119456}, {-0.255292, -0.281471}, {-0.392803, -0.095809}, { 0.039229, -0.152110}, {-0.310905, -0.099233}, {-0.268773, 0.032308}, {-0.340150, 0.013129}, {-0.344890, -0.045157}, {-0.188423, 0.265603}, {-0.168235, -0.000936}, { 0.000462, 0.297000}, { 0.263674, 0.371214}, {-0.146797, -0.098225}, {-0.386557, -0.282426}, {-0.070940, -0.255550}, { 0.293258, 0.252785}, { 0.408332, 0.387751}, {-0.381914, -0.358918}, {-0.463621, -0.315560}, {-0.323681, -0.258465}, { 0.250055, 0.071195}, {-0.405256, -0.429754}, {-0.135748, -0.251274}, { 0.186827, 0.060177}, { 0.116742, -0.053526}, {-0.403321, -0.220339}, {-0.414144, -0.021108}, {-0.416877, 
0.050184}, {-0.470083, -0.079564}, {-0.315554, 0.219217}, {-0.273183, 0.138437}, { 0.253231, 0.306374}, { 0.177802, 0.346298}, { 0.210358, 0.207697}, {-0.323480, 0.077519}, {-0.193136, 0.048170}, { 0.114492, 0.292778}, {-0.130766, 0.056677}, {-0.171572, -0.349267}, {-0.370076, -0.536392}, {-0.311109, -0.389953}, { 0.334928, 0.367664}, { 0.351246, 0.438664}, { 0.518803, 0.331253}, { 0.437061, 0.327257}, { 0.318906, 0.307389}, {-0.025972, -0.206758}, { 0.373278, 0.325438}, { 0.473488, 0.389441}, { 0.478553, 0.477990}, { 0.332783, 0.153825}, { 0.212098, 0.452336}, { 0.161522, -0.011212}, { 0.209368, 0.020687}, {-0.086262, 0.204493}, {-0.388643, 0.133640}, {-0.177016, 0.134404} }; static const float lsf_cb4[128][2] = { {-0.003594, -0.022447}, { 0.070651, 0.028334}, {-0.290374, -0.018347}, {-0.224495, -0.370312}, {-0.269555, -0.131227}, {-0.122714, -0.267733}, { 0.173325, 0.138698}, { 0.161946, 0.020687}, { 0.111706, 0.022510}, { 0.097638, 0.056049}, { 0.139754, 0.059920}, { 0.056549, -0.050586}, { 0.036301, 0.021501}, {-0.066347, 0.012324}, {-0.066972, 0.096136}, {-0.120062, -0.084201}, { 0.011225, 0.047425}, {-0.012846, -0.067390}, {-0.116201, 0.122874}, {-0.027819, 0.035453}, {-0.024743, 0.072835}, {-0.034061, -0.001310}, { 0.077469, 0.081609}, { 0.128347, 0.139584}, { 0.183416, 0.086563}, {-0.155839, -0.053775}, {-0.190403, -0.018639}, {-0.202548, -0.062841}, {-0.373733, -0.275094}, {-0.394260, -0.186513}, {-0.465700, -0.220031}, { 0.064400, -0.095825}, {-0.262053, -0.199837}, {-0.167233, -0.094402}, { 0.048600, 0.057567}, {-0.007122, 0.168506}, { 0.050938, 0.156451}, {-0.060828, 0.147083}, {-0.171889, 0.195822}, {-0.218934, 0.138431}, {-0.270532, 0.195775}, {-0.405818, 0.075643}, {-0.440187, 0.193387}, {-0.484968, 0.157607}, {-0.480560, 0.067230}, {-0.436757, -0.111847}, {-0.040731, -0.040363}, {-0.202319, -0.170457}, {-0.158515, -0.134551}, {-0.356709, -0.378549}, {-0.268820, -0.289831}, {-0.188486, -0.289306}, {-0.148139, -0.177616}, {-0.071591, -0.191128}, 
{-0.052270, -0.150589}, {-0.020543, -0.116220}, { 0.039584, -0.012592}, {-0.268226, 0.042704}, {-0.209755, 0.069423}, {-0.168964, 0.124504}, {-0.363240, 0.188266}, {-0.524935, -0.025010}, {-0.105894, -0.002699}, {-0.251830, -0.062018}, {-0.310480, -0.082325}, { 0.014652, 0.083127}, {-0.136512, 0.033116}, {-0.073755, -0.025236}, { 0.110766, 0.095954}, { 0.002878, 0.011838}, {-0.074977, -0.244586}, {-0.047023, -0.081339}, {-0.183249, 0.029525}, { 0.263435, 0.206934}, {-0.156721, -0.229993}, {-0.112224, -0.208941}, {-0.116534, -0.123191}, {-0.073988, -0.111668}, { 0.029484, -0.137573}, {-0.009802, -0.161685}, {-0.023273, 0.114043}, {-0.332651, 0.049072}, {-0.394009, 0.018608}, {-0.433543, -0.035318}, {-0.368459, -0.108024}, {-0.350215, -0.037617}, {-0.321140, -0.178537}, { 0.020307, -0.048487}, {-0.210512, -0.232274}, {-0.082140, -0.065443}, { 0.081961, -0.009340}, { 0.146794, 0.101973}, { 0.213999, 0.124687}, { 0.100217, -0.054095}, {-0.114411, -0.041403}, {-0.097631, 0.037061}, {-0.099651, -0.157978}, {-0.215790, -0.116550}, {-0.107100, 0.076300}, { 0.084653, 0.126088}, { 0.246439, 0.091442}, { 0.160077, 0.188536}, { 0.273900, 0.279190}, { 0.320417, 0.232550}, { 0.132710, -0.018988}, { 0.018950, -0.091681}, {-0.032073, -0.202906}, { 0.212789, 0.178188}, { 0.208580, 0.239726}, { 0.049420, 0.099840}, {-0.145695, -0.010619}, {-0.132525, -0.322660}, { 0.019666, 0.126603}, { 0.260809, 0.147727}, {-0.232795, -0.001090}, {-0.049826, 0.225987}, {-0.154774, 0.076614}, { 0.045032, 0.221397}, { 0.321014, 0.161632}, {-0.062379, 0.053586}, { 0.132252, 0.246675}, { 0.392627, 0.271905}, {-0.264585, 0.102344}, {-0.327200, 0.121624}, {-0.399642, 0.124445}, {-0.108335, 0.179171}, { 0.100374, 0.182731}, { 0.203852, 0.049505} }; static const float lsf_cb5[32][2] = { {-0.047705, 0.008002}, { 0.011332, 0.065028}, {-0.021796, -0.034777}, {-0.147394, -0.001241}, {-0.001577, 0.020599}, {-0.083827, -0.028975}, {-0.177707, 0.066046}, {-0.043241, -0.165144}, { 0.053322, 0.096519}, {-0.097688, 
0.106484}, {-0.023392, 0.111234}, {-0.146747, -0.159360}, { 0.027241, -0.011806}, {-0.043156, 0.057667}, { 0.019516, -0.062116}, { 0.025990, 0.162533}, { 0.091888, 0.009720}, {-0.098511, 0.036414}, { 0.013722, -0.116512}, { 0.054833, -0.180975}, { 0.119497, 0.128774}, { 0.118378, -0.125997}, { 0.065882, -0.030932}, { 0.120581, -0.039964}, {-0.050561, -0.088577}, { 0.050134, 0.033194}, {-0.129654, -0.075112}, {-0.225334, -0.040234}, { 0.070629, -0.084455}, { 0.095508, 0.063548}, { 0.150514, 0.034366}, { 0.186092, -0.069272} }; static const float *lsf_codebooks[] = { lsf_cb1[0], lsf_cb2[0], lsf_cb3[0], lsf_cb4[0], lsf_cb5[0] }; static const float gain_cb[128][2] = { {0.035230, 0.161540}, {0.049223, 0.448359}, {0.057443, 0.809043}, {0.072434, 1.760306}, {0.111491, 0.566418}, {0.112820, 1.098524}, {0.143493, 0.726856}, {0.144840, 0.347800}, {0.180341, 1.050010}, {0.188171, 2.197256}, {0.189771, 0.256947}, {0.198260, 0.484678}, {0.210622, 0.755825}, {0.220694, 0.590788}, {0.237062, 1.322214}, {0.255175, 0.338710}, {0.298980, 0.919051}, {0.314627, 0.520961}, {0.337106, 1.469863}, {0.341422, 2.804546}, {0.363257, 0.736222}, {0.363881, 0.367640}, {0.369850, 1.937934}, {0.370136, 1.075201}, {0.397152, 0.549410}, {0.426557, 0.876015}, {0.450686, 0.215588}, {0.468116, 0.671848}, {0.470495, 1.242034}, {0.474180, 1.739845}, {0.484875, 0.490564}, {0.498917, 0.971238}, {0.530996, 0.785765}, {0.539768, 2.130689}, {0.546021, 0.589544}, {0.546632, 3.050846}, {0.552336, 0.389775}, {0.556302, 1.400103}, {0.559688, 1.105421}, {0.574140, 0.667513}, {0.595547, 0.828943}, {0.597771, 0.496929}, {0.617079, 1.863075}, {0.619657, 1.221713}, {0.621172, 0.950275}, {0.628426, 0.630766}, {0.628689, 4.242164}, {0.640899, 1.529846}, {0.645813, 0.331127}, {0.653056, 0.748168}, {0.662909, 1.077438}, {0.669505, 2.631114}, {0.681570, 1.839298}, {0.687844, 0.903400}, {0.688660, 1.270830}, {0.695070, 0.578227}, {0.697926, 0.428440}, {0.715454, 0.812355}, {0.729981, 1.539357}, {0.737434, 1.106765}, 
{0.740241, 2.033374}, {0.740871, 0.568460}, {0.752689, 0.698461}, {0.756587, 0.893078}, {0.767797, 0.499246}, {0.768516, 3.712434}, {0.773153, 1.332360}, {0.786125, 1.042996}, {0.788792, 0.238388}, {0.790861, 2.273229}, {0.795338, 1.582767}, {0.809621, 0.595501}, {0.821032, 0.756460}, {0.824590, 0.922925}, {0.826019, 1.186793}, {0.827426, 1.885076}, {0.830080, 6.088666}, {0.837028, 2.819993}, {0.845561, 1.490623}, {0.848323, 0.410436}, {0.856522, 0.729725}, {0.862636, 0.966880}, {0.874561, 1.681660}, {0.874751, 1.177630}, {0.879289, 2.301300}, {0.886671, 0.613068}, {0.896729, 0.781097}, {0.904777, 3.484111}, {0.906098, 1.330892}, {0.919182, 1.877203}, {0.919901, 0.569511}, {0.921772, 1.034126}, {0.922439, 0.376000}, {0.934221, 1.485214}, {0.938842, 0.869135}, {0.939166, 2.378294}, {0.958933, 1.122722}, {0.959042, 0.694098}, {0.960995, 1.743430}, {0.970763, 2.884897}, {0.982881, 0.814506}, {0.990141, 1.330022}, {0.996447, 1.823381}, {1.000013, 0.967498}, {1.000743, 0.480597}, {1.008020, 5.095226}, {1.013883, 2.105435}, {1.026438, 0.691312}, {1.027361, 1.558169}, {1.030123, 3.586526}, {1.033916, 1.118036}, {1.039315, 2.543360}, {1.068596, 0.836380}, {1.081023, 1.318768}, {1.093150, 2.267843}, {1.095607, 1.712383}, {1.102816, 1.037334}, {1.103231, 3.536292}, {1.107320, 0.508615}, {1.150000, 7.999000}, {1.156731, 1.236772}, {1.168428, 2.268084}, {1.184130, 0.775839}, {1.210609, 1.511840}, {1.220663, 4.365683}, {1.224016, 0.983179}, {1.252236, 2.778535}, {1.301176, 1.923126} }; static const float pred[4] = { 0.200, 0.334, 0.504, 0.691 }; #endif /* AVCODEC_SIPRDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/siprdata.h
C
asf20
16,696
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAN (Arai Agui Nakajima) (I)DCT tables */ #ifndef AVCODEC_AANDCTTAB_H #define AVCODEC_AANDCTTAB_H #include <stdint.h> extern const uint16_t ff_aanscales[64]; extern const uint16_t ff_inv_aanscales[64]; #endif /* AVCODEC_AANDCTTAB_H */
123linslouis-android-video-cutter
jni/libavcodec/aandcttab.h
C
asf20
1,020
/* * H261 decoder * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2004 Maarten Daniels * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * h261codec. */ #ifndef AVCODEC_H261_H #define AVCODEC_H261_H #include "mpegvideo.h" /** * H261Context */ typedef struct H261Context{ MpegEncContext s; int current_mba; int previous_mba; int mba_diff; int mtype; int current_mv_x; int current_mv_y; int gob_number; int gob_start_code_skipped; // 1 if gob start code is already read before gob header is read }H261Context; #define MB_TYPE_H261_FIL 0x800000 #endif /* AVCODEC_H261_H */
123linslouis-android-video-cutter
jni/libavcodec/h261.h
C
asf20
1,379
/* * Duck/ON2 TrueMotion 2 Decoder * Copyright (c) 2005 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Duck TrueMotion2 decoder. */ #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #define TM2_ESCAPE 0x80000000 #define TM2_DELTAS 64 /* Huffman-coded streams of different types of blocks */ enum TM2_STREAMS{ TM2_C_HI = 0, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE, TM2_NUM_STREAMS}; /* Block types */ enum TM2_BLOCKS{ TM2_HI_RES = 0, TM2_MED_RES, TM2_LOW_RES, TM2_NULL_RES, TM2_UPDATE, TM2_STILL, TM2_MOTION}; typedef struct TM2Context{ AVCodecContext *avctx; AVFrame pic; GetBitContext gb; DSPContext dsp; /* TM2 streams */ int *tokens[TM2_NUM_STREAMS]; int tok_lens[TM2_NUM_STREAMS]; int tok_ptrs[TM2_NUM_STREAMS]; int deltas[TM2_NUM_STREAMS][TM2_DELTAS]; /* for blocks decoding */ int D[4]; int CD[4]; int *last; int *clast; /* data for current and previous frame */ int *Y1, *U1, *V1, *Y2, *U2, *V2; int cur; } TM2Context; /** * Huffman codes for each of streams */ typedef struct TM2Codes{ VLC vlc; ///< table for FFmpeg bitstream reader int bits; int *recode; ///< table for converting from code indexes to values int length; } TM2Codes; /** * structure for gathering Huffman codes information */ typedef struct TM2Huff{ int 
val_bits; ///< length of literal int max_bits; ///< maximum length of code int min_bits; ///< minimum length of code int nodes; ///< total number of nodes in tree int num; ///< current number filled int max_num; ///< total number of codes int *nums; ///< literals uint32_t *bits; ///< codes int *lens; ///< codelengths } TM2Huff; static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff) { if(length > huff->max_bits) { av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n", huff->max_bits); return -1; } if(!get_bits1(&ctx->gb)) { /* literal */ if (length == 0) { length = 1; } if(huff->num >= huff->max_num) { av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n"); return -1; } huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits); huff->bits[huff->num] = prefix; huff->lens[huff->num] = length; huff->num++; return 0; } else { /* non-terminal node */ if(tm2_read_tree(ctx, prefix << 1, length + 1, huff) == -1) return -1; if(tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff) == -1) return -1; } return 0; } static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code) { TM2Huff huff; int res = 0; huff.val_bits = get_bits(&ctx->gb, 5); huff.max_bits = get_bits(&ctx->gb, 5); huff.min_bits = get_bits(&ctx->gb, 5); huff.nodes = get_bits_long(&ctx->gb, 17); huff.num = 0; /* check for correct codes parameters */ if((huff.val_bits < 1) || (huff.val_bits > 32) || (huff.max_bits < 0) || (huff.max_bits > 32)) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n", huff.val_bits, huff.max_bits); return -1; } if((huff.nodes < 0) || (huff.nodes > 0x10000)) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes); return -1; } /* one-node tree */ if(huff.max_bits == 0) huff.max_bits = 1; /* allocate space for codes - it is exactly ceil(nodes / 2) entries */ huff.max_num = (huff.nodes + 1) >> 1; huff.nums = av_mallocz(huff.max_num * 
sizeof(int)); huff.bits = av_mallocz(huff.max_num * sizeof(uint32_t)); huff.lens = av_mallocz(huff.max_num * sizeof(int)); if(tm2_read_tree(ctx, 0, 0, &huff) == -1) res = -1; if(huff.num != huff.max_num) { av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n", huff.num, huff.max_num); res = -1; } /* convert codes to vlc_table */ if(res != -1) { int i; res = init_vlc(&code->vlc, huff.max_bits, huff.max_num, huff.lens, sizeof(int), sizeof(int), huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0); if(res < 0) { av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n"); res = -1; } else res = 0; if(res != -1) { code->bits = huff.max_bits; code->length = huff.max_num; code->recode = av_malloc(code->length * sizeof(int)); for(i = 0; i < code->length; i++) code->recode[i] = huff.nums[i]; } } /* free allocated memory */ av_free(huff.nums); av_free(huff.bits); av_free(huff.lens); return res; } static void tm2_free_codes(TM2Codes *code) { if(code->recode) av_free(code->recode); if(code->vlc.table) free_vlc(&code->vlc); } static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code) { int val; val = get_vlc2(gb, code->vlc.table, code->bits, 1); return code->recode[val]; } static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf) { uint32_t magic; const uint8_t *obuf; int length; obuf = buf; magic = AV_RL32(buf); buf += 4; if(magic == 0x00000100) { /* old header */ /* av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */ return 40; } else if(magic == 0x00000101) { /* new header */ int w, h, size, flags, xr, yr; length = AV_RL32(buf); buf += 4; init_get_bits(&ctx->gb, buf, 32 * 8); size = get_bits_long(&ctx->gb, 31); h = get_bits(&ctx->gb, 15); w = get_bits(&ctx->gb, 15); flags = get_bits_long(&ctx->gb, 31); yr = get_bits(&ctx->gb, 9); xr = get_bits(&ctx->gb, 9); return 40; } else { av_log (ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08X\n", magic); return -1; } return buf - obuf; } static int 
tm2_read_deltas(TM2Context *ctx, int stream_id) { int d, mb; int i, v; d = get_bits(&ctx->gb, 9); mb = get_bits(&ctx->gb, 5); if((d < 1) || (d > TM2_DELTAS) || (mb < 1) || (mb > 32)) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb); return -1; } for(i = 0; i < d; i++) { v = get_bits_long(&ctx->gb, mb); if(v & (1 << (mb - 1))) ctx->deltas[stream_id][i] = v - (1 << mb); else ctx->deltas[stream_id][i] = v; } for(; i < TM2_DELTAS; i++) ctx->deltas[stream_id][i] = 0; return 0; } static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id) { int i; int cur = 0; int skip = 0; int len, toks; TM2Codes codes; /* get stream length in dwords */ len = AV_RB32(buf); buf += 4; cur += 4; skip = len * 4 + 4; if(len == 0) return 4; toks = AV_RB32(buf); buf += 4; cur += 4; if(toks & 1) { len = AV_RB32(buf); buf += 4; cur += 4; if(len == TM2_ESCAPE) { len = AV_RB32(buf); buf += 4; cur += 4; } if(len > 0) { init_get_bits(&ctx->gb, buf, (skip - cur) * 8); if(tm2_read_deltas(ctx, stream_id) == -1) return -1; buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; } } /* skip unused fields */ if(AV_RB32(buf) == TM2_ESCAPE) { buf += 4; cur += 4; /* some unknown length - could be escaped too */ } buf += 4; cur += 4; buf += 4; cur += 4; /* unused by decoder */ init_get_bits(&ctx->gb, buf, (skip - cur) * 8); if(tm2_build_huff_table(ctx, &codes) == -1) return -1; buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; toks >>= 1; /* check if we have sane number of tokens */ if((toks < 0) || (toks > 0xFFFFFF)){ av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks); tm2_free_codes(&codes); return -1; } ctx->tokens[stream_id] = av_realloc(ctx->tokens[stream_id], toks * sizeof(int)); ctx->tok_lens[stream_id] = toks; len = AV_RB32(buf); buf += 4; cur += 4; if(len > 0) { init_get_bits(&ctx->gb, buf, (skip - cur) * 8); for(i = 
0; i < toks; i++) ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes); } else { for(i = 0; i < toks; i++) ctx->tokens[stream_id][i] = codes.recode[0]; } tm2_free_codes(&codes); return skip; } static inline int GET_TOK(TM2Context *ctx,int type) { if(ctx->tok_ptrs[type] >= ctx->tok_lens[type]) { av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]); return 0; } if(type <= TM2_MOT) return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]]; return ctx->tokens[type][ctx->tok_ptrs[type]++]; } /* blocks decoding routines */ /* common Y, U, V pointers initialisation */ #define TM2_INIT_POINTERS() \ int *last, *clast; \ int *Y, *U, *V;\ int Ystride, Ustride, Vstride;\ \ Ystride = ctx->avctx->width;\ Vstride = (ctx->avctx->width + 1) >> 1;\ Ustride = (ctx->avctx->width + 1) >> 1;\ Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\ V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\ U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\ last = ctx->last + bx * 4;\ clast = ctx->clast + bx * 4; #define TM2_INIT_POINTERS_2() \ int *Yo, *Uo, *Vo;\ int oYstride, oUstride, oVstride;\ \ TM2_INIT_POINTERS();\ oYstride = Ystride;\ oVstride = Vstride;\ oUstride = Ustride;\ Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\ Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\ Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2; /* recalculate last and delta values for next blocks */ #define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\ CD[0] = CHR[1] - last[1];\ CD[1] = (int)CHR[stride + 1] - (int)CHR[1];\ last[0] = (int)CHR[stride + 0];\ last[1] = (int)CHR[stride + 1];} /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */ static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last) { int ct, d; int i, j; for(j = 0; j < 4; j++){ ct = ctx->D[j]; for(i = 0; i < 4; i++){ d = 
deltas[i + j * 4]; ct += d; last[i] += ct; Y[i] = av_clip_uint8(last[i]); } Y += stride; ctx->D[j] = ct; } } static inline void tm2_high_chroma(int *data, int stride, int *last, int *CD, int *deltas) { int i, j; for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ CD[j] += deltas[i + j * 2]; last[i] += CD[j]; data[i] = last[i]; } data += stride; } } static inline void tm2_low_chroma(int *data, int stride, int *clast, int *CD, int *deltas, int bx) { int t; int l; int prev; if(bx > 0) prev = clast[-3]; else prev = 0; t = (CD[0] + CD[1]) >> 1; l = (prev - CD[0] - CD[1] + clast[1]) >> 1; CD[1] = CD[0] + CD[1] - t; CD[0] = t; clast[0] = l; tm2_high_chroma(data, stride, clast, CD, deltas); } static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i; int deltas[16]; TM2_INIT_POINTERS(); /* hi-res chroma */ for(i = 0; i < 4; i++) { deltas[i] = GET_TOK(ctx, TM2_C_HI); deltas[i + 4] = GET_TOK(ctx, TM2_C_HI); } tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas); tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4); /* hi-res luma */ for(i = 0; i < 16; i++) deltas[i] = GET_TOK(ctx, TM2_L_HI); tm2_apply_deltas(ctx, Y, Ystride, deltas, last); } static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i; int deltas[16]; TM2_INIT_POINTERS(); /* low-res chroma */ deltas[0] = GET_TOK(ctx, TM2_C_LO); deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx); deltas[0] = GET_TOK(ctx, TM2_C_LO); deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx); /* hi-res luma */ for(i = 0; i < 16; i++) deltas[i] = GET_TOK(ctx, TM2_L_HI); tm2_apply_deltas(ctx, Y, Ystride, deltas, last); } static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i; int t1, t2; int deltas[16]; TM2_INIT_POINTERS(); /* low-res chroma */ deltas[0] = GET_TOK(ctx, TM2_C_LO); deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(U, 
Ustride, clast, ctx->CD, deltas, bx); deltas[0] = GET_TOK(ctx, TM2_C_LO); deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx); /* low-res luma */ for(i = 0; i < 16; i++) deltas[i] = 0; deltas[ 0] = GET_TOK(ctx, TM2_L_LO); deltas[ 2] = GET_TOK(ctx, TM2_L_LO); deltas[ 8] = GET_TOK(ctx, TM2_L_LO); deltas[10] = GET_TOK(ctx, TM2_L_LO); if(bx > 0) last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1; else last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1; last[2] = (last[1] + last[3]) >> 1; t1 = ctx->D[0] + ctx->D[1]; ctx->D[0] = t1 >> 1; ctx->D[1] = t1 - (t1 >> 1); t2 = ctx->D[2] + ctx->D[3]; ctx->D[2] = t2 >> 1; ctx->D[3] = t2 - (t2 >> 1); tm2_apply_deltas(ctx, Y, Ystride, deltas, last); } static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i; int ct; int left, right, diff; int deltas[16]; TM2_INIT_POINTERS(); /* null chroma */ deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx); deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0; tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx); /* null luma */ for(i = 0; i < 16; i++) deltas[i] = 0; ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3]; if(bx > 0) left = last[-1] - ct; else left = 0; right = last[3]; diff = right - left; last[0] = left + (diff >> 2); last[1] = left + (diff >> 1); last[2] = right - (diff >> 2); last[3] = right; { int tp = left; ctx->D[0] = (tp + (ct >> 2)) - left; left += ctx->D[0]; ctx->D[1] = (tp + (ct >> 1)) - left; left += ctx->D[1]; ctx->D[2] = ((tp + ct) - (ct >> 2)) - left; left += ctx->D[2]; ctx->D[3] = (tp + ct) - left; } tm2_apply_deltas(ctx, Y, Ystride, deltas, last); } static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i, j; TM2_INIT_POINTERS_2(); /* update chroma */ for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ U[i] = Uo[i]; V[i] = Vo[i]; } U += 
Ustride; V += Vstride; Uo += oUstride; Vo += oVstride; } U -= Ustride * 2; V -= Vstride * 2; TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD); TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2)); /* update deltas */ ctx->D[0] = Yo[3] - last[3]; ctx->D[1] = Yo[3 + oYstride] - Yo[3]; ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride]; ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2]; for(j = 0; j < 4; j++){ for(i = 0; i < 4; i++){ Y[i] = Yo[i]; last[i] = Yo[i]; } Y += Ystride; Yo += oYstride; } } static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i, j; int d; TM2_INIT_POINTERS_2(); /* update chroma */ for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD); V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD); } U += Ustride; V += Vstride; Uo += oUstride; Vo += oVstride; } U -= Ustride * 2; V -= Vstride * 2; TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD); TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2)); /* update deltas */ ctx->D[0] = Yo[3] - last[3]; ctx->D[1] = Yo[3 + oYstride] - Yo[3]; ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride]; ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2]; for(j = 0; j < 4; j++){ d = last[3]; for(i = 0; i < 4; i++){ Y[i] = Yo[i] + GET_TOK(ctx, TM2_UPD); last[i] = Y[i]; } ctx->D[j] = last[3] - d; Y += Ystride; Yo += oYstride; } } static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by) { int i, j; int mx, my; TM2_INIT_POINTERS_2(); mx = GET_TOK(ctx, TM2_MOT); my = GET_TOK(ctx, TM2_MOT); Yo += my * oYstride + mx; Uo += (my >> 1) * oUstride + (mx >> 1); Vo += (my >> 1) * oVstride + (mx >> 1); /* copy chroma */ for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ U[i] = Uo[i]; V[i] = Vo[i]; } U += Ustride; V += Vstride; Uo += oUstride; Vo += oVstride; } U -= Ustride * 2; V -= Vstride * 2; TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD); TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2)); /* copy luma */ for(j = 0; j < 4; j++){ for(i = 0; i < 
4; i++){ Y[i] = Yo[i]; } Y += Ystride; Yo += oYstride; } /* calculate deltas */ Y -= Ystride * 4; ctx->D[0] = Y[3] - last[3]; ctx->D[1] = Y[3 + Ystride] - Y[3]; ctx->D[2] = Y[3 + Ystride * 2] - Y[3 + Ystride]; ctx->D[3] = Y[3 + Ystride * 3] - Y[3 + Ystride * 2]; for(i = 0; i < 4; i++) last[i] = Y[i + Ystride * 3]; } static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p) { int i, j; int bw, bh; int type; int keyframe = 1; int *Y, *U, *V; uint8_t *dst; bw = ctx->avctx->width >> 2; bh = ctx->avctx->height >> 2; for(i = 0; i < TM2_NUM_STREAMS; i++) ctx->tok_ptrs[i] = 0; if (ctx->tok_lens[TM2_TYPE]<bw*bh){ av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh); return -1; } memset(ctx->last, 0, 4 * bw * sizeof(int)); memset(ctx->clast, 0, 4 * bw * sizeof(int)); for(j = 0; j < bh; j++) { memset(ctx->D, 0, 4 * sizeof(int)); memset(ctx->CD, 0, 4 * sizeof(int)); for(i = 0; i < bw; i++) { type = GET_TOK(ctx, TM2_TYPE); switch(type) { case TM2_HI_RES: tm2_hi_res_block(ctx, p, i, j); break; case TM2_MED_RES: tm2_med_res_block(ctx, p, i, j); break; case TM2_LOW_RES: tm2_low_res_block(ctx, p, i, j); break; case TM2_NULL_RES: tm2_null_res_block(ctx, p, i, j); break; case TM2_UPDATE: tm2_update_block(ctx, p, i, j); keyframe = 0; break; case TM2_STILL: tm2_still_block(ctx, p, i, j); keyframe = 0; break; case TM2_MOTION: tm2_motion_block(ctx, p, i, j); keyframe = 0; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type); } } } /* copy data from our buffer to AVFrame */ Y = (ctx->cur?ctx->Y2:ctx->Y1); U = (ctx->cur?ctx->U2:ctx->U1); V = (ctx->cur?ctx->V2:ctx->V1); dst = p->data[0]; for(j = 0; j < ctx->avctx->height; j++){ for(i = 0; i < ctx->avctx->width; i++){ int y = Y[i], u = U[i >> 1], v = V[i >> 1]; dst[3*i+0] = av_clip_uint8(y + v); dst[3*i+1] = av_clip_uint8(y); dst[3*i+2] = av_clip_uint8(y + u); } Y += ctx->avctx->width; if (j & 1) { U += ctx->avctx->width >> 1; V += ctx->avctx->width >> 1; } dst += 
p->linesize[0]; } return keyframe; } static const int tm2_stream_order[TM2_NUM_STREAMS] = { TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE }; static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TM2Context * const l = avctx->priv_data; AVFrame * const p= (AVFrame*)&l->pic; int i, skip, t; uint8_t *swbuf; swbuf = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if(!swbuf){ av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n"); return -1; } p->reference = 1; p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if(avctx->reget_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_free(swbuf); return -1; } l->dsp.bswap_buf((uint32_t*)swbuf, (const uint32_t*)buf, buf_size >> 2); skip = tm2_read_header(l, swbuf); if(skip == -1){ av_free(swbuf); return -1; } for(i = 0; i < TM2_NUM_STREAMS; i++){ t = tm2_read_stream(l, swbuf + skip, tm2_stream_order[i]); if(t == -1){ av_free(swbuf); return -1; } skip += t; } p->key_frame = tm2_decode_blocks(l, p); if(p->key_frame) p->pict_type = FF_I_TYPE; else p->pict_type = FF_P_TYPE; l->cur = !l->cur; *data_size = sizeof(AVFrame); *(AVFrame*)data = l->pic; av_free(swbuf); return buf_size; } static av_cold int decode_init(AVCodecContext *avctx){ TM2Context * const l = avctx->priv_data; int i; if((avctx->width & 3) || (avctx->height & 3)){ av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n"); return -1; } l->avctx = avctx; l->pic.data[0]=NULL; avctx->pix_fmt = PIX_FMT_BGR24; dsputil_init(&l->dsp, avctx); l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2)); l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2)); for(i = 0; i < TM2_NUM_STREAMS; i++) { l->tokens[i] = NULL; l->tok_lens[i] = 0; } l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height); l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) 
>> 1) * ((avctx->height + 1) >> 1)); l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height); l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); l->cur = 0; return 0; } static av_cold int decode_end(AVCodecContext *avctx){ TM2Context * const l = avctx->priv_data; AVFrame *pic = &l->pic; int i; if(l->last) av_free(l->last); if(l->clast) av_free(l->clast); for(i = 0; i < TM2_NUM_STREAMS; i++) if(l->tokens[i]) av_free(l->tokens[i]); if(l->Y1){ av_free(l->Y1); av_free(l->U1); av_free(l->V1); av_free(l->Y2); av_free(l->U2); av_free(l->V2); } if (pic->data[0]) avctx->release_buffer(avctx, pic); return 0; } AVCodec truemotion2_decoder = { "truemotion2", AVMEDIA_TYPE_VIDEO, CODEC_ID_TRUEMOTION2, sizeof(TM2Context), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"), };
123linslouis-android-video-cutter
jni/libavcodec/truemotion2.c
C
asf20
25,211
/* * Atrac 3 compatible decoder * Copyright (c) 2006-2008 Maxim Poliakovski * Copyright (c) 2006-2008 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Atrac 3 compatible decoder. * This decoder handles Sony's ATRAC3 data. * * Container formats used to store atrac 3 data: * RealMedia (.rm), RIFF WAV (.wav, .at3), Sony OpenMG (.oma, .aa3). * * To use this decoder, a calling application must supply the extradata * bytes provided in the containers above. */ #include <math.h> #include <stddef.h> #include <stdio.h> #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "bytestream.h" #include "fft.h" #include "atrac.h" #include "atrac3data.h" #define JOINT_STEREO 0x12 #define STEREO 0x2 /* These structures are needed to store the parsed gain control data. 
*/ typedef struct { int num_gain_data; int levcode[8]; int loccode[8]; } gain_info; typedef struct { gain_info gBlock[4]; } gain_block; typedef struct { int pos; int numCoefs; float coef[8]; } tonal_component; typedef struct { int bandsCoded; int numComponents; tonal_component components[64]; float prevFrame[1024]; int gcBlkSwitch; gain_block gainBlock[2]; DECLARE_ALIGNED(16, float, spectrum)[1024]; DECLARE_ALIGNED(16, float, IMDCT_buf)[1024]; float delayBuf1[46]; ///<qmf delay buffers float delayBuf2[46]; float delayBuf3[46]; } channel_unit; typedef struct { GetBitContext gb; //@{ /** stream data */ int channels; int codingMode; int bit_rate; int sample_rate; int samples_per_channel; int samples_per_frame; int bits_per_frame; int bytes_per_frame; int pBs; channel_unit* pUnits; //@} //@{ /** joint-stereo related variables */ int matrix_coeff_index_prev[4]; int matrix_coeff_index_now[4]; int matrix_coeff_index_next[4]; int weighting_delay[6]; //@} //@{ /** data buffers */ float outSamples[2048]; uint8_t* decoded_bytes_buffer; float tempBuf[1070]; //@} //@{ /** extradata */ int atrac3version; int delay; int scrambled_stream; int frame_factor; //@} } ATRAC3Context; static DECLARE_ALIGNED(16, float,mdct_window)[512]; static VLC spectral_coeff_tab[7]; static float gain_tab1[16]; static float gain_tab2[31]; static FFTContext mdct_ctx; static DSPContext dsp; /** * Regular 512 points IMDCT without overlapping, with the exception of the swapping of odd bands * caused by the reverse spectra of the QMF. * * @param pInput float input * @param pOutput float output * @param odd_band 1 if the band is an odd band */ static void IMLT(float *pInput, float *pOutput, int odd_band) { int i; if (odd_band) { /** * Reverse the odd bands before IMDCT, this is an effect of the QMF transform * or it gives better compression to do it this way. 
* FIXME: It should be possible to handle this in ff_imdct_calc * for that to happen a modification of the prerotation step of * all SIMD code and C code is needed. * Or fix the functions before so they generate a pre reversed spectrum. */ for (i=0; i<128; i++) FFSWAP(float, pInput[i], pInput[255-i]); } ff_imdct_calc(&mdct_ctx,pOutput,pInput); /* Perform windowing on the output. */ dsp.vector_fmul(pOutput,mdct_window,512); } /** * Atrac 3 indata descrambling, only used for data coming from the rm container * * @param in pointer to 8 bit array of indata * @param bits amount of bits * @param out pointer to 8 bit array of outdata */ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){ int i, off; uint32_t c; const uint32_t* buf; uint32_t* obuf = (uint32_t*) out; off = (intptr_t)inbuffer & 3; buf = (const uint32_t*) (inbuffer - off); c = be2me_32((0x537F6103 >> (off*8)) | (0x537F6103 << (32-(off*8)))); bytes += 3 + off; for (i = 0; i < bytes/4; i++) obuf[i] = c ^ buf[i]; if (off) av_log(NULL,AV_LOG_DEBUG,"Offset of %d not handled, post sample on ffmpeg-dev.\n",off); return off; } static av_cold void init_atrac3_transforms(ATRAC3Context *q) { float enc_window[256]; int i; /* Generate the mdct window, for details see * http://wiki.multimedia.cx/index.php?title=RealAudio_atrc#Windows */ for (i=0 ; i<256; i++) enc_window[i] = (sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0) * 0.5; if (!mdct_window[0]) for (i=0 ; i<256; i++) { mdct_window[i] = enc_window[i]/(enc_window[i]*enc_window[i] + enc_window[255-i]*enc_window[255-i]); mdct_window[511-i] = mdct_window[i]; } /* Initialize the MDCT transform. 
*/ ff_mdct_init(&mdct_ctx, 9, 1, 1.0); } /** * Atrac3 uninit, free all allocated memory */ static av_cold int atrac3_decode_close(AVCodecContext *avctx) { ATRAC3Context *q = avctx->priv_data; av_free(q->pUnits); av_free(q->decoded_bytes_buffer); return 0; } /** / * Mantissa decoding * * @param gb the GetBit context * @param selector what table is the output values coded with * @param codingFlag constant length coding or variable length coding * @param mantissas mantissa output table * @param numCodes amount of values to get */ static void readQuantSpectralCoeffs (GetBitContext *gb, int selector, int codingFlag, int* mantissas, int numCodes) { int numBits, cnt, code, huffSymb; if (selector == 1) numCodes /= 2; if (codingFlag != 0) { /* constant length coding (CLC) */ numBits = CLCLengthTab[selector]; if (selector > 1) { for (cnt = 0; cnt < numCodes; cnt++) { if (numBits) code = get_sbits(gb, numBits); else code = 0; mantissas[cnt] = code; } } else { for (cnt = 0; cnt < numCodes; cnt++) { if (numBits) code = get_bits(gb, numBits); //numBits is always 4 in this case else code = 0; mantissas[cnt*2] = seTab_0[code >> 2]; mantissas[cnt*2+1] = seTab_0[code & 3]; } } } else { /* variable length coding (VLC) */ if (selector != 1) { for (cnt = 0; cnt < numCodes; cnt++) { huffSymb = get_vlc2(gb, spectral_coeff_tab[selector-1].table, spectral_coeff_tab[selector-1].bits, 3); huffSymb += 1; code = huffSymb >> 1; if (huffSymb & 1) code = -code; mantissas[cnt] = code; } } else { for (cnt = 0; cnt < numCodes; cnt++) { huffSymb = get_vlc2(gb, spectral_coeff_tab[selector-1].table, spectral_coeff_tab[selector-1].bits, 3); mantissas[cnt*2] = decTable1[huffSymb*2]; mantissas[cnt*2+1] = decTable1[huffSymb*2+1]; } } } } /** * Restore the quantized band spectrum coefficients * * @param gb the GetBit context * @param pOut decoded band spectrum * @return outSubbands subband counter, fix for broken specification/files */ static int decodeSpectrum (GetBitContext *gb, float *pOut) { int 
numSubbands, codingMode, cnt, first, last, subbWidth, *pIn; int subband_vlc_index[32], SF_idxs[32]; int mantissas[128]; float SF; numSubbands = get_bits(gb, 5); // number of coded subbands codingMode = get_bits1(gb); // coding Mode: 0 - VLC/ 1-CLC /* Get the VLC selector table for the subbands, 0 means not coded. */ for (cnt = 0; cnt <= numSubbands; cnt++) subband_vlc_index[cnt] = get_bits(gb, 3); /* Read the scale factor indexes from the stream. */ for (cnt = 0; cnt <= numSubbands; cnt++) { if (subband_vlc_index[cnt] != 0) SF_idxs[cnt] = get_bits(gb, 6); } for (cnt = 0; cnt <= numSubbands; cnt++) { first = subbandTab[cnt]; last = subbandTab[cnt+1]; subbWidth = last - first; if (subband_vlc_index[cnt] != 0) { /* Decode spectral coefficients for this subband. */ /* TODO: This can be done faster is several blocks share the * same VLC selector (subband_vlc_index) */ readQuantSpectralCoeffs (gb, subband_vlc_index[cnt], codingMode, mantissas, subbWidth); /* Decode the scale factor for this subband. */ SF = sf_table[SF_idxs[cnt]] * iMaxQuant[subband_vlc_index[cnt]]; /* Inverse quantize the coefficients. */ for (pIn=mantissas ; first<last; first++, pIn++) pOut[first] = *pIn * SF; } else { /* This subband was not coded, so zero the entire subband. */ memset(pOut+first, 0, subbWidth*sizeof(float)); } } /* Clear the subbands that were not coded. 
*/ first = subbandTab[cnt]; memset(pOut+first, 0, (1024 - first) * sizeof(float)); return numSubbands; } /** * Restore the quantized tonal components * * @param gb the GetBit context * @param pComponent tone component * @param numBands amount of coded bands */ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent, int numBands) { int i,j,k,cnt; int components, coding_mode_selector, coding_mode, coded_values_per_component; int sfIndx, coded_values, max_coded_values, quant_step_index, coded_components; int band_flags[4], mantissa[8]; float *pCoef; float scalefactor; int component_count = 0; components = get_bits(gb,5); /* no tonal components */ if (components == 0) return 0; coding_mode_selector = get_bits(gb,2); if (coding_mode_selector == 2) return -1; coding_mode = coding_mode_selector & 1; for (i = 0; i < components; i++) { for (cnt = 0; cnt <= numBands; cnt++) band_flags[cnt] = get_bits1(gb); coded_values_per_component = get_bits(gb,3); quant_step_index = get_bits(gb,3); if (quant_step_index <= 1) return -1; if (coding_mode_selector == 3) coding_mode = get_bits1(gb); for (j = 0; j < (numBands + 1) * 4; j++) { if (band_flags[j >> 2] == 0) continue; coded_components = get_bits(gb,3); for (k=0; k<coded_components; k++) { sfIndx = get_bits(gb,6); pComponent[component_count].pos = j * 64 + (get_bits(gb,6)); max_coded_values = 1024 - pComponent[component_count].pos; coded_values = coded_values_per_component + 1; coded_values = FFMIN(max_coded_values,coded_values); scalefactor = sf_table[sfIndx] * iMaxQuant[quant_step_index]; readQuantSpectralCoeffs(gb, quant_step_index, coding_mode, mantissa, coded_values); pComponent[component_count].numCoefs = coded_values; /* inverse quant */ pCoef = pComponent[component_count].coef; for (cnt = 0; cnt < coded_values; cnt++) pCoef[cnt] = mantissa[cnt] * scalefactor; component_count++; } } } return component_count; } /** * Decode gain parameters for the coded bands * * @param gb the GetBit context * @param 
pGb the gainblock for the current band * @param numBands amount of coded bands */ static int decodeGainControl (GetBitContext *gb, gain_block *pGb, int numBands) { int i, cf, numData; int *pLevel, *pLoc; gain_info *pGain = pGb->gBlock; for (i=0 ; i<=numBands; i++) { numData = get_bits(gb,3); pGain[i].num_gain_data = numData; pLevel = pGain[i].levcode; pLoc = pGain[i].loccode; for (cf = 0; cf < numData; cf++){ pLevel[cf]= get_bits(gb,4); pLoc [cf]= get_bits(gb,5); if(cf && pLoc[cf] <= pLoc[cf-1]) return -1; } } /* Clear the unused blocks. */ for (; i<4 ; i++) pGain[i].num_gain_data = 0; return 0; } /** * Apply gain parameters and perform the MDCT overlapping part * * @param pIn input float buffer * @param pPrev previous float buffer to perform overlap against * @param pOut output float buffer * @param pGain1 current band gain info * @param pGain2 next band gain info */ static void gainCompensateAndOverlap (float *pIn, float *pPrev, float *pOut, gain_info *pGain1, gain_info *pGain2) { /* gain compensation function */ float gain1, gain2, gain_inc; int cnt, numdata, nsample, startLoc, endLoc; if (pGain2->num_gain_data == 0) gain1 = 1.0; else gain1 = gain_tab1[pGain2->levcode[0]]; if (pGain1->num_gain_data == 0) { for (cnt = 0; cnt < 256; cnt++) pOut[cnt] = pIn[cnt] * gain1 + pPrev[cnt]; } else { numdata = pGain1->num_gain_data; pGain1->loccode[numdata] = 32; pGain1->levcode[numdata] = 4; nsample = 0; // current sample = 0 for (cnt = 0; cnt < numdata; cnt++) { startLoc = pGain1->loccode[cnt] * 8; endLoc = startLoc + 8; gain2 = gain_tab1[pGain1->levcode[cnt]]; gain_inc = gain_tab2[(pGain1->levcode[cnt+1] - pGain1->levcode[cnt])+15]; /* interpolate */ for (; nsample < startLoc; nsample++) pOut[nsample] = (pIn[nsample] * gain1 + pPrev[nsample]) * gain2; /* interpolation is done over eight samples */ for (; nsample < endLoc; nsample++) { pOut[nsample] = (pIn[nsample] * gain1 + pPrev[nsample]) * gain2; gain2 *= gain_inc; } } for (; nsample < 256; nsample++) pOut[nsample] = 
(pIn[nsample] * gain1) + pPrev[nsample]; } /* Delay for the overlapping part. */ memcpy(pPrev, &pIn[256], 256*sizeof(float)); } /** * Combine the tonal band spectrum and regular band spectrum * Return position of the last tonal coefficient * * @param pSpectrum output spectrum buffer * @param numComponents amount of tonal components * @param pComponent tonal components for this band */ static int addTonalComponents (float *pSpectrum, int numComponents, tonal_component *pComponent) { int cnt, i, lastPos = -1; float *pIn, *pOut; for (cnt = 0; cnt < numComponents; cnt++){ lastPos = FFMAX(pComponent[cnt].pos + pComponent[cnt].numCoefs, lastPos); pIn = pComponent[cnt].coef; pOut = &(pSpectrum[pComponent[cnt].pos]); for (i=0 ; i<pComponent[cnt].numCoefs ; i++) pOut[i] += pIn[i]; } return lastPos; } #define INTERPOLATE(old,new,nsample) ((old) + (nsample)*0.125*((new)-(old))) static void reverseMatrixing(float *su1, float *su2, int *pPrevCode, int *pCurrCode) { int i, band, nsample, s1, s2; float c1, c2; float mc1_l, mc1_r, mc2_l, mc2_r; for (i=0,band = 0; band < 4*256; band+=256,i++) { s1 = pPrevCode[i]; s2 = pCurrCode[i]; nsample = 0; if (s1 != s2) { /* Selector value changed, interpolation needed. */ mc1_l = matrixCoeffs[s1*2]; mc1_r = matrixCoeffs[s1*2+1]; mc2_l = matrixCoeffs[s2*2]; mc2_r = matrixCoeffs[s2*2+1]; /* Interpolation is done over the first eight samples. */ for(; nsample < 8; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; c2 = c1 * INTERPOLATE(mc1_l,mc2_l,nsample) + c2 * INTERPOLATE(mc1_r,mc2_r,nsample); su1[band+nsample] = c2; su2[band+nsample] = c1 * 2.0 - c2; } } /* Apply the matrix without interpolation. 
*/ switch (s2) { case 0: /* M/S decoding */ for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = c2 * 2.0; su2[band+nsample] = (c1 - c2) * 2.0; } break; case 1: for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = (c1 + c2) * 2.0; su2[band+nsample] = c2 * -2.0; } break; case 2: case 3: for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = c1 + c2; su2[band+nsample] = c1 - c2; } break; default: assert(0); } } } static void getChannelWeights (int indx, int flag, float ch[2]){ if (indx == 7) { ch[0] = 1.0; ch[1] = 1.0; } else { ch[0] = (float)(indx & 7) / 7.0; ch[1] = sqrt(2 - ch[0]*ch[0]); if(flag) FFSWAP(float, ch[0], ch[1]); } } static void channelWeighting (float *su1, float *su2, int *p3) { int band, nsample; /* w[x][y] y=0 is left y=1 is right */ float w[2][2]; if (p3[1] != 7 || p3[3] != 7){ getChannelWeights(p3[1], p3[0], w[0]); getChannelWeights(p3[3], p3[2], w[1]); for(band = 1; band < 4; band++) { /* scale the channels by the weights */ for(nsample = 0; nsample < 8; nsample++) { su1[band*256+nsample] *= INTERPOLATE(w[0][0], w[0][1], nsample); su2[band*256+nsample] *= INTERPOLATE(w[1][0], w[1][1], nsample); } for(; nsample < 256; nsample++) { su1[band*256+nsample] *= w[1][0]; su2[band*256+nsample] *= w[1][1]; } } } } /** * Decode a Sound Unit * * @param gb the GetBit context * @param pSnd the channel unit to be used * @param pOut the decoded samples before IQMF in float representation * @param channelNum channel number * @param codingMode the coding mode (JOINT_STEREO or regular stereo/mono) */ static int decodeChannelSoundUnit (ATRAC3Context *q, GetBitContext *gb, channel_unit *pSnd, float *pOut, int channelNum, int codingMode) { int band, result=0, numSubbands, lastTonal, numBands; if (codingMode == JOINT_STEREO && channelNum == 1) { if (get_bits(gb,2) != 3) { av_log(NULL,AV_LOG_ERROR,"JS mono Sound Unit id != 
3.\n"); return -1; } } else { if (get_bits(gb,6) != 0x28) { av_log(NULL,AV_LOG_ERROR,"Sound Unit id != 0x28.\n"); return -1; } } /* number of coded QMF bands */ pSnd->bandsCoded = get_bits(gb,2); result = decodeGainControl (gb, &(pSnd->gainBlock[pSnd->gcBlkSwitch]), pSnd->bandsCoded); if (result) return result; pSnd->numComponents = decodeTonalComponents (gb, pSnd->components, pSnd->bandsCoded); if (pSnd->numComponents == -1) return -1; numSubbands = decodeSpectrum (gb, pSnd->spectrum); /* Merge the decoded spectrum and tonal components. */ lastTonal = addTonalComponents (pSnd->spectrum, pSnd->numComponents, pSnd->components); /* calculate number of used MLT/QMF bands according to the amount of coded spectral lines */ numBands = (subbandTab[numSubbands] - 1) >> 8; if (lastTonal >= 0) numBands = FFMAX((lastTonal + 256) >> 8, numBands); /* Reconstruct time domain samples. */ for (band=0; band<4; band++) { /* Perform the IMDCT step without overlapping. */ if (band <= numBands) { IMLT(&(pSnd->spectrum[band*256]), pSnd->IMDCT_buf, band&1); } else memset(pSnd->IMDCT_buf, 0, 512 * sizeof(float)); /* gain compensation and overlapping */ gainCompensateAndOverlap (pSnd->IMDCT_buf, &(pSnd->prevFrame[band*256]), &(pOut[band*256]), &((pSnd->gainBlock[1 - (pSnd->gcBlkSwitch)]).gBlock[band]), &((pSnd->gainBlock[pSnd->gcBlkSwitch]).gBlock[band])); } /* Swap the gain control buffers for the next frame. 
*/ pSnd->gcBlkSwitch ^= 1; return 0; } /** * Frame handling * * @param q Atrac3 private context * @param databuf the input data */ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) { int result, i; float *p1, *p2, *p3, *p4; uint8_t *ptr1; if (q->codingMode == JOINT_STEREO) { /* channel coupling mode */ /* decode Sound Unit 1 */ init_get_bits(&q->gb,databuf,q->bits_per_frame); result = decodeChannelSoundUnit(q,&q->gb, q->pUnits, q->outSamples, 0, JOINT_STEREO); if (result != 0) return (result); /* Framedata of the su2 in the joint-stereo mode is encoded in * reverse byte order so we need to swap it first. */ if (databuf == q->decoded_bytes_buffer) { uint8_t *ptr2 = q->decoded_bytes_buffer+q->bytes_per_frame-1; ptr1 = q->decoded_bytes_buffer; for (i = 0; i < (q->bytes_per_frame/2); i++, ptr1++, ptr2--) { FFSWAP(uint8_t,*ptr1,*ptr2); } } else { const uint8_t *ptr2 = databuf+q->bytes_per_frame-1; for (i = 0; i < q->bytes_per_frame; i++) q->decoded_bytes_buffer[i] = *ptr2--; } /* Skip the sync codes (0xF8). */ ptr1 = q->decoded_bytes_buffer; for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { if (i >= q->bytes_per_frame) return -1; } /* set the bitstream reader at the start of the second Sound Unit*/ init_get_bits(&q->gb,ptr1,q->bits_per_frame); /* Fill the Weighting coeffs delay buffer */ memmove(q->weighting_delay,&(q->weighting_delay[2]),4*sizeof(int)); q->weighting_delay[4] = get_bits1(&q->gb); q->weighting_delay[5] = get_bits(&q->gb,3); for (i = 0; i < 4; i++) { q->matrix_coeff_index_prev[i] = q->matrix_coeff_index_now[i]; q->matrix_coeff_index_now[i] = q->matrix_coeff_index_next[i]; q->matrix_coeff_index_next[i] = get_bits(&q->gb,2); } /* Decode Sound Unit 2. */ result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[1], &q->outSamples[1024], 1, JOINT_STEREO); if (result != 0) return (result); /* Reconstruct the channel coefficients. 
*/ reverseMatrixing(q->outSamples, &q->outSamples[1024], q->matrix_coeff_index_prev, q->matrix_coeff_index_now); channelWeighting(q->outSamples, &q->outSamples[1024], q->weighting_delay); } else { /* normal stereo mode or mono */ /* Decode the channel sound units. */ for (i=0 ; i<q->channels ; i++) { /* Set the bitstream reader at the start of a channel sound unit. */ init_get_bits(&q->gb, databuf+((i*q->bytes_per_frame)/q->channels), (q->bits_per_frame)/q->channels); result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[i], &q->outSamples[i*1024], i, q->codingMode); if (result != 0) return (result); } } /* Apply the iQMF synthesis filter. */ p1= q->outSamples; for (i=0 ; i<q->channels ; i++) { p2= p1+256; p3= p2+256; p4= p3+256; atrac_iqmf (p1, p2, 256, p1, q->pUnits[i].delayBuf1, q->tempBuf); atrac_iqmf (p4, p3, 256, p3, q->pUnits[i].delayBuf2, q->tempBuf); atrac_iqmf (p1, p3, 512, p1, q->pUnits[i].delayBuf3, q->tempBuf); p1 +=1024; } return 0; } /** * Atrac frame decoding * * @param avctx pointer to the AVCodecContext */ static int atrac3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ATRAC3Context *q = avctx->priv_data; int result = 0, i; const uint8_t* databuf; int16_t* samples = data; if (buf_size < avctx->block_align) return buf_size; /* Check if we need to descramble and what buffer to pass on. 
*/ if (q->scrambled_stream) { decode_bytes(buf, q->decoded_bytes_buffer, avctx->block_align); databuf = q->decoded_bytes_buffer; } else { databuf = buf; } result = decodeFrame(q, databuf); if (result != 0) { av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n"); return -1; } if (q->channels == 1) { /* mono */ for (i = 0; i<1024; i++) samples[i] = av_clip_int16(round(q->outSamples[i])); *data_size = 1024 * sizeof(int16_t); } else { /* stereo */ for (i = 0; i < 1024; i++) { samples[i*2] = av_clip_int16(round(q->outSamples[i])); samples[i*2+1] = av_clip_int16(round(q->outSamples[1024+i])); } *data_size = 2048 * sizeof(int16_t); } return avctx->block_align; } /** * Atrac3 initialization * * @param avctx pointer to the AVCodecContext */ static av_cold int atrac3_decode_init(AVCodecContext *avctx) { int i; const uint8_t *edata_ptr = avctx->extradata; ATRAC3Context *q = avctx->priv_data; static VLC_TYPE atrac3_vlc_table[4096][2]; static int vlcs_initialized = 0; /* Take data from the AVCodecContext (RM container). */ q->sample_rate = avctx->sample_rate; q->channels = avctx->channels; q->bit_rate = avctx->bit_rate; q->bits_per_frame = avctx->block_align * 8; q->bytes_per_frame = avctx->block_align; /* Take care of the codec-specific extradata. 
*/ if (avctx->extradata_size == 14) { /* Parse the extradata, WAV format */ av_log(avctx,AV_LOG_DEBUG,"[0-1] %d\n",bytestream_get_le16(&edata_ptr)); //Unknown value always 1 q->samples_per_channel = bytestream_get_le32(&edata_ptr); q->codingMode = bytestream_get_le16(&edata_ptr); av_log(avctx,AV_LOG_DEBUG,"[8-9] %d\n",bytestream_get_le16(&edata_ptr)); //Dupe of coding mode q->frame_factor = bytestream_get_le16(&edata_ptr); //Unknown always 1 av_log(avctx,AV_LOG_DEBUG,"[12-13] %d\n",bytestream_get_le16(&edata_ptr)); //Unknown always 0 /* setup */ q->samples_per_frame = 1024 * q->channels; q->atrac3version = 4; q->delay = 0x88E; if (q->codingMode) q->codingMode = JOINT_STEREO; else q->codingMode = STEREO; q->scrambled_stream = 0; if ((q->bytes_per_frame == 96*q->channels*q->frame_factor) || (q->bytes_per_frame == 152*q->channels*q->frame_factor) || (q->bytes_per_frame == 192*q->channels*q->frame_factor)) { } else { av_log(avctx,AV_LOG_ERROR,"Unknown frame/channel/frame_factor configuration %d/%d/%d\n", q->bytes_per_frame, q->channels, q->frame_factor); return -1; } } else if (avctx->extradata_size == 10) { /* Parse the extradata, RM format. */ q->atrac3version = bytestream_get_be32(&edata_ptr); q->samples_per_frame = bytestream_get_be16(&edata_ptr); q->delay = bytestream_get_be16(&edata_ptr); q->codingMode = bytestream_get_be16(&edata_ptr); q->samples_per_channel = q->samples_per_frame / q->channels; q->scrambled_stream = 1; } else { av_log(NULL,AV_LOG_ERROR,"Unknown extradata size %d.\n",avctx->extradata_size); } /* Check the extradata. 
*/ if (q->atrac3version != 4) { av_log(avctx,AV_LOG_ERROR,"Version %d != 4.\n",q->atrac3version); return -1; } if (q->samples_per_frame != 1024 && q->samples_per_frame != 2048) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of samples per frame %d.\n",q->samples_per_frame); return -1; } if (q->delay != 0x88E) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of delay %x != 0x88E.\n",q->delay); return -1; } if (q->codingMode == STEREO) { av_log(avctx,AV_LOG_DEBUG,"Normal stereo detected.\n"); } else if (q->codingMode == JOINT_STEREO) { av_log(avctx,AV_LOG_DEBUG,"Joint stereo detected.\n"); } else { av_log(avctx,AV_LOG_ERROR,"Unknown channel coding mode %x!\n",q->codingMode); return -1; } if (avctx->channels <= 0 || avctx->channels > 2 /*|| ((avctx->channels * 1024) != q->samples_per_frame)*/) { av_log(avctx,AV_LOG_ERROR,"Channel configuration error!\n"); return -1; } if(avctx->block_align >= UINT_MAX/2) return -1; /* Pad the data buffer with FF_INPUT_BUFFER_PADDING_SIZE, * this is for the bitstream reader. */ if ((q->decoded_bytes_buffer = av_mallocz((avctx->block_align+(4-avctx->block_align%4) + FF_INPUT_BUFFER_PADDING_SIZE))) == NULL) return AVERROR(ENOMEM); /* Initialize the VLC tables. */ if (!vlcs_initialized) { for (i=0 ; i<7 ; i++) { spectral_coeff_tab[i].table = &atrac3_vlc_table[atrac3_vlc_offs[i]]; spectral_coeff_tab[i].table_allocated = atrac3_vlc_offs[i + 1] - atrac3_vlc_offs[i]; init_vlc (&spectral_coeff_tab[i], 9, huff_tab_sizes[i], huff_bits[i], 1, 1, huff_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlcs_initialized = 1; } init_atrac3_transforms(q); atrac_generate_tables(); /* Generate gain tables. 
*/ for (i=0 ; i<16 ; i++) gain_tab1[i] = powf (2.0, (4 - i)); for (i=-15 ; i<16 ; i++) gain_tab2[i+15] = powf (2.0, i * -0.125); /* init the joint-stereo decoding data */ q->weighting_delay[0] = 0; q->weighting_delay[1] = 7; q->weighting_delay[2] = 0; q->weighting_delay[3] = 7; q->weighting_delay[4] = 0; q->weighting_delay[5] = 7; for (i=0; i<4; i++) { q->matrix_coeff_index_prev[i] = 3; q->matrix_coeff_index_now[i] = 3; q->matrix_coeff_index_next[i] = 3; } dsputil_init(&dsp, avctx); q->pUnits = av_mallocz(sizeof(channel_unit)*q->channels); if (!q->pUnits) { av_free(q->decoded_bytes_buffer); return AVERROR(ENOMEM); } avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } AVCodec atrac3_decoder = { .name = "atrac3", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_ATRAC3, .priv_data_size = sizeof(ATRAC3Context), .init = atrac3_decode_init, .close = atrac3_decode_close, .decode = atrac3_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"), };
123linslouis-android-video-cutter
jni/libavcodec/atrac3.c
C
asf20
32,338
/*
 * Audio and Video frame extraction
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "parser.h"

/* Head of the singly linked list of all registered parsers. */
static AVCodecParser *av_first_parser = NULL;

/* Iterate over the registered parsers: pass NULL to get the first one,
 * then the previous return value to get the next. Returns NULL at the end. */
AVCodecParser* av_parser_next(AVCodecParser *p){
    if(p) return p->next;
    else  return av_first_parser;
}

/* Prepend a parser to the global registry list (no locking — registration
 * is expected to happen once, before parsing starts). */
void av_register_codec_parser(AVCodecParser *parser)
{
    parser->next = av_first_parser;
    av_first_parser = parser;
}

/* Allocate and initialize a parser context for the given codec id.
 * Scans the registry for a parser whose codec_ids[] contains codec_id.
 * Returns NULL if no parser matches, on allocation failure, or if the
 * parser's own init callback fails. */
AVCodecParserContext *av_parser_init(int codec_id)
{
    AVCodecParserContext *s;
    AVCodecParser *parser;
    int ret;

    if(codec_id == CODEC_ID_NONE)
        return NULL;

    for(parser = av_first_parser; parser != NULL; parser = parser->next) {
        if (parser->codec_ids[0] == codec_id ||
            parser->codec_ids[1] == codec_id ||
            parser->codec_ids[2] == codec_id ||
            parser->codec_ids[3] == codec_id ||
            parser->codec_ids[4] == codec_id)
            goto found;
    }
    return NULL;
 found:
    s = av_mallocz(sizeof(AVCodecParserContext));
    if (!s)
        return NULL;
    s->parser = parser;
    s->priv_data = av_mallocz(parser->priv_data_size);
    if (!s->priv_data) {
        av_free(s);
        return NULL;
    }
    if (parser->parser_init) {
        ret = parser->parser_init(s);
        if (ret != 0) {
            av_free(s->priv_data);
            av_free(s);
            return NULL;
        }
    }
    s->fetch_timestamp=1;
    s->pict_type = FF_I_TYPE;
    /* -1 == "unknown" until the parser sets it */
    s->key_frame = -1;
    s->convergence_duration = AV_NOPTS_VALUE;
    s->dts_sync_point       = INT_MIN;
    s->dts_ref_dts_delta    = INT_MIN;
    s->pts_dts_delta        = INT_MIN;
    return s;
}

/* Pick up the pts/dts/pos recorded for the input packet that contains the
 * byte at cur_offset + off, copying them into s->pts/dts/pos. If remove is
 * nonzero the consumed slot is invalidated so it is not matched again. */
void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
    int i;

    s->dts= s->pts= AV_NOPTS_VALUE;
    s->pos= -1;
    s->offset= 0;
    for(i = 0; i < AV_PARSER_PTS_NB; i++) {
        if (   s->cur_offset + off >= s->cur_frame_offset[i]
            && (s->frame_offset < s->cur_frame_offset[i] ||
              (!s->frame_offset && !s->next_frame_offset)) // first field/frame
            //check is disabled because mpeg-ts doesnt send complete PES packets
            && /*s->next_frame_offset + off <*/  s->cur_frame_end[i]){
            s->dts= s->cur_frame_dts[i];
            s->pts= s->cur_frame_pts[i];
            s->pos= s->cur_frame_pos[i];
            s->offset = s->next_frame_offset - s->cur_frame_offset[i];
            if(remove)
                s->cur_frame_offset[i]= INT64_MAX;
            if(s->cur_offset + off < s->cur_frame_end[i])
                break;
        }
    }
}

/**
 *
 * @param buf           input
 * @param buf_size      input length, to signal EOF, this should be 0 (so that the last frame can be output)
 * @param pts           input presentation timestamp
 * @param dts           input decoding timestamp
 * @param poutbuf       will contain a pointer to the first byte of the output frame
 * @param poutbuf_size  will contain the length of the output frame
 * @return the number of bytes of the input bitstream used
 *
 * Example:
 * @code
 *   while(in_len){
 *       len = av_parser_parse(myparser, AVCodecContext, &data, &size,
 *                                       in_data, in_len,
 *                                       pts, dts);
 *       in_data += len;
 *       in_len  -= len;
 *
 *       if(size)
 *          decode_frame(data, size);
 *   }
 * @endcode
 *
 * @deprecated Use av_parser_parse2() instead.
 */
int av_parser_parse(AVCodecParserContext *s,
                    AVCodecContext *avctx,
                    uint8_t **poutbuf, int *poutbuf_size,
                    const uint8_t *buf, int buf_size,
                    int64_t pts, int64_t dts)
{
    /* Thin compatibility wrapper: forwards with an unknown byte position. */
    return av_parser_parse2(s, avctx, poutbuf, poutbuf_size, buf, buf_size, pts, dts, AV_NOPTS_VALUE);
}

/* Split the incoming byte stream into frames. Feeds buf/buf_size to the
 * codec-specific parser_parse callback, maintains the ring of per-packet
 * timestamp slots, and advances cur_offset by the number of bytes consumed
 * (the return value). A buf_size of 0 signals EOF and flushes the last frame. */
int av_parser_parse2(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size,
                     int64_t pts, int64_t dts,
                     int64_t pos)
{
    int index, i;
    uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE];

    if (buf_size == 0) {
        /* padding is always necessary even if EOF, so we add it here */
        memset(dummy_buf, 0, sizeof(dummy_buf));
        buf = dummy_buf;
    } else if (s->cur_offset + buf_size !=
               s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */
        /* add a new packet descriptor */
        i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
        s->cur_frame_start_index = i;
        s->cur_frame_offset[i] = s->cur_offset;
        s->cur_frame_end[i] = s->cur_offset + buf_size;
        s->cur_frame_pts[i] = pts;
        s->cur_frame_dts[i] = dts;
        s->cur_frame_pos[i] = pos;
    }

    if (s->fetch_timestamp){
        s->fetch_timestamp=0;
        s->last_pts = s->pts;
        s->last_dts = s->dts;
        s->last_pos = s->pos;
        ff_fetch_timestamp(s, 0, 0);
    }

    /* WARNING: the returned index can be negative */
    index = s->parser->parser_parse(s, avctx, (const uint8_t **)poutbuf, poutbuf_size, buf, buf_size);
//av_log(NULL, AV_LOG_DEBUG, "parser: in:%"PRId64", %"PRId64", out:%"PRId64", %"PRId64", in:%d out:%d id:%d\n", pts, dts, s->last_pts, s->last_dts, buf_size, *poutbuf_size, avctx->codec_id);
    /* update the file pointer */
    if (*poutbuf_size) {
        /* fill the data for the current frame */
        s->frame_offset = s->next_frame_offset;

        /* offset of the next frame */
        s->next_frame_offset = s->cur_offset + index;
        s->fetch_timestamp=1;
    }
    if (index < 0)
        index = 0;
    s->cur_offset += index;
    return index;
}

/**
 *
 * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
 * @deprecated use AVBitstreamFilter
 */
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe){ if(s && s->parser->split){ if((avctx->flags & CODEC_FLAG_GLOBAL_HEADER) || (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)){ int i= s->parser->split(avctx, buf, buf_size); buf += i; buf_size -= i; } } /* cast to avoid warning about discarding qualifiers */ *poutbuf= (uint8_t *) buf; *poutbuf_size= buf_size; if(avctx->extradata){ if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) /*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/ /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){ int size= buf_size + avctx->extradata_size; *poutbuf_size= size; *poutbuf= av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(*poutbuf, avctx->extradata, avctx->extradata_size); memcpy((*poutbuf) + avctx->extradata_size, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); return 1; } } return 0; } void av_parser_close(AVCodecParserContext *s) { if(s){ if (s->parser->parser_close) s->parser->parser_close(s); av_free(s->priv_data); av_free(s); } } /*****************************************************/ /** * combines the (truncated) bitstream to a complete frame * @return -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error */ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size) { #if 0 if(pc->overread){ printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index); printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]); } #endif /* Copy overread bytes from last frame into buffer. 
*/ for(; pc->overread>0; pc->overread--){ pc->buffer[pc->index++]= pc->buffer[pc->overread_index++]; } /* flush remaining if EOF */ if(!*buf_size && next == END_NOT_FOUND){ next= 0; } pc->last_index= pc->index; /* copy into buffer end return */ if(next == END_NOT_FOUND){ void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE); if(!new_buffer) return AVERROR(ENOMEM); pc->buffer = new_buffer; memcpy(&pc->buffer[pc->index], *buf, *buf_size); pc->index += *buf_size; return -1; } *buf_size= pc->overread_index= pc->index + next; /* append to buffer */ if(pc->index){ void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE); if(!new_buffer) return AVERROR(ENOMEM); pc->buffer = new_buffer; memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE ); pc->index = 0; *buf= pc->buffer; } /* store overread bytes */ for(;next < 0; next++){ pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next]; pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next]; pc->overread++; } #if 0 if(pc->overread){ printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index); printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]); } #endif return 0; } void ff_parse_close(AVCodecParserContext *s) { ParseContext *pc = s->priv_data; av_freep(&pc->buffer); } void ff_parse1_close(AVCodecParserContext *s) { ParseContext1 *pc1 = s->priv_data; av_free(pc1->pc.buffer); av_free(pc1->enc); } /*************************/ int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { int i; uint32_t state= -1; for(i=0; i<buf_size; i++){ state= (state<<8) | buf[i]; if(state == 0x1B3 || state == 0x1B6) return i-3; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/parser.c
C
asf20
10,973
/* * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ //#define DEBUG #include "avcodec.h" #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <process.h> typedef struct ThreadContext{ AVCodecContext *avctx; HANDLE thread; HANDLE work_sem; HANDLE job_sem; HANDLE done_sem; int (*func)(AVCodecContext *c, void *arg); int (*func2)(AVCodecContext *c, void *arg, int, int); void *arg; int argsize; int *jobnr; int *ret; int threadnr; }ThreadContext; static unsigned WINAPI attribute_align_arg thread_func(void *v){ ThreadContext *c= v; for(;;){ int ret, jobnr; //printf("thread_func %X enter wait\n", (int)v); fflush(stdout); WaitForSingleObject(c->work_sem, INFINITE); // avoid trying to access jobnr if we should quit if (!c->func && !c->func2) break; WaitForSingleObject(c->job_sem, INFINITE); jobnr = (*c->jobnr)++; ReleaseSemaphore(c->job_sem, 1, 0); //printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout); if(c->func) ret= c->func(c->avctx, (uint8_t *)c->arg + jobnr*c->argsize); else ret= c->func2(c->avctx, c->arg, jobnr, c->threadnr); if (c->ret) c->ret[jobnr] = ret; //printf("thread_func %X signal complete\n", (int)v); fflush(stdout); ReleaseSemaphore(c->done_sem, 1, 0); } return 0; } /** * Free 
what has been allocated by avcodec_thread_init(). * Must be called after decoding has finished, especially do not call while avcodec_thread_execute() is running. */ void avcodec_thread_free(AVCodecContext *s){ ThreadContext *c= s->thread_opaque; int i; for(i=0; i<s->thread_count; i++){ c[i].func= NULL; c[i].func2= NULL; } ReleaseSemaphore(c[0].work_sem, s->thread_count, 0); for(i=0; i<s->thread_count; i++){ WaitForSingleObject(c[i].thread, INFINITE); if(c[i].thread) CloseHandle(c[i].thread); } if(c[0].work_sem) CloseHandle(c[0].work_sem); if(c[0].job_sem) CloseHandle(c[0].job_sem); if(c[0].done_sem) CloseHandle(c[0].done_sem); av_freep(&s->thread_opaque); } static int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){ ThreadContext *c= s->thread_opaque; int i; int jobnr = 0; assert(s == c->avctx); /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */ for(i=0; i<s->thread_count; i++){ c[i].arg= arg; c[i].argsize= size; c[i].func= func; c[i].ret= ret; c[i].jobnr = &jobnr; } ReleaseSemaphore(c[0].work_sem, count, 0); for(i=0; i<count; i++) WaitForSingleObject(c[0].done_sem, INFINITE); return 0; } static int avcodec_thread_execute2(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count){ ThreadContext *c= s->thread_opaque; int i; for(i=0; i<s->thread_count; i++) c[i].func2 = func; avcodec_thread_execute(s, NULL, arg, ret, count, 0); } int avcodec_thread_init(AVCodecContext *s, int thread_count){ int i; ThreadContext *c; uint32_t threadid; s->thread_count= thread_count; if (thread_count <= 1) return 0; assert(!s->thread_opaque); c= av_mallocz(sizeof(ThreadContext)*thread_count); s->thread_opaque= c; if(!(c[0].work_sem = CreateSemaphore(NULL, 0, INT_MAX, NULL))) goto fail; if(!(c[0].job_sem = CreateSemaphore(NULL, 1, 1, NULL))) goto fail; if(!(c[0].done_sem = CreateSemaphore(NULL, 
0, INT_MAX, NULL))) goto fail; for(i=0; i<thread_count; i++){ //printf("init semaphors %d\n", i); fflush(stdout); c[i].avctx= s; c[i].work_sem = c[0].work_sem; c[i].job_sem = c[0].job_sem; c[i].done_sem = c[0].done_sem; c[i].threadnr = i; //printf("create thread %d\n", i); fflush(stdout); c[i].thread = (HANDLE)_beginthreadex(NULL, 0, thread_func, &c[i], 0, &threadid ); if( !c[i].thread ) goto fail; } //printf("init done\n"); fflush(stdout); s->execute= avcodec_thread_execute; s->execute2= avcodec_thread_execute2; return 0; fail: avcodec_thread_free(s); return -1; }
123linslouis-android-video-cutter
jni/libavcodec/w32thread.c
C
asf20
5,170
/*
 * MSMPEG4 backend for ffmpeg encoder and decoder
 * copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Shared declarations for the MSMPEG4 family of codecs (MSMPEG4 v1-v3,
 * WMV2, VC-1): common VLC tables and the encode/decode helpers that the
 * per-codec sources call.
 */

#ifndef AVCODEC_MSMPEG4_H
#define AVCODEC_MSMPEG4_H

#include "config.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

/* Bit depths used when reading the corresponding VLC tables. */
#define INTER_INTRA_VLC_BITS 3
#define MB_NON_INTRA_VLC_BITS 9
#define MB_INTRA_VLC_BITS 9

/* Shared VLC tables, defined in msmpeg4.c. */
extern VLC ff_mb_non_intra_vlc[4];
extern VLC ff_inter_intra_vlc;

void ff_msmpeg4_code012(PutBitContext *pb, int n);
void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n);
void ff_msmpeg4_handle_slices(MpegEncContext *s);
void ff_msmpeg4_encode_motion(MpegEncContext * s, int mx, int my);
int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n,
                                uint8_t **coded_block_ptr);
int ff_msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr);
int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
                            int n, int coded, const uint8_t *scan_table);
int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);

/* Convenience config flags: set when any codec needing the shared
 * MSMPEG4 decode/encode code is enabled. */
#define CONFIG_MSMPEG4_DECODER (CONFIG_MSMPEG4V1_DECODER || \
                                CONFIG_MSMPEG4V2_DECODER || \
                                CONFIG_MSMPEG4V3_DECODER || \
                                CONFIG_WMV2_DECODER      || \
                                CONFIG_VC1_DECODER)
#define CONFIG_MSMPEG4_ENCODER (CONFIG_MSMPEG4V1_ENCODER || \
                                CONFIG_MSMPEG4V2_ENCODER || \
                                CONFIG_MSMPEG4V3_ENCODER || \
                                CONFIG_WMV2_ENCODER)

#endif /* AVCODEC_MSMPEG4_H */
123linslouis-android-video-cutter
jni/libavcodec/msmpeg4.h
C
asf20
2,402
/*
 * V210 encoder
 *
 * Copyright (C) 2009 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "libavcodec/bytestream.h"

/* Validate input format (even width, YUV422P16) and allocate the
 * coded_frame. Returns 0 on success, negative on error. */
static av_cold int encode_init(AVCodecContext *avctx)
{
    if (avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "v210 needs even width\n");
        return -1;
    }

    if (avctx->pix_fmt != PIX_FMT_YUV422P16) {
        av_log(avctx, AV_LOG_ERROR, "v210 needs YUV422P16\n");
        return -1;
    }

    if (avctx->bits_per_raw_sample != 10)
        av_log(avctx, AV_LOG_WARNING, "bits per raw sample: %d != 10-bit\n",
               avctx->bits_per_raw_sample);

    avctx->coded_frame = avcodec_alloc_frame();
    /* FIX: the original dereferenced the result without checking for
     * allocation failure. */
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = FF_I_TYPE;

    return 0;
}

/* Pack one 16-bit 4:2:2 planar frame into v210: groups of 6 pixels are
 * stored as four little-endian 32-bit words of three 10-bit components
 * each; rows are padded to a 48-pixel-aligned stride. */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
                        int buf_size, void *data)
{
    const AVFrame *pic = data;
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    int stride = aligned_width * 8 / 3;
    int h, w;
    const uint16_t *y = (const uint16_t*)pic->data[0];
    const uint16_t *u = (const uint16_t*)pic->data[1];
    const uint16_t *v = (const uint16_t*)pic->data[2];
    uint8_t *p = buf;
    uint8_t *pdst = buf;

    if (buf_size < aligned_width * avctx->height * 8 / 3) {
        av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
        return -1;
    }

/* Emit one 32-bit word holding three 10-bit samples (a in bits 0-9,
 * b in 10-19, c in 20-29); inputs are 16-bit, so drop the low 6 bits. */
#define WRITE_PIXELS(a, b, c)           \
    do {                                \
        val  =  (*a++ >> 6)          |  \
               ((*b++ & 0xFFC0) << 4);  \
        val |=  (*c++ & 0xFFC0) << 14;  \
        bytestream_put_le32(&p, val);   \
    } while (0)

    for (h = 0; h < avctx->height; h++) {
        uint32_t val;
        /* full groups of 6 pixels */
        for (w = 0; w < avctx->width - 5; w += 6) {
            WRITE_PIXELS(u, y, v);
            WRITE_PIXELS(y, u, y);
            WRITE_PIXELS(v, y, u);
            WRITE_PIXELS(y, v, y);
        }
        /* 2- or 4-pixel tail of the row */
        if (w < avctx->width - 1) {
            WRITE_PIXELS(u, y, v);

            val = *y++ >> 6;
            if (w == avctx->width - 2)
                bytestream_put_le32(&p, val);
        }
        if (w < avctx->width - 3) {
            val |= ((*u++ & 0xFFC0) << 4) | ((*y++ & 0xFFC0) << 14);
            bytestream_put_le32(&p, val);

            val = (*v++ >> 6) | (*y++ & 0xFFC0) << 4;
            bytestream_put_le32(&p, val);
        }
        /* zero the stride padding after the packed samples */
        pdst += stride;
        memset(p, 0, pdst - p);
        p = pdst;

        /* advance to the next source row (linesize is in bytes) */
        y += pic->linesize[0] / 2 - avctx->width;
        u += pic->linesize[1] / 2 - avctx->width / 2;
        v += pic->linesize[2] / 2 - avctx->width / 2;
    }

    return p - buf;
}

/* Release the coded_frame allocated in encode_init(). */
static av_cold int encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}

AVCodec v210_encoder = {
    "v210",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_V210,
    0,
    encode_init,
    encode_frame,
    encode_close,
    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P16, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};
123linslouis-android-video-cutter
jni/libavcodec/v210enc.c
C
asf20
3,949
/*
 * CD Graphics Video Decoder
 * Copyright (c) 2009 Michael Tison
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"

/**
 * @file
 * @brief CD Graphics Video Decoder
 * @author Michael Tison
 * @sa http://wiki.multimedia.cx/index.php?title=CD_Graphics
 * @sa http://www.ccs.neu.edu/home/bchafy/cdb/info/cdg
 */

/// default screen sizes
#define CDG_FULL_WIDTH           300
#define CDG_FULL_HEIGHT          216
#define CDG_DISPLAY_WIDTH        294
#define CDG_DISPLAY_HEIGHT       204
#define CDG_BORDER_WIDTH           6
#define CDG_BORDER_HEIGHT         12

/// masks
#define CDG_COMMAND             0x09
#define CDG_MASK                0x3F

/// instruction codes
#define CDG_INST_MEMORY_PRESET     1
#define CDG_INST_BORDER_PRESET     2
#define CDG_INST_TILE_BLOCK        6
#define CDG_INST_SCROLL_PRESET    20
#define CDG_INST_SCROLL_COPY      24
#define CDG_INST_LOAD_PAL_LO      30
#define CDG_INST_LOAD_PAL_HIGH    31
#define CDG_INST_TILE_BLOCK_XOR   38

/// data sizes
#define CDG_PACKET_SIZE           24
#define CDG_DATA_SIZE             16
#define CDG_TILE_HEIGHT           12
#define CDG_TILE_WIDTH             6
#define CDG_MINIMUM_PKT_SIZE       6
#define CDG_MINIMUM_SCROLL_SIZE    3
#define CDG_HEADER_SIZE            8
#define CDG_PALETTE_SIZE          16

typedef struct CDGraphicsContext {
    AVFrame frame;
    int hscroll;   ///< current horizontal scroll offset (pixels)
    int vscroll;   ///< current vertical scroll offset (pixels)
} CDGraphicsContext;

/* Reset an AVFrame to defaults and mark it reusable across packets. */
static void cdg_init_frame(AVFrame *frame)
{
    avcodec_get_frame_defaults(frame);
    frame->reference = 3;
    frame->buffer_hints = FF_BUFFER_HINTS_VALID    |
                          FF_BUFFER_HINTS_READABLE |
                          FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;
}

static av_cold int cdg_decode_init(AVCodecContext *avctx)
{
    CDGraphicsContext *cc = avctx->priv_data;

    cdg_init_frame(&cc->frame);

    avctx->width   = CDG_FULL_WIDTH;
    avctx->height  = CDG_FULL_HEIGHT;
    avctx->pix_fmt = PIX_FMT_PAL8;

    return 0;
}

/* BORDER PRESET: fill the border region with the color in data[0],
 * but only when the repeat nibble (data[1]) is zero. */
static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
{
    int y;
    int lsize    = cc->frame.linesize[0];
    uint8_t *buf = cc->frame.data[0];
    int color    = data[0] & 0x0F;

    if (!(data[1] & 0x0F)) {
        /// fill the top and bottom borders
        memset(buf, color, CDG_BORDER_HEIGHT * lsize);
        memset(buf + (CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT) * lsize,
               color, CDG_BORDER_HEIGHT * lsize);

        /// fill the side borders
        for (y = CDG_BORDER_HEIGHT; y < CDG_FULL_HEIGHT - CDG_BORDER_HEIGHT; y++) {
            memset(buf + y * lsize, color, CDG_BORDER_WIDTH);
            memset(buf + CDG_FULL_WIDTH - CDG_BORDER_WIDTH + y * lsize,
                   color, CDG_BORDER_WIDTH);
        }
    }
}

/* LOAD PALETTE: unpack 8 entries of packed 4-bit RGB into palette slots
 * 0-7 (low) or 8-15 (high); each 4-bit component is scaled to 8 bits. */
static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
{
    uint8_t r, g, b;
    uint16_t color;
    int i;
    int array_offset  = low ? 0 : 8;
    uint32_t *palette = (uint32_t *) cc->frame.data[1];

    for (i = 0; i < 8; i++) {
        color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F);
        r = ((color >> 8) & 0x000F) * 17;
        g = ((color >> 4) & 0x000F) * 17;
        b = ((color     ) & 0x000F) * 17;
        palette[i + array_offset] = r << 16 | g << 8 | b;
    }
    cc->frame.palette_has_changed = 1;
}

/* TILE BLOCK: draw a 6x12 two-color bitmap tile; when b is nonzero the
 * colors are XORed with the existing pixels. Rejects out-of-range tiles. */
static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
{
    unsigned ci, ri;
    int color;
    int x, y;
    int ai;
    int stride   = cc->frame.linesize[0];
    uint8_t *buf = cc->frame.data[0];

    ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
    ci = (data[3] & 0x3F) * CDG_TILE_WIDTH  + cc->hscroll;

    if (ri > (CDG_FULL_HEIGHT - CDG_TILE_HEIGHT))
        return AVERROR(EINVAL);
    if (ci > (CDG_FULL_WIDTH - CDG_TILE_WIDTH))
        return AVERROR(EINVAL);

    for (y = 0; y < CDG_TILE_HEIGHT; y++) {
        for (x = 0; x < CDG_TILE_WIDTH; x++) {
            if (!((data[4 + y] >> (5 - x)) & 0x01))
                color = data[0] & 0x0F;
            else
                color = data[1] & 0x0F;

            ai = ci + x + (stride * (ri + y));
            if (b)
                color ^= buf[ai];
            buf[ai] = color;
        }
    }

    return 0;
}

#define UP    2
#define DOWN  1
#define LEFT  2
#define RIGHT 1

/* Copy a w*h sub-rectangle between two buffers sharing the same stride. */
static void cdg_copy_rect_buf(int out_tl_x, int out_tl_y, uint8_t *out,
                              int in_tl_x, int in_tl_y, uint8_t *in,
                              int w, int h, int stride)
{
    int y;

    in  += in_tl_x  + in_tl_y  * stride;
    out += out_tl_x + out_tl_y * stride;
    for (y = 0; y < h; y++)
        memcpy(out + y * stride, in + y * stride, w);
}

/* Fill a w*h rectangle with a solid palette index. */
static void cdg_fill_rect_preset(int tl_x, int tl_y, uint8_t *out,
                                 int color, int w, int h, int stride)
{
    int y;

    for (y = tl_y; y < tl_y + h; y++)
        memset(out + tl_x + y * stride, color, w);
}

/* SCROLL helper: roll (copy wrapped-around content) or preset (solid fill)
 * the area uncovered by a scroll. */
static void cdg_fill_wrapper(int out_tl_x, int out_tl_y, uint8_t *out,
                             int in_tl_x, int in_tl_y, uint8_t *in,
                             int color, int w, int h, int stride, int roll)
{
    if (roll) {
        cdg_copy_rect_buf(out_tl_x, out_tl_y, out, in_tl_x, in_tl_y,
                          in, w, h, stride);
    } else {
        cdg_fill_rect_preset(out_tl_x, out_tl_y, out, color,
                             w, h, stride);
    }
}

/* SCROLL PRESET / SCROLL COPY: shift the image by the command's pixel
 * offsets plus optional whole-tile steps, writing into new_frame and
 * filling (or rolling) the exposed strips. */
static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
                       AVFrame *new_frame, int roll_over)
{
    int color;
    int hscmd, h_off, hinc, vscmd, v_off, vinc;
    int y;
    int stride   = cc->frame.linesize[0];
    uint8_t *in  = cc->frame.data[0];
    uint8_t *out = new_frame->data[0];

    color = data[0] & 0x0F;
    hscmd = (data[1] & 0x30) >> 4;
    vscmd = (data[2] & 0x30) >> 4;

    h_off = FFMIN(data[1] & 0x07, CDG_BORDER_WIDTH  - 1);
    v_off = FFMIN(data[2] & 0x07, CDG_BORDER_HEIGHT - 1);

    /// find the difference and save the offset for cdg_tile_block usage
    hinc = h_off - cc->hscroll;
    vinc = v_off - cc->vscroll;
    cc->hscroll = h_off;
    cc->vscroll = v_off;

    if (vscmd == UP)
        vinc -= 12;
    if (vscmd == DOWN)
        vinc += 12;
    if (hscmd == LEFT)
        hinc -= 6;
    if (hscmd == RIGHT)
        hinc += 6;

    if (!hinc && !vinc)
        return;

    memcpy(new_frame->data[1], cc->frame.data[1], CDG_PALETTE_SIZE * 4);

    for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
        memcpy(out + FFMAX(0, hinc) + stride * y,
               in + FFMAX(0, hinc) - hinc + (y - vinc) * stride,
               FFMIN(stride + hinc, stride));

    if (vinc > 0)
        cdg_fill_wrapper(0, 0, out,
                         0, CDG_FULL_HEIGHT - vinc, in,
                         color, stride, vinc, stride, roll_over);
    else if (vinc < 0)
        cdg_fill_wrapper(0, CDG_FULL_HEIGHT + vinc, out,
                         0, 0, in,
                         color, stride, -1 * vinc, stride, roll_over);

    if (hinc > 0)
        cdg_fill_wrapper(0, 0, out,
                         CDG_FULL_WIDTH - hinc, 0, in,
                         color, hinc, CDG_FULL_HEIGHT, stride, roll_over);
    else if (hinc < 0)
        cdg_fill_wrapper(CDG_FULL_WIDTH + hinc, 0, out,
                         0, 0, in,
                         color, -1 * hinc, CDG_FULL_HEIGHT, stride, roll_over);
}

/* Decode one CD+G packet: dispatch on the instruction byte and update the
 * persistent frame. Non-command packets produce no output. */
static int cdg_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int ret;
    uint8_t command, inst;
    /* FIX: zero-initialize so short packets cannot make instruction
     * handlers (e.g. MEMORY PRESET) read uninitialized stack bytes. */
    uint8_t cdg_data[CDG_DATA_SIZE] = { 0 };
    AVFrame new_frame;
    CDGraphicsContext *cc = avctx->priv_data;

    if (buf_size < CDG_MINIMUM_PKT_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
        return AVERROR(EINVAL);
    }

    ret = avctx->reget_buffer(avctx, &cc->frame);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    command = bytestream_get_byte(&buf);
    inst    = bytestream_get_byte(&buf);
    inst    &= CDG_MASK;
    buf += 2;  /// skipping 2 unneeded bytes

    /* FIX: the original copied buf_size - CDG_HEADER_SIZE bytes into the
     * 16-byte cdg_data array unconditionally — a stack buffer overflow
     * for packets larger than CDG_PACKET_SIZE, and an underflow-turned-
     * huge unsigned length for packets shorter than the header. Clamp
     * the copy to the array size and skip it when there is no payload. */
    if (buf_size > CDG_HEADER_SIZE)
        bytestream_get_buffer(&buf, cdg_data,
                              FFMIN(buf_size - CDG_HEADER_SIZE, CDG_DATA_SIZE));

    if ((command & CDG_MASK) == CDG_COMMAND) {
        switch (inst) {
        case CDG_INST_MEMORY_PRESET:
            if (!(cdg_data[1] & 0x0F))
                memset(cc->frame.data[0], cdg_data[0] & 0x0F,
                       cc->frame.linesize[0] * CDG_FULL_HEIGHT);
            break;
        case CDG_INST_LOAD_PAL_LO:
        case CDG_INST_LOAD_PAL_HIGH:
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for loading palette\n");
                return AVERROR(EINVAL);
            }

            cdg_load_palette(cc, cdg_data, inst == CDG_INST_LOAD_PAL_LO);
            break;
        case CDG_INST_BORDER_PRESET:
            cdg_border_preset(cc, cdg_data);
            break;
        case CDG_INST_TILE_BLOCK_XOR:
        case CDG_INST_TILE_BLOCK:
            if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for drawing tile\n");
                return AVERROR(EINVAL);
            }

            ret = cdg_tile_block(cc, cdg_data, inst == CDG_INST_TILE_BLOCK_XOR);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "tile is out of range\n");
                return ret;
            }
            break;
        case CDG_INST_SCROLL_PRESET:
        case CDG_INST_SCROLL_COPY:
            if (buf_size - CDG_HEADER_SIZE < CDG_MINIMUM_SCROLL_SIZE) {
                av_log(avctx, AV_LOG_ERROR, "buffer too small for scrolling\n");
                return AVERROR(EINVAL);
            }

            cdg_init_frame(&new_frame);
            ret = avctx->get_buffer(avctx, &new_frame);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return ret;
            }

            cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
            avctx->release_buffer(avctx, &cc->frame);
            cc->frame = new_frame;
            break;
        default:
            break;
        }

        *data_size = sizeof(AVFrame);
    } else {
        *data_size = 0;
        buf_size   = 0;
    }

    *(AVFrame *) data = cc->frame;
    return buf_size;
}

static av_cold int cdg_decode_end(AVCodecContext *avctx)
{
    CDGraphicsContext *cc = avctx->priv_data;

    if (cc->frame.data[0])
        avctx->release_buffer(avctx, &cc->frame);

    return 0;
}

AVCodec cdgraphics_decoder = {
    "cdgraphics",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_CDGRAPHICS,
    sizeof(CDGraphicsContext),
    cdg_decode_init,
    NULL,
    cdg_decode_end,
    cdg_decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("CD Graphics video"),
};
123linslouis-android-video-cutter
jni/libavcodec/cdgraphics.c
C
asf20
11,632
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief IntraX8 (J-Frame) subdecoder, used by WMV2 and VC-1 */ #include "avcodec.h" #include "get_bits.h" #include "mpegvideo.h" #include "msmpeg4data.h" #include "intrax8huf.h" #include "intrax8.h" #define MAX_TABLE_DEPTH(table_bits, max_bits) ((max_bits+table_bits-1)/table_bits) #define DC_VLC_BITS 9 #define AC_VLC_BITS 9 #define OR_VLC_BITS 7 #define DC_VLC_MTD MAX_TABLE_DEPTH(DC_VLC_BITS, MAX_DC_VLC_BITS) #define AC_VLC_MTD MAX_TABLE_DEPTH(AC_VLC_BITS, MAX_AC_VLC_BITS) #define OR_VLC_MTD MAX_TABLE_DEPTH(OR_VLC_BITS, MAX_OR_VLC_BITS) static VLC j_ac_vlc[2][2][8]; //[quant<13],[intra/inter],[select] static VLC j_dc_vlc[2][8]; //[quant], [select] static VLC j_orient_vlc[2][4]; //[quant], [select] static av_cold void x8_vlc_init(void){ int i; int offset = 0; int sizeidx = 0; static const uint16_t sizes[8*4 + 8*2 + 2 + 4] = { 576, 548, 582, 618, 546, 616, 560, 642, 584, 582, 704, 664, 512, 544, 656, 640, 512, 648, 582, 566, 532, 614, 596, 648, 586, 552, 584, 590, 544, 578, 584, 624, 528, 528, 526, 528, 536, 528, 526, 544, 544, 512, 512, 528, 528, 544, 512, 544, 128, 128, 128, 128, 128, 128}; static VLC_TYPE table[28150][2]; #define init_ac_vlc(dst,src) \ dst.table = &table[offset]; \ dst.table_allocated = sizes[sizeidx]; 
\ offset += sizes[sizeidx++]; \ init_vlc(&dst, \ AC_VLC_BITS,77, \ &src[1],4,2, \ &src[0],4,2, \ INIT_VLC_USE_NEW_STATIC) //set ac tables for(i=0;i<8;i++){ init_ac_vlc( j_ac_vlc[0][0][i], x8_ac0_highquant_table[i][0] ); init_ac_vlc( j_ac_vlc[0][1][i], x8_ac1_highquant_table[i][0] ); init_ac_vlc( j_ac_vlc[1][0][i], x8_ac0_lowquant_table [i][0] ); init_ac_vlc( j_ac_vlc[1][1][i], x8_ac1_lowquant_table [i][0] ); } #undef init_ac_vlc //set dc tables #define init_dc_vlc(dst,src) \ dst.table = &table[offset]; \ dst.table_allocated = sizes[sizeidx]; \ offset += sizes[sizeidx++]; \ init_vlc(&dst, \ DC_VLC_BITS,34, \ &src[1],4,2, \ &src[0],4,2, \ INIT_VLC_USE_NEW_STATIC); for(i=0;i<8;i++){ init_dc_vlc( j_dc_vlc[0][i], x8_dc_highquant_table[i][0]); init_dc_vlc( j_dc_vlc[1][i], x8_dc_lowquant_table [i][0]); } #undef init_dc_vlc //set orient tables #define init_or_vlc(dst,src) \ dst.table = &table[offset]; \ dst.table_allocated = sizes[sizeidx]; \ offset += sizes[sizeidx++]; \ init_vlc(&dst, \ OR_VLC_BITS,12, \ &src[1],4,2, \ &src[0],4,2, \ INIT_VLC_USE_NEW_STATIC); for(i=0;i<2;i++){ init_or_vlc( j_orient_vlc[0][i], x8_orient_highquant_table[i][0]); } for(i=0;i<4;i++){ init_or_vlc( j_orient_vlc[1][i], x8_orient_lowquant_table [i][0]) } if (offset != sizeof(table)/sizeof(VLC_TYPE)/2) av_log(NULL, AV_LOG_ERROR, "table size %i does not match needed %i\n", (int)(sizeof(table)/sizeof(VLC_TYPE)/2), offset); } #undef init_or_vlc static void x8_reset_vlc_tables(IntraX8Context * w){ memset(w->j_dc_vlc,0,sizeof(w->j_dc_vlc)); memset(w->j_ac_vlc,0,sizeof(w->j_ac_vlc)); w->j_orient_vlc=NULL; } static inline void x8_select_ac_table(IntraX8Context * const w , int mode){ MpegEncContext * const s= w->s; int table_index; assert(mode<4); if( w->j_ac_vlc[mode] ) return; table_index = get_bits(&s->gb, 3); w->j_ac_vlc[mode] = &j_ac_vlc[w->quant<13][mode>>1][table_index];//2 modes use same tables assert(w->j_ac_vlc[mode]); } static inline int x8_get_orient_vlc(IntraX8Context * w){ MpegEncContext * 
const s= w->s; int table_index; if(!w->j_orient_vlc ){ table_index = get_bits(&s->gb, 1+(w->quant<13) ); w->j_orient_vlc = &j_orient_vlc[w->quant<13][table_index]; } assert(w->j_orient_vlc); assert(w->j_orient_vlc->table); return get_vlc2(&s->gb, w->j_orient_vlc->table, OR_VLC_BITS, OR_VLC_MTD); } #define extra_bits(eb) (eb) #define extra_run (0xFF<<8) #define extra_level (0x00<<8) #define run_offset(r) ((r)<<16) #define level_offset(l) ((l)<<24) static const uint32_t ac_decode_table[]={ /*46*/ extra_bits(3) | extra_run | run_offset(16) | level_offset( 0), /*47*/ extra_bits(3) | extra_run | run_offset(24) | level_offset( 0), /*48*/ extra_bits(2) | extra_run | run_offset( 4) | level_offset( 1), /*49*/ extra_bits(3) | extra_run | run_offset( 8) | level_offset( 1), /*50*/ extra_bits(5) | extra_run | run_offset(32) | level_offset( 0), /*51*/ extra_bits(4) | extra_run | run_offset(16) | level_offset( 1), /*52*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 4), /*53*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 8), /*54*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset(12), /*55*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(16), /*56*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset(24), /*57*/ extra_bits(2) | extra_level | run_offset( 1) | level_offset( 3), /*58*/ extra_bits(3) | extra_level | run_offset( 1) | level_offset( 7), /*59*/ extra_bits(2) | extra_run | run_offset(16) | level_offset( 0), /*60*/ extra_bits(2) | extra_run | run_offset(20) | level_offset( 0), /*61*/ extra_bits(2) | extra_run | run_offset(24) | level_offset( 0), /*62*/ extra_bits(2) | extra_run | run_offset(28) | level_offset( 0), /*63*/ extra_bits(4) | extra_run | run_offset(32) | level_offset( 0), /*64*/ extra_bits(4) | extra_run | run_offset(48) | level_offset( 0), /*65*/ extra_bits(2) | extra_run | run_offset( 4) | level_offset( 1), /*66*/ extra_bits(3) | extra_run | run_offset( 8) | level_offset( 1), /*67*/ extra_bits(4) | 
extra_run | run_offset(16) | level_offset( 1), /*68*/ extra_bits(2) | extra_level | run_offset( 0) | level_offset( 4), /*69*/ extra_bits(3) | extra_level | run_offset( 0) | level_offset( 8), /*70*/ extra_bits(4) | extra_level | run_offset( 0) | level_offset(16), /*71*/ extra_bits(2) | extra_level | run_offset( 1) | level_offset( 3), /*72*/ extra_bits(3) | extra_level | run_offset( 1) | level_offset( 7), }; //extra_bits = 3bits; extra_run/level = 1 bit; run_offset = 6bits; level_offset = 5 bits; #undef extra_bits #undef extra_run #undef extra_level #undef run_offset #undef level_offset static void x8_get_ac_rlf(IntraX8Context * const w, const int mode, int * const run, int * const level, int * const final){ MpegEncContext * const s= w->s; int i,e; // x8_select_ac_table(w,mode); i = get_vlc2(&s->gb, w->j_ac_vlc[mode]->table, AC_VLC_BITS, AC_VLC_MTD); if(i<46){ //[0-45] int t,l; if(i<0){ (*level)=(*final)=//prevent 'may be used unilitialized' (*run)=64;//this would cause error exit in the ac loop return; } (*final) = t = (i>22); i-=23*t; /* i== 0-15 r=0-15 l=0 ;r=i& %01111 i==16-19 r=0-3 l=1 ;r=i& %00011 i==20-21 r=0-1 l=2 ;r=i& %00001 i==22 r=0 l=3 ;r=i& %00000 l=lut_l[i/2]={0,0,0,0,0,0,0,0,1,1,2,3}[i>>1];// 11 10'01 01'00 00'00 00'00 00'00 00 => 0xE50000 t=lut_mask[l]={0x0f,0x03,0x01,0x00}[l]; as i<256 the higher bits do not matter */ l=(0xE50000>>(i&(0x1E)))&3;/*0x1E or (~1) or ((i>>1)<<1)*/ t=(0x01030F>>(l<<3)); (*run) = i&t; (*level) = l; }else if(i<73){//[46-72] uint32_t sm; uint32_t mask; i-=46; sm=ac_decode_table[i]; e=get_bits(&s->gb,sm&0xF);sm>>=8;//3bits mask=sm&0xff;sm>>=8; //1bit (*run) =(sm&0xff) + (e&( mask));//6bits (*level)=(sm>>8) + (e&(~mask));//5bits (*final)=i>(58-46); }else if(i<75){//[73-74] static const uint8_t crazy_mix_runlevel[32]={ 0x22,0x32,0x33,0x53,0x23,0x42,0x43,0x63, 0x24,0x52,0x34,0x73,0x25,0x62,0x44,0x83, 0x26,0x72,0x35,0x54,0x27,0x82,0x45,0x64, 0x28,0x92,0x36,0x74,0x29,0xa2,0x46,0x84}; (*final)=!(i&1); e=get_bits(&s->gb,5);//get the 
extra bits (*run) =crazy_mix_runlevel[e]>>4; (*level)=crazy_mix_runlevel[e]&0x0F; }else{ (*level)=get_bits( &s->gb, 7-3*(i&1)); (*run) =get_bits( &s->gb, 6); (*final)=get_bits1(&s->gb); } return; } //static const uint8_t dc_extra_sbits[] ={0, 1,1, 1,1, 2,2, 3,3, 4,4, 5,5, 6,6, 7,7 }; static const uint8_t dc_index_offset[] ={ 0, 1,2, 3,4, 5,7, 9,13, 17,25, 33,49, 65,97, 129,193}; static int x8_get_dc_rlf(IntraX8Context * const w,int const mode, int * const level, int * const final){ MpegEncContext * const s= w->s; int i,e,c; assert(mode<3); if( !w->j_dc_vlc[mode] ) { int table_index; table_index = get_bits(&s->gb, 3); //4 modes, same table w->j_dc_vlc[mode]= &j_dc_vlc[w->quant<13][table_index]; } assert(w->j_dc_vlc); assert(w->j_dc_vlc[mode]->table); i=get_vlc2(&s->gb, w->j_dc_vlc[mode]->table, DC_VLC_BITS, DC_VLC_MTD); /*(i>=17) {i-=17;final=1;}*/ c= i>16; (*final)=c; i-=17*c; if(i<=0){ (*level)=0; return -i; } c=(i+1)>>1;//hackish way to calculate dc_extra_sbits[] c-=c>1; e=get_bits(&s->gb,c);//get the extra bits i=dc_index_offset[i]+(e>>1); e= -(e & 1);//0,0xffffff (*level)= (i ^ e) - e;// (i^0)-0 , (i^0xff)-(-1) return 0; } //end of huffman static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma){ MpegEncContext * const s= w->s; int range; int sum; int quant; s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer, s->current_picture.linesize[chroma>0], &range, &sum, w->edges); if(chroma){ w->orient=w->chroma_orient; quant=w->quant_dc_chroma; }else{ quant=w->quant; } w->flat_dc=0; if(range < quant || range < 3){ w->orient=0; if(range < 3){//yep you read right, a +-1 idct error may break decoding! 
w->flat_dc=1; sum+=9; w->predicted_dc = (sum*6899)>>17;//((1<<17)+9)/(8+8+1+2)=6899 } } if(chroma) return 0; assert(w->orient < 3); if(range < 2*w->quant){ if( (w->edges&3) == 0){ if(w->orient==1) w->orient=11; if(w->orient==2) w->orient=10; }else{ w->orient=0; } w->raw_orient=0; }else{ static const uint8_t prediction_table[3][12]={ {0,8,4, 10,11, 2,6,9,1,3,5,7}, {4,0,8, 11,10, 3,5,2,6,9,1,7}, {8,0,4, 10,11, 1,7,2,6,9,3,5} }; w->raw_orient=x8_get_orient_vlc(w); if(w->raw_orient<0) return -1; assert(w->raw_orient < 12 ); assert(w->orient<3); w->orient=prediction_table[w->orient][w->raw_orient]; } return 0; } static void x8_update_predictions(IntraX8Context * const w, const int orient, const int est_run ){ MpegEncContext * const s= w->s; w->prediction_table[s->mb_x*2+(s->mb_y&1)] = (est_run<<2) + 1*(orient==4) + 2*(orient==8); /* y=2n+0 ->//0 2 4 y=2n+1 ->//1 3 5 */ } static void x8_get_prediction_chroma(IntraX8Context * const w){ MpegEncContext * const s= w->s; w->edges = 1*( !(s->mb_x>>1) ); w->edges|= 2*( !(s->mb_y>>1) ); w->edges|= 4*( s->mb_x >= (2*s->mb_width-1) );//mb_x for chroma would always be odd w->raw_orient=0; if(w->edges&3){//lut_co[8]={inv,4,8,8, inv,4,8,8}<- =>{1,1,0,0;1,1,0,0} => 0xCC w->chroma_orient=4<<((0xCC>>w->edges)&1); return; } w->chroma_orient = (w->prediction_table[2*s->mb_x-2] & 0x03)<<2;//block[x-1][y|1-1)] } static void x8_get_prediction(IntraX8Context * const w){ MpegEncContext * const s= w->s; int a,b,c,i; w->edges = 1*( !s->mb_x ); w->edges|= 2*( !s->mb_y ); w->edges|= 4*( s->mb_x >= (2*s->mb_width-1) ); switch(w->edges&3){ case 0: break; case 1: //take the one from the above block[0][y-1] w->est_run = w->prediction_table[!(s->mb_y&1)]>>2; w->orient = 1; return; case 2: //take the one from the previous block[x-1][0] w->est_run = w->prediction_table[2*s->mb_x-2]>>2; w->orient = 2; return; case 3: w->est_run = 16; w->orient = 0; return; } //no edge cases b= w->prediction_table[2*s->mb_x + !(s->mb_y&1) ];//block[x ][y-1] a= 
w->prediction_table[2*s->mb_x-2 + (s->mb_y&1) ];//block[x-1][y ] c= w->prediction_table[2*s->mb_x-2 + !(s->mb_y&1) ];//block[x-1][y-1] w->est_run = FFMIN(b,a); /* This condition has nothing to do with w->edges, even if it looks similar it would trigger if e.g. x=3;y=2; I guess somebody wrote something wrong and it became standard. */ if( (s->mb_x & s->mb_y) != 0 ) w->est_run=FFMIN(c,w->est_run); w->est_run>>=2; a&=3; b&=3; c&=3; i=( 0xFFEAF4C4>>(2*b+8*a) )&3; if(i!=3) w->orient=i; else w->orient=( 0xFFEAD8>>(2*c+8*(w->quant>12)) )&3; /* lut1[b][a]={ ->{0, 1, 0, pad}, {0, 1, X, pad}, {2, 2, 2, pad}} pad 2 2 2; pad X 1 0; pad 0 1 0 <- -> 11 10 '10 10 '11 11'01 00 '11 00'01 00=>0xEAF4C4 lut2[q>12][c]={ ->{0,2,1,pad}, {2,2,2,pad}} pad 2 2 2; pad 1 2 0 <- -> 11 10'10 10 '11 01'10 00=>0xEAD8 */ } static void x8_ac_compensation(IntraX8Context * const w, int const direction, int const dc_level){ MpegEncContext * const s= w->s; int t; #define B(x,y) s->block[0][s->dsp.idct_permutation[(x)+(y)*8]] #define T(x) ((x) * dc_level + 0x8000) >> 16; switch(direction){ case 0: t = T(3811);//h B(1,0) -= t; B(0,1) -= t; t = T(487);//e B(2,0) -= t; B(0,2) -= t; t = T(506);//f B(3,0) -= t; B(0,3) -= t; t = T(135);//c B(4,0) -= t; B(0,4) -= t; B(2,1) += t; B(1,2) += t; B(3,1) += t; B(1,3) += t; t = T(173);//d B(5,0) -= t; B(0,5) -= t; t = T(61);//b B(6,0) -= t; B(0,6) -= t; B(5,1) += t; B(1,5) += t; t = T(42); //a B(7,0) -= t; B(0,7) -= t; B(4,1) += t; B(1,4) += t; B(4,4) += t; t = T(1084);//g B(1,1) += t; s->block_last_index[0] = FFMAX(s->block_last_index[0], 7*8); break; case 1: B(0,1) -= T(6269); B(0,3) -= T( 708); B(0,5) -= T( 172); B(0,7) -= T( 73); s->block_last_index[0] = FFMAX(s->block_last_index[0], 7*8); break; case 2: B(1,0) -= T(6269); B(3,0) -= T( 708); B(5,0) -= T( 172); B(7,0) -= T( 73); s->block_last_index[0] = FFMAX(s->block_last_index[0], 7); break; } #undef B #undef T } static void dsp_x8_put_solidcolor(uint8_t const pix, uint8_t * dst, int const linesize){ int k; 
for(k=0;k<8;k++){ memset(dst,pix,8); dst+=linesize; } } static const int16_t quant_table[64] = { 256, 256, 256, 256, 256, 256, 259, 262, 265, 269, 272, 275, 278, 282, 285, 288, 292, 295, 299, 303, 306, 310, 314, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353, 358, 362, 366, 371, 375, 379, 384, 389, 393, 398, 403, 408, 413, 417, 422, 428, 433, 438, 443, 448, 454, 459, 465, 470, 476, 482, 488, 493, 499, 505, 511 }; static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){ MpegEncContext * const s= w->s; uint8_t * scantable; int final,run,level; int ac_mode,dc_mode,est_run,dc_level; int pos,n; int zeros_only; int use_quant_matrix; int sign; assert(w->orient<12); s->dsp.clear_block(s->block[0]); if(chroma){ dc_mode=2; }else{ dc_mode=!!w->est_run;//0,1 } if(x8_get_dc_rlf(w, dc_mode, &dc_level, &final)) return -1; n=0; zeros_only=0; if(!final){//decode ac use_quant_matrix=w->use_quant_matrix; if(chroma){ ac_mode = 1; est_run = 64;//not used }else{ if (w->raw_orient < 3){ use_quant_matrix = 0; } if(w->raw_orient > 4){ ac_mode = 0; est_run = 64; }else{ if(w->est_run > 1){ ac_mode = 2; est_run=w->est_run; }else{ ac_mode = 3; est_run = 64; } } } x8_select_ac_table(w,ac_mode); /*scantable_selector[12]={0,2,0,1,1,1,0,2,2,0,1,2};<- -> 10'01' 00'10' 10'00' 01'01' 01'00' 10'00 =>0x928548 */ scantable = w->scantable[ (0x928548>>(2*w->orient))&3 ].permutated; pos=0; do { n++; if( n >= est_run ){ ac_mode=3; x8_select_ac_table(w,3); } x8_get_ac_rlf(w,ac_mode,&run,&level,&final); pos+=run+1; if(pos>63){ //this also handles vlc error in x8_get_ac_rlf return -1; } level= (level+1) * w->dquant; level+= w->qsum; sign = - get_bits1(&s->gb); level = (level ^ sign) - sign; if(use_quant_matrix){ level = (level*quant_table[pos])>>8; } s->block[0][ scantable[pos] ]=level; }while(!final); s->block_last_index[0]=pos; }else{//DC only s->block_last_index[0]=0; if(w->flat_dc && ((unsigned)(dc_level+1)) < 3){//[-1;1] int32_t divide_quant= !chroma ? 
w->divide_quant_dc_luma: w->divide_quant_dc_chroma; int32_t dc_quant = !chroma ? w->quant: w->quant_dc_chroma; //original intent dc_level+=predicted_dc/quant; but it got lost somewhere in the rounding dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13; dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3), s->dest[chroma], s->current_picture.linesize[!!chroma]); goto block_placed; } zeros_only = (dc_level == 0); } if(!chroma){ s->block[0][0] = dc_level*w->quant; }else{ s->block[0][0] = dc_level*w->quant_dc_chroma; } //there is !zero_only check in the original, but dc_level check is enough if( (unsigned int)(dc_level+1) >= 3 && (w->edges&3) != 3 ){ int direction; /*ac_comp_direction[orient] = { 0, 3, 3, 1, 1, 0, 0, 0, 2, 2, 2, 1 };<- -> 01'10' 10'10' 00'00' 00'01' 01'11' 11'00 =>0x6A017C */ direction= (0x6A017C>>(w->orient*2))&3; if (direction != 3){ x8_ac_compensation(w, direction, s->block[0][0]);//modify block_last[] } } if(w->flat_dc){ dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]); }else{ s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer, s->dest[chroma], s->current_picture.linesize[!!chroma] ); } if(!zeros_only) s->dsp.idct_add ( s->dest[chroma], s->current_picture.linesize[!!chroma], s->block[0] ); block_placed: if(!chroma){ x8_update_predictions(w,w->orient,n); } if(s->loop_filter){ uint8_t* ptr = s->dest[chroma]; int linesize = s->current_picture.linesize[!!chroma]; if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){ s->dsp.x8_h_loop_filter(ptr, linesize, w->quant); } if(!( (w->edges&1) || ( zeros_only && (w->orient|8)==8 ) )){ s->dsp.x8_v_loop_filter(ptr, linesize, w->quant); } } return 0; } static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_* //not s->linesize as this would be wrong for field pics //not that IntraX8 has interlacing support ;) const int linesize = s->current_picture.linesize[0]; const int uvlinesize= 
s->current_picture.linesize[1]; s->dest[0] = s->current_picture.data[0]; s->dest[1] = s->current_picture.data[1]; s->dest[2] = s->current_picture.data[2]; s->dest[0] += s->mb_y * linesize << 3; s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows s->dest[2] += ( s->mb_y&(~1) ) * uvlinesize << 2; } /** * Initialize IntraX8 frame decoder. * Requires valid MpegEncContext with valid s->mb_width before calling. * @param w pointer to IntraX8Context * @param s pointer to MpegEncContext of the parent codec */ av_cold void ff_intrax8_common_init(IntraX8Context * w, MpegEncContext * const s){ w->s=s; x8_vlc_init(); assert(s->mb_width>0); w->prediction_table=av_mallocz(s->mb_width*2*2);//two rows, 2 blocks per cannon mb ff_init_scantable(s->dsp.idct_permutation, &w->scantable[0], wmv1_scantable[0]); ff_init_scantable(s->dsp.idct_permutation, &w->scantable[1], wmv1_scantable[2]); ff_init_scantable(s->dsp.idct_permutation, &w->scantable[2], wmv1_scantable[3]); } /** * Destroy IntraX8 frame structure. * @param w pointer to IntraX8Context */ av_cold void ff_intrax8_common_end(IntraX8Context * w) { av_freep(&w->prediction_table); } /** * Decode single IntraX8 frame. * The parent codec must fill s->loopfilter and s->gb (bitstream). * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function. * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function. * This function does not use MPV_decode_mb(). * lowres decoding is theoretically impossible. * @param w pointer to IntraX8Context * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1. 
* @param quant_offset offset away from zero */ //FIXME extern uint8_t wmv3_dc_scale_table[32]; int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_offset){ MpegEncContext * const s= w->s; int mb_xy; assert(s); w->use_quant_matrix = get_bits1(&s->gb); w->dquant = dquant; w->quant = dquant >> 1; w->qsum = quant_offset; w->divide_quant_dc_luma = ((1<<16) + (w->quant>>1)) / w->quant; if(w->quant < 5){ w->quant_dc_chroma = w->quant; w->divide_quant_dc_chroma = w->divide_quant_dc_luma; }else{ w->quant_dc_chroma = w->quant+((w->quant+3)>>3); w->divide_quant_dc_chroma = ((1<<16) + (w->quant_dc_chroma>>1)) / w->quant_dc_chroma; } x8_reset_vlc_tables(w); s->resync_mb_x=0; s->resync_mb_y=0; for(s->mb_y=0; s->mb_y < s->mb_height*2; s->mb_y++){ x8_init_block_index(s); mb_xy=(s->mb_y>>1)*s->mb_stride; for(s->mb_x=0; s->mb_x < s->mb_width*2; s->mb_x++){ x8_get_prediction(w); if(x8_setup_spatial_predictor(w,0)) goto error; if(x8_decode_intra_mb(w,0)) goto error; if( s->mb_x & s->mb_y & 1 ){ x8_get_prediction_chroma(w); /*when setting up chroma, no vlc is read, so no error condition can be reached*/ x8_setup_spatial_predictor(w,1); if(x8_decode_intra_mb(w,1)) goto error; x8_setup_spatial_predictor(w,2); if(x8_decode_intra_mb(w,2)) goto error; s->dest[1]+= 8; s->dest[2]+= 8; /*emulate MB info in the relevant tables*/ s->mbskip_table [mb_xy]=0; s->mbintra_table[mb_xy]=1; s->current_picture.qscale_table[mb_xy]=w->quant; mb_xy++; } s->dest[0]+= 8; } if(s->mb_y&1){ ff_draw_horiz_band(s, (s->mb_y-1)*8, 16); } } error: ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, (s->mb_x>>1)-1, (s->mb_y>>1)-1, (AC_END|DC_END|MV_END) ); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/intrax8.c
C
asf20
25,305
/* * Copyright (C) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * id RoQ Video Decoder by Dr. Tim Ferguson * For more information about the id RoQ format, visit: * http://www.csse.monash.edu.au/~timf/ */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "avcodec.h" #include "bytestream.h" #include "roqvideo.h" static void roqvideo_decode_frame(RoqContext *ri) { unsigned int chunk_id = 0, chunk_arg = 0; unsigned long chunk_size = 0; int i, j, k, nv1, nv2, vqflg = 0, vqflg_pos = -1; int vqid, bpos, xpos, ypos, xp, yp, x, y, mx, my; int frame_stats[2][4] = {{0},{0}}; roq_qcell *qcell; const unsigned char *buf = ri->buf; const unsigned char *buf_end = ri->buf + ri->size; while (buf < buf_end) { chunk_id = bytestream_get_le16(&buf); chunk_size = bytestream_get_le32(&buf); chunk_arg = bytestream_get_le16(&buf); if(chunk_id == RoQ_QUAD_VQ) break; if(chunk_id == RoQ_QUAD_CODEBOOK) { if((nv1 = chunk_arg >> 8) == 0) nv1 = 256; if((nv2 = chunk_arg & 0xff) == 0 && nv1 * 6 < chunk_size) nv2 = 256; for(i = 0; i < nv1; i++) { ri->cb2x2[i].y[0] = *buf++; ri->cb2x2[i].y[1] = *buf++; ri->cb2x2[i].y[2] = *buf++; ri->cb2x2[i].y[3] = *buf++; ri->cb2x2[i].u = *buf++; ri->cb2x2[i].v = *buf++; } for(i = 0; i < nv2; i++) for(j = 0; j < 4; j++) 
ri->cb4x4[i].idx[j] = *buf++; } } bpos = xpos = ypos = 0; while(bpos < chunk_size) { for (yp = ypos; yp < ypos + 16; yp += 8) for (xp = xpos; xp < xpos + 16; xp += 8) { if (vqflg_pos < 0) { vqflg = buf[bpos++]; vqflg |= (buf[bpos++] << 8); vqflg_pos = 7; } vqid = (vqflg >> (vqflg_pos * 2)) & 0x3; frame_stats[0][vqid]++; vqflg_pos--; switch(vqid) { case RoQ_ID_MOT: break; case RoQ_ID_FCC: mx = 8 - (buf[bpos] >> 4) - ((signed char) (chunk_arg >> 8)); my = 8 - (buf[bpos++] & 0xf) - ((signed char) chunk_arg); ff_apply_motion_8x8(ri, xp, yp, mx, my); break; case RoQ_ID_SLD: qcell = ri->cb4x4 + buf[bpos++]; ff_apply_vector_4x4(ri, xp, yp, ri->cb2x2 + qcell->idx[0]); ff_apply_vector_4x4(ri, xp+4, yp, ri->cb2x2 + qcell->idx[1]); ff_apply_vector_4x4(ri, xp, yp+4, ri->cb2x2 + qcell->idx[2]); ff_apply_vector_4x4(ri, xp+4, yp+4, ri->cb2x2 + qcell->idx[3]); break; case RoQ_ID_CCC: for (k = 0; k < 4; k++) { x = xp; y = yp; if(k & 0x01) x += 4; if(k & 0x02) y += 4; if (vqflg_pos < 0) { vqflg = buf[bpos++]; vqflg |= (buf[bpos++] << 8); vqflg_pos = 7; } vqid = (vqflg >> (vqflg_pos * 2)) & 0x3; frame_stats[1][vqid]++; vqflg_pos--; switch(vqid) { case RoQ_ID_MOT: break; case RoQ_ID_FCC: mx = 8 - (buf[bpos] >> 4) - ((signed char) (chunk_arg >> 8)); my = 8 - (buf[bpos++] & 0xf) - ((signed char) chunk_arg); ff_apply_motion_4x4(ri, x, y, mx, my); break; case RoQ_ID_SLD: qcell = ri->cb4x4 + buf[bpos++]; ff_apply_vector_2x2(ri, x, y, ri->cb2x2 + qcell->idx[0]); ff_apply_vector_2x2(ri, x+2, y, ri->cb2x2 + qcell->idx[1]); ff_apply_vector_2x2(ri, x, y+2, ri->cb2x2 + qcell->idx[2]); ff_apply_vector_2x2(ri, x+2, y+2, ri->cb2x2 + qcell->idx[3]); break; case RoQ_ID_CCC: ff_apply_vector_2x2(ri, x, y, ri->cb2x2 + buf[bpos]); ff_apply_vector_2x2(ri, x+2, y, ri->cb2x2 + buf[bpos+1]); ff_apply_vector_2x2(ri, x, y+2, ri->cb2x2 + buf[bpos+2]); ff_apply_vector_2x2(ri, x+2, y+2, ri->cb2x2 + buf[bpos+3]); bpos += 4; break; } } break; default: av_log(ri->avctx, AV_LOG_ERROR, "Unknown vq code: %d\n", vqid); 
} } xpos += 16; if (xpos >= ri->width) { xpos -= ri->width; ypos += 16; } if(ypos >= ri->height) break; } } static av_cold int roq_decode_init(AVCodecContext *avctx) { RoqContext *s = avctx->priv_data; s->avctx = avctx; s->width = avctx->width; s->height = avctx->height; s->last_frame = &s->frames[0]; s->current_frame = &s->frames[1]; avctx->pix_fmt = PIX_FMT_YUV444P; return 0; } static int roq_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; RoqContext *s = avctx->priv_data; int copy= !s->current_frame->data[0]; if (avctx->reget_buffer(avctx, s->current_frame)) { av_log(avctx, AV_LOG_ERROR, " RoQ: get_buffer() failed\n"); return -1; } if(copy) av_picture_copy((AVPicture*)s->current_frame, (AVPicture*)s->last_frame, avctx->pix_fmt, avctx->width, avctx->height); s->buf = buf; s->size = buf_size; roqvideo_decode_frame(s); *data_size = sizeof(AVFrame); *(AVFrame*)data = *s->current_frame; /* shuffle frames */ FFSWAP(AVFrame *, s->current_frame, s->last_frame); return buf_size; } static av_cold int roq_decode_end(AVCodecContext *avctx) { RoqContext *s = avctx->priv_data; /* release the last frame */ if (s->last_frame->data[0]) avctx->release_buffer(avctx, s->last_frame); if (s->current_frame->data[0]) avctx->release_buffer(avctx, s->current_frame); return 0; } AVCodec roq_decoder = { "roqvideo", AVMEDIA_TYPE_VIDEO, CODEC_ID_ROQ, sizeof(RoqContext), roq_decode_init, NULL, roq_decode_end, roq_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("id RoQ video"), };
123linslouis-android-video-cutter
jni/libavcodec/roqvideodec.c
C
asf20
7,764
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AVFFT_H #define AVCODEC_AVFFT_H typedef float FFTSample; typedef struct FFTComplex { FFTSample re, im; } FFTComplex; typedef struct FFTContext FFTContext; /** * Set up a complex FFT. * @param nbits log2 of the length of the input array * @param inverse if 0 perform the forward transform, if 1 perform the inverse */ FFTContext *av_fft_init(int nbits, int inverse); /** * Do the permutation needed BEFORE calling ff_fft_calc(). */ void av_fft_permute(FFTContext *s, FFTComplex *z); /** * Do a complex FFT with the parameters defined in av_fft_init(). The * input data must be permuted before. No 1.0/sqrt(n) normalization is done. */ void av_fft_calc(FFTContext *s, FFTComplex *z); void av_fft_end(FFTContext *s); FFTContext *av_mdct_init(int nbits, int inverse, double scale); void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); void av_mdct_end(FFTContext *s); /* Real Discrete Fourier Transform */ enum RDFTransformType { DFT_R2C, IDFT_C2R, IDFT_R2C, DFT_C2R, }; typedef struct RDFTContext RDFTContext; /** * Set up a real FFT. 
* @param nbits log2 of the length of the input array * @param trans the type of transform */ RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); void av_rdft_calc(RDFTContext *s, FFTSample *data); void av_rdft_end(RDFTContext *s); /* Discrete Cosine Transform */ typedef struct DCTContext DCTContext; enum DCTTransformType { DCT_II = 0, DCT_III, DCT_I, DST_I, }; /** * Sets up DCT. * @param nbits size of the input array: * (1 << nbits) for DCT-II, DCT-III and DST-I * (1 << nbits) + 1 for DCT-I * * @note the first element of the input of DST-I is ignored */ DCTContext *av_dct_init(int nbits, enum DCTTransformType type); void av_dct_calc(DCTContext *s, FFTSample *data); void av_dct_end (DCTContext *s); #endif /* AVCODEC_AVFFT_H */
123linslouis-android-video-cutter
jni/libavcodec/avfft.h
C
asf20
2,914
/* * FLAC (Free Lossless Audio Codec) decoder/demuxer common functions * Copyright (c) 2008 Justin Ruggles * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * FLAC (Free Lossless Audio Codec) decoder/demuxer common functions */ #ifndef AVCODEC_FLAC_H #define AVCODEC_FLAC_H #include "avcodec.h" #define FLAC_STREAMINFO_SIZE 34 #define FLAC_MAX_CHANNELS 8 #define FLAC_MIN_BLOCKSIZE 16 #define FLAC_MAX_BLOCKSIZE 65535 enum { FLAC_CHMODE_INDEPENDENT = 0, FLAC_CHMODE_LEFT_SIDE = 8, FLAC_CHMODE_RIGHT_SIDE = 9, FLAC_CHMODE_MID_SIDE = 10, }; enum { FLAC_METADATA_TYPE_STREAMINFO = 0, FLAC_METADATA_TYPE_PADDING, FLAC_METADATA_TYPE_APPLICATION, FLAC_METADATA_TYPE_SEEKTABLE, FLAC_METADATA_TYPE_VORBIS_COMMENT, FLAC_METADATA_TYPE_CUESHEET, FLAC_METADATA_TYPE_PICTURE, FLAC_METADATA_TYPE_INVALID = 127 }; enum FLACExtradataFormat { FLAC_EXTRADATA_FORMAT_STREAMINFO = 0, FLAC_EXTRADATA_FORMAT_FULL_HEADER = 1 }; #define FLACCOMMONINFO \ int samplerate; /**< sample rate */\ int channels; /**< number of channels */\ int bps; /**< bits-per-sample */\ /** * Data needed from the Streaminfo header for use by the raw FLAC demuxer * and/or the FLAC decoder. 
*/ #define FLACSTREAMINFO \ FLACCOMMONINFO \ int max_blocksize; /**< maximum block size, in samples */\ int max_framesize; /**< maximum frame size, in bytes */\ int64_t samples; /**< total number of samples */\ typedef struct FLACStreaminfo { FLACSTREAMINFO } FLACStreaminfo; typedef struct FLACFrameInfo { FLACCOMMONINFO int blocksize; /**< block size of the frame */ int ch_mode; /**< channel decorrelation mode */ } FLACFrameInfo; /** * Parse the Streaminfo metadata block * @param[out] avctx codec context to set basic stream parameters * @param[out] s where parsed information is stored * @param[in] buffer pointer to start of 34-byte streaminfo data */ void ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s, const uint8_t *buffer); /** * Validate the FLAC extradata. * @param[in] avctx codec context containing the extradata. * @param[out] format extradata format. * @param[out] streaminfo_start pointer to start of 34-byte STREAMINFO data. * @return 1 if valid, 0 if not valid. */ int ff_flac_is_extradata_valid(AVCodecContext *avctx, enum FLACExtradataFormat *format, uint8_t **streaminfo_start); /** * Parse the metadata block parameters from the header. * @param[in] block_header header data, at least 4 bytes * @param[out] last indicator for last metadata block * @param[out] type metadata block type * @param[out] size metadata block size */ void ff_flac_parse_block_header(const uint8_t *block_header, int *last, int *type, int *size); /** * Calculate an estimate for the maximum frame size based on verbatim mode. * @param blocksize block size, in samples * @param ch number of channels * @param bps bits-per-sample */ int ff_flac_get_max_frame_size(int blocksize, int ch, int bps); #endif /* AVCODEC_FLAC_H */
123linslouis-android-video-cutter
jni/libavcodec/flac.h
C
asf20
4,134
/*
 * Floating point AAN DCT
 * this implementation is based upon the IJG integer AAN DCT (see jfdctfst.c)
 *
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2003 Roman Shaposhnik
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file
 * @brief
 *     Floating point AAN DCT
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "dsputil.h"
#include "faandct.h"

#define FLOAT float

/* With FAAN_POSTSCALE the per-coefficient AAN scale factors are applied
 * here (postscale[] lookup); otherwise SCALE(x) is the identity and the
 * caller is expected to fold the scaling in elsewhere. */
#ifdef FAAN_POSTSCALE
# define SCALE(x) postscale[x]
#else
# define SCALE(x) 1
#endif

//numbers generated by simple c code (not as accurate as they could be)
/*
for(i=0; i<8; i++){
    printf("#define B%d %1.20llf\n", i, (long double)1.0/(cosl(i*acosl(-1.0)/(long double)16.0)*sqrtl(2)));
}
*/
#define B0 1.00000000000000000000
#define B1 0.72095982200694791383 // (cos(pi*1/16)sqrt(2))^-1
#define B2 0.76536686473017954350 // (cos(pi*2/16)sqrt(2))^-1
#define B3 0.85043009476725644878 // (cos(pi*3/16)sqrt(2))^-1
#define B4 1.00000000000000000000 // (cos(pi*4/16)sqrt(2))^-1
#define B5 1.27275858057283393842 // (cos(pi*5/16)sqrt(2))^-1
#define B6 1.84775906502257351242 // (cos(pi*6/16)sqrt(2))^-1
#define B7 3.62450978541155137218 // (cos(pi*7/16)sqrt(2))^-1

/* rotation constants of the AAN butterfly */
#define A1 0.70710678118654752438 // cos(pi*4/16)
#define A2 0.54119610014619698435 // cos(pi*6/16)sqrt(2)
#define A5 0.38268343236508977170 // cos(pi*6/16)
#define A4 1.30656296487637652774 // cos(pi*2/16)sqrt(2)

/* outer product B[row]*B[col]: combined row+column scale per coefficient */
static const FLOAT postscale[64]={
B0*B0, B0*B1, B0*B2, B0*B3, B0*B4, B0*B5, B0*B6, B0*B7,
B1*B0, B1*B1, B1*B2, B1*B3, B1*B4, B1*B5, B1*B6, B1*B7,
B2*B0, B2*B1, B2*B2, B2*B3, B2*B4, B2*B5, B2*B6, B2*B7,
B3*B0, B3*B1, B3*B2, B3*B3, B3*B4, B3*B5, B3*B6, B3*B7,
B4*B0, B4*B1, B4*B2, B4*B3, B4*B4, B4*B5, B4*B6, B4*B7,
B5*B0, B5*B1, B5*B2, B5*B3, B5*B4, B5*B5, B5*B6, B5*B7,
B6*B0, B6*B1, B6*B2, B6*B3, B6*B4, B6*B5, B6*B6, B6*B7,
B7*B0, B7*B1, B7*B2, B7*B3, B7*B4, B7*B5, B7*B6, B7*B7,
};

/**
 * 1-D AAN DCT applied to each of the 8 rows of an 8x8 block.
 * Results are written to temp[] unscaled; the column pass applies SCALE().
 */
static av_always_inline void row_fdct(FLOAT temp[64], DCTELEM * data)
{
    FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    FLOAT tmp10, tmp11, tmp12, tmp13;
    FLOAT z2, z4, z11, z13;
    FLOAT av_unused z5;  /* only used by the disabled #if 0 variant below */
    int i;

    for (i=0; i<8*8; i+=8) {
        /* stage 1: butterflies on mirrored sample pairs */
        tmp0= data[0 + i] + data[7 + i];
        tmp7= data[0 + i] - data[7 + i];
        tmp1= data[1 + i] + data[6 + i];
        tmp6= data[1 + i] - data[6 + i];
        tmp2= data[2 + i] + data[5 + i];
        tmp5= data[2 + i] - data[5 + i];
        tmp3= data[3 + i] + data[4 + i];
        tmp4= data[3 + i] - data[4 + i];

        /* even part */
        tmp10= tmp0 + tmp3;
        tmp13= tmp0 - tmp3;
        tmp11= tmp1 + tmp2;
        tmp12= tmp1 - tmp2;

        temp[0 + i]= tmp10 + tmp11;
        temp[4 + i]= tmp10 - tmp11;

        tmp12 += tmp13;
        tmp12 *= A1;
        temp[2 + i]= tmp13 + tmp12;
        temp[6 + i]= tmp13 - tmp12;

        /* odd part */
        tmp4 += tmp5;
        tmp5 += tmp6;
        tmp6 += tmp7;

#if 0
        z5= (tmp4 - tmp6) * A5;
        z2= tmp4*A2 + z5;
        z4= tmp6*A4 + z5;
#else
        /* same rotation as above with z5 algebraically folded in */
        z2= tmp4*(A2+A5) - tmp6*A5;
        z4= tmp6*(A4-A5) + tmp4*A5;
#endif
        tmp5*=A1;

        z11= tmp7 + tmp5;
        z13= tmp7 - tmp5;

        temp[5 + i]= z13 + z2;
        temp[3 + i]= z13 - z2;
        temp[1 + i]= z11 + z4;
        temp[7 + i]= z11 - z4;
    }
}

/**
 * Forward 8x8 AAN DCT: row pass into temp[], then column pass writing the
 * rounded (lrintf) coefficients back into data[] in place.
 */
void ff_faandct(DCTELEM * data)
{
    FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    FLOAT tmp10, tmp11, tmp12, tmp13;
    FLOAT z2, z4, z11, z13;
    FLOAT av_unused z5;  /* only used by the disabled #if 0 variant below */
    FLOAT temp[64];
    int i;

    /* NOTE(review): presumably resets x86 MMX state before FPU use
     * (emms_c comes from dsputil.h) — confirm against that header */
    emms_c();

    row_fdct(temp, data);

    /* column pass: same butterfly structure as row_fdct, but with
     * per-coefficient SCALE() and rounding to integer */
    for (i=0; i<8; i++) {
        tmp0= temp[8*0 + i] + temp[8*7 + i];
        tmp7= temp[8*0 + i] - temp[8*7 + i];
        tmp1= temp[8*1 + i] + temp[8*6 + i];
        tmp6= temp[8*1 + i] - temp[8*6 + i];
        tmp2= temp[8*2 + i] + temp[8*5 + i];
        tmp5= temp[8*2 + i] - temp[8*5 + i];
        tmp3= temp[8*3 + i] + temp[8*4 + i];
        tmp4= temp[8*3 + i] - temp[8*4 + i];

        tmp10= tmp0 + tmp3;
        tmp13= tmp0 - tmp3;
        tmp11= tmp1 + tmp2;
        tmp12= tmp1 - tmp2;

        data[8*0 + i]= lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
        data[8*4 + i]= lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));

        tmp12 += tmp13;
        tmp12 *= A1;
        data[8*2 + i]= lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
        data[8*6 + i]= lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));

        tmp4 += tmp5;
        tmp5 += tmp6;
        tmp6 += tmp7;

#if 0
        z5= (tmp4 - tmp6) * A5;
        z2= tmp4*A2 + z5;
        z4= tmp6*A4 + z5;
#else
        z2= tmp4*(A2+A5) - tmp6*A5;
        z4= tmp6*(A4-A5) + tmp4*A5;
#endif
        tmp5*=A1;

        z11= tmp7 + tmp5;
        z13= tmp7 - tmp5;

        data[8*5 + i]= lrintf(SCALE(8*5 + i) * (z13 + z2));
        data[8*3 + i]= lrintf(SCALE(8*3 + i) * (z13 - z2));
        data[8*1 + i]= lrintf(SCALE(8*1 + i) * (z11 + z4));
        data[8*7 + i]= lrintf(SCALE(8*7 + i) * (z11 - z4));
    }
}

/**
 * 2-4-8 DCT variant: full 8-point DCT on rows, then two 4-point transforms
 * per column (on sums and differences of vertically adjacent row pairs).
 * Note the odd output rows reuse the even rows' SCALE() indices below.
 */
void ff_faandct248(DCTELEM * data)
{
    FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    FLOAT tmp10, tmp11, tmp12, tmp13;
    FLOAT temp[64];
    int i;

    /* NOTE(review): presumably resets x86 MMX state before FPU use —
     * see ff_faandct above */
    emms_c();

    row_fdct(temp, data);

    for (i=0; i<8; i++) {
        /* sums and differences of vertically adjacent row pairs */
        tmp0 = temp[8*0 + i] + temp[8*1 + i];
        tmp1 = temp[8*2 + i] + temp[8*3 + i];
        tmp2 = temp[8*4 + i] + temp[8*5 + i];
        tmp3 = temp[8*6 + i] + temp[8*7 + i];
        tmp4 = temp[8*0 + i] - temp[8*1 + i];
        tmp5 = temp[8*2 + i] - temp[8*3 + i];
        tmp6 = temp[8*4 + i] - temp[8*5 + i];
        tmp7 = temp[8*6 + i] - temp[8*7 + i];

        /* 4-point transform on the sums */
        tmp10 = tmp0 + tmp3;
        tmp11 = tmp1 + tmp2;
        tmp12 = tmp1 - tmp2;
        tmp13 = tmp0 - tmp3;

        data[8*0 + i] = lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
        data[8*4 + i] = lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));

        tmp12 += tmp13;
        tmp12 *= A1;
        data[8*2 + i] = lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
        data[8*6 + i] = lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));

        /* 4-point transform on the differences; scale indices reuse the
         * even-row factors (0,4,2,6) by design of the 2-4-8 layout */
        tmp10 = tmp4 + tmp7;
        tmp11 = tmp5 + tmp6;
        tmp12 = tmp5 - tmp6;
        tmp13 = tmp4 - tmp7;

        data[8*1 + i] = lrintf(SCALE(8*0 + i) * (tmp10 + tmp11));
        data[8*5 + i] = lrintf(SCALE(8*4 + i) * (tmp10 - tmp11));

        tmp12 += tmp13;
        tmp12 *= A1;
        data[8*3 + i] = lrintf(SCALE(8*2 + i) * (tmp13 + tmp12));
        data[8*7 + i] = lrintf(SCALE(8*6 + i) * (tmp13 - tmp12));
    }
}
123linslouis-android-video-cutter
jni/libavcodec/faandct.c
C
asf20
7,023
/*
 * AAC definitions and structures
 * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
 * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AAC definitions and structures
 * @author Oded Shimon  ( ods15 ods15 dyndns org )
 * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
 */

#ifndef AVCODEC_AAC_H
#define AVCODEC_AAC_H

#include "avcodec.h"
#include "dsputil.h"
#include "fft.h"
#include "mpeg4audio.h"
#include "sbr.h"

#include <stdint.h>

#define MAX_CHANNELS 64       ///< upper bound on decoded output channels
#define MAX_ELEM_ID 16        ///< max element instance tag per raw data block type

#define TNS_MAX_ORDER 20      ///< maximum TNS filter order supported

/** Syntactic element types present in a raw data block. */
enum RawDataBlockType {
    TYPE_SCE,
    TYPE_CPE,
    TYPE_CCE,
    TYPE_LFE,
    TYPE_DSE,
    TYPE_PCE,
    TYPE_FIL,
    TYPE_END,
};

/** Extension payload IDs carried in fill elements. */
enum ExtensionPayloadID {
    EXT_FILL,
    EXT_FILL_DATA,
    EXT_DATA_ELEMENT,
    EXT_DYNAMIC_RANGE = 0xb,
    EXT_SBR_DATA      = 0xd,
    EXT_SBR_DATA_CRC  = 0xe,
};

/** Window sequence signalled per ICS. */
enum WindowSequence {
    ONLY_LONG_SEQUENCE,
    LONG_START_SEQUENCE,
    EIGHT_SHORT_SEQUENCE,
    LONG_STOP_SEQUENCE,
};

enum BandType {
    ZERO_BT       = 0,  ///< Scalefactors and spectral data are all zero.
    FIRST_PAIR_BT = 5,  ///< This and later band types encode two values (rather than four) with one code word.
    ESC_BT        = 11, ///< Spectral data are coded with an escape sequence.
    NOISE_BT      = 13, ///< Spectral data are scaled white noise not coded in the bitstream.
    INTENSITY_BT2 = 14, ///< Scalefactor data are intensity stereo positions.
    INTENSITY_BT  = 15, ///< Scalefactor data are intensity stereo positions.
};

/* true for codebooks whose entries are unsigned (2,4,6,8,10,...) */
#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)

/** Channel position classes used when mapping elements to output channels. */
enum ChannelPosition {
    AAC_CHANNEL_FRONT = 1,
    AAC_CHANNEL_SIDE  = 2,
    AAC_CHANNEL_BACK  = 3,
    AAC_CHANNEL_LFE   = 4,
    AAC_CHANNEL_CC    = 5,
};

/**
 * The point during decoding at which channel coupling is applied.
 */
enum CouplingPoint {
    BEFORE_TNS,
    BETWEEN_TNS_AND_IMDCT,
    AFTER_IMDCT = 3,
};

/**
 * Output configuration status
 */
enum OCStatus {
    OC_NONE,        //< Output unconfigured
    OC_TRIAL_PCE,   //< Output configuration under trial specified by an inband PCE
    OC_TRIAL_FRAME, //< Output configuration under trial specified by a frame header
    OC_GLOBAL_HDR,  //< Output configuration set in a global header but not yet locked
    OC_LOCKED,      //< Output configuration locked in place
};

/**
 * Predictor State
 */
typedef struct {
    float cor0;  // correlation accumulators
    float cor1;
    float var0;  // variance accumulators
    float var1;
    float r0;    // reconstructed values
    float r1;
} PredictorState;

#define MAX_PREDICTORS 672

#define SCALE_DIV_512    36    ///< scalefactor difference that corresponds to scale difference in 512 times
#define SCALE_ONE_POS   140    ///< scalefactor index that corresponds to scale=1.0
#define SCALE_MAX_POS   255    ///< scalefactor index maximum value
#define SCALE_MAX_DIFF   60    ///< maximum scalefactor difference allowed by standard
#define SCALE_DIFF_ZERO  60    ///< codebook index corresponding to zero scalefactor indices difference

/**
 * Individual Channel Stream
 */
typedef struct {
    uint8_t max_sfb;            ///< number of scalefactor bands per group
    enum WindowSequence window_sequence[2];
    uint8_t use_kb_window[2];   ///< If set, use Kaiser-Bessel window, otherwise use a sinus window.
    int num_window_groups;
    uint8_t group_len[8];
    const uint16_t *swb_offset; ///< table of offsets to the lowest spectral coefficient of a scalefactor band, sfb, for a particular window
    const uint8_t *swb_sizes;   ///< table of scalefactor band sizes for a particular window
    int num_swb;                ///< number of scalefactor window bands
    int num_windows;
    int tns_max_bands;
    int predictor_present;
    int predictor_initialized;
    int predictor_reset_group;
    uint8_t prediction_used[41];
} IndividualChannelStream;

/**
 * Temporal Noise Shaping
 */
typedef struct {
    int present;                          ///< set when a TNS filter is signalled
    int n_filt[8];                        ///< number of filters per window
    int length[8][4];                     ///< filter region length
    int direction[8][4];                  ///< filtering direction
    int order[8][4];                      ///< filter order
    float coef[8][4][TNS_MAX_ORDER];      ///< filter coefficients
} TemporalNoiseShaping;

/**
 * Dynamic Range Control - decoded from the bitstream but not processed further.
 */
typedef struct {
    int pce_instance_tag;              ///< Indicates with which program the DRC info is associated.
    int dyn_rng_sgn[17];               ///< DRC sign information; 0 - positive, 1 - negative
    int dyn_rng_ctl[17];               ///< DRC magnitude information
    int exclude_mask[MAX_CHANNELS];    ///< Channels to be excluded from DRC processing.
    int band_incr;                     ///< Number of DRC bands greater than 1 having DRC info.
    int interpolation_scheme;          ///< Indicates the interpolation scheme used in the SBR QMF domain.
    int band_top[17];                  ///< Indicates the top of the i-th DRC band in units of 4 spectral lines.
    int prog_ref_level;                /**< A reference level for the long-term program audio level for all
                                        *   channels combined.
                                        */
} DynamicRangeControl;

/** Pulse data (perceptual noise substitution of individual lines). */
typedef struct {
    int num_pulse;
    int start;
    int pos[4];
    int amp[4];
} Pulse;

/**
 * coupling parameters
 */
typedef struct {
    enum CouplingPoint coupling_point;  ///< The point during decoding at which coupling is applied.
    int num_coupled;                    ///< number of target elements
    enum RawDataBlockType type[8];      ///< Type of channel element to be coupled - SCE or CPE.
    int id_select[8];                   ///< element id
    int ch_select[8];                   /**< [0] shared list of gains; [1] list of gains for right channel;
                                         *   [2] list of gains for left channel; [3] lists of gains for both channels
                                         */
    float gain[16][120];
} ChannelCoupling;

/**
 * Single Channel Element - used for both SCE and LFE elements.
 */
typedef struct {
    IndividualChannelStream ics;
    TemporalNoiseShaping tns;
    Pulse pulse;
    enum BandType band_type[128];             ///< band types
    int band_type_run_end[120];               ///< band type run end points
    float sf[120];                            ///< scalefactors
    int sf_idx[128];                          ///< scalefactor indices (used by encoder)
    uint8_t zeroes[128];                      ///< band is not coded (used by encoder)
    DECLARE_ALIGNED(16, float, coeffs)[1024]; ///< coefficients for IMDCT
    DECLARE_ALIGNED(16, float, saved)[1024];  ///< overlap
    DECLARE_ALIGNED(16, float, ret)[2048];    ///< PCM output
    PredictorState predictor_state[MAX_PREDICTORS];
} SingleChannelElement;

/**
 * channel element - generic struct for SCE/CPE/CCE/LFE
 */
typedef struct {
    // CPE specific
    int common_window;    ///< Set if channels share a common 'IndividualChannelStream' in bitstream.
    int ms_mode;          ///< Signals mid/side stereo flags coding mode (used by encoder)
    uint8_t ms_mask[128]; ///< Set if mid/side stereo is used for each scalefactor window band
    // shared
    SingleChannelElement ch[2];
    // CCE specific
    ChannelCoupling coup;
    SpectralBandReplication sbr;
} ChannelElement;

/**
 * main AAC context
 */
typedef struct {
    AVCodecContext *avctx;
    MPEG4AudioConfig m4ac;

    int is_saved;                 ///< Set if elements have stored overlap from previous frame.
    DynamicRangeControl che_drc;

    /**
     * @defgroup elements Channel element related data.
     * @{
     */
    enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the
                                                   *   first index as the first 4 raw data block types
                                                   */
    ChannelElement          *che[4][MAX_ELEM_ID];
    ChannelElement  *tag_che_map[4][MAX_ELEM_ID];
    uint8_t tags_seen_this_frame[4][MAX_ELEM_ID];
    int tags_mapped;
    /** @} */

    /**
     * @defgroup temporary aligned temporary buffers (We do not want to have these on the stack.)
     * @{
     */
    DECLARE_ALIGNED(16, float, buf_mdct)[1024];
    /** @} */

    /**
     * @defgroup tables   Computed / set up during initialization.
     * @{
     */
    FFTContext mdct;
    FFTContext mdct_small;
    DSPContext dsp;
    int random_state;
    /** @} */

    /**
     * @defgroup output   Members used for output interleaving.
     * @{
     */
    float *output_data[MAX_CHANNELS]; ///< Points to each element's 'ret' buffer (PCM output).
    float add_bias;                   ///< offset for dsp.float_to_int16
    float sf_scale;                   ///< Pre-scale for correct IMDCT and dsp.float_to_int16.
    int sf_offset;                    ///< offset into pow2sf_tab as appropriate for dsp.float_to_int16
    /** @} */

    DECLARE_ALIGNED(16, float, temp)[128];

    enum OCStatus output_configured;
} AACContext;

#endif /* AVCODEC_AAC_H */
123linslouis-android-video-cutter
jni/libavcodec/aac.h
C
asf20
9,822
/*
 * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ELBG_H
#define AVCODEC_ELBG_H

#include "libavutil/lfg.h"

/**
 * Implementation of the Enhanced LBG Algorithm
 * Based on the paper "Neural Networks 14:1219-1237" that can be found in
 * http://citeseer.ist.psu.edu/patan01enhanced.html .
 *
 * @param points Input points.
 * @param dim Dimension of the points.
 * @param numpoints Number of points in **points.
 * @param codebook Pointer to the output codebook. Must be allocated.
 * @param numCB Number of points in the codebook.
 * @param num_steps The maximum number of steps. One step is already a good compromise between time and quality.
 * @param closest_cb Return the closest codebook to each point. Must be allocated.
 * @param rand_state A random number generator state. Should be already initialized by av_lfg_init().
 */
void ff_do_elbg(int *points, int dim, int numpoints, int *codebook,
                int numCB, int num_steps, int *closest_cb,
                AVLFG *rand_state);

/**
 * Initialize the **codebook vector for the elbg algorithm. If you have already
 * a codebook and you want to refine it, you shouldn't call this function.
 * If numpoints < 8*numCB this function fills **codebook with random numbers.
 * If not, it calls ff_do_elbg for a (smaller) random sample of the points in
 * **points. Takes the same parameters as ff_do_elbg.
 */
void ff_init_elbg(int *points, int dim, int numpoints, int *codebook,
                  int numCB, int num_steps, int *closest_cb,
                  AVLFG *rand_state);

#endif /* AVCODEC_ELBG_H */
123linslouis-android-video-cutter
jni/libavcodec/elbg.h
C
asf20
2,354
/*
 * DVD subtitle decoding for ffmpeg
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/intreadwrite.h"
#include "avcodec.h"

/* parser definition */
typedef struct DVDSubParseContext {
    uint8_t *packet;      /* reassembly buffer for one subpicture packet */
    int packet_len;       /* total length announced in the packet header */
    int packet_index;     /* number of bytes accumulated so far */
} DVDSubParseContext;

static av_cold int dvdsub_parse_init(AVCodecParserContext *s)
{
    return 0;
}

/**
 * Reassemble a DVD (or HD-DVD) subpicture packet from possibly fragmented
 * input. Outputs the complete packet via poutbuf once all bytes announced
 * in the header have been collected; outputs nothing otherwise.
 *
 * @return number of bytes of buf consumed
 */
static int dvdsub_parse(AVCodecParserContext *s,
                        AVCodecContext *avctx,
                        const uint8_t **poutbuf, int *poutbuf_size,
                        const uint8_t *buf, int buf_size)
{
    DVDSubParseContext *pc = s->priv_data;

    /* default: no output; fixes the early-return path below which
     * previously left *poutbuf / *poutbuf_size uninitialized */
    *poutbuf      = NULL;
    *poutbuf_size = 0;

    if (pc->packet_index == 0) {
        if (buf_size < 2)
            return 0;
        pc->packet_len = AV_RB16(buf);
        if (pc->packet_len == 0) { /* HD-DVD subpicture packet */
            /* the 32-bit length field lives at bytes 2..5; previously this
             * was read after checking only buf_size >= 2, overreading the
             * buffer for inputs of 2-5 bytes */
            if (buf_size < 6)
                return 0;
            pc->packet_len = AV_RB32(buf + 2);
        }
        av_freep(&pc->packet);
        pc->packet = av_malloc(pc->packet_len);
    }
    if (pc->packet) {
        if (pc->packet_index + buf_size <= pc->packet_len) {
            memcpy(pc->packet + pc->packet_index, buf, buf_size);
            pc->packet_index += buf_size;
            if (pc->packet_index >= pc->packet_len) {
                /* full packet assembled: hand it out and reset */
                *poutbuf      = pc->packet;
                *poutbuf_size = pc->packet_len;
                pc->packet_index = 0;
                return buf_size;
            }
        } else {
            /* erroneous size: drop the partial packet */
            pc->packet_index = 0;
        }
    }
    return buf_size;
}

static av_cold void dvdsub_parse_close(AVCodecParserContext *s)
{
    DVDSubParseContext *pc = s->priv_data;
    av_freep(&pc->packet);
}

AVCodecParser dvdsub_parser = {
    { CODEC_ID_DVD_SUBTITLE },
    sizeof(DVDSubParseContext),
    dvdsub_parse_init,
    dvdsub_parse,
    dvdsub_parse_close,
};
123linslouis-android-video-cutter
jni/libavcodec/dvdsub_parser.c
C
asf20
2,595
/*
 * MPEG-4 / H.263 HW decode acceleration through VA API
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "vaapi_internal.h"

/** Reconstruct bitstream intra_dc_vlc_thr */
static int mpeg4_get_intra_dc_vlc_thr(MpegEncContext *s)
{
    /* maps the decoded threshold back to the 3-bit bitstream code */
    switch (s->intra_dc_threshold) {
    case 99: return 0;
    case 13: return 1;
    case 15: return 2;
    case 17: return 3;
    case 19: return 4;
    case 21: return 5;
    case 23: return 6;
    case 0:  return 7;
    }
    return 0;
}

/**
 * Fill the VA API picture parameter (and, when needed, IQ matrix) buffers
 * from the decoded MPEG-4/H.263 headers at the start of each frame.
 *
 * @return 0 on success, -1 if a VA buffer could not be allocated
 */
static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferMPEG4 *pic_param;
    VAIQMatrixBufferMPEG4 *iq_matrix;
    int i;

    dprintf(avctx, "vaapi_mpeg4_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG4);

    /* Fill in VAPictureParameterBufferMPEG4 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG4));
    if (!pic_param)
        return -1;
    pic_param->vop_width                                = s->width;
    pic_param->vop_height                               = s->height;
    pic_param->forward_reference_picture                = VA_INVALID_ID;
    pic_param->backward_reference_picture               = VA_INVALID_ID;
    pic_param->vol_fields.value                         = 0; /* reset all bits */
    pic_param->vol_fields.bits.short_video_header       = avctx->codec->id == CODEC_ID_H263;
    pic_param->vol_fields.bits.chroma_format            = CHROMA_420;
    pic_param->vol_fields.bits.interlaced               = !s->progressive_sequence;
    pic_param->vol_fields.bits.obmc_disable             = 1;
    pic_param->vol_fields.bits.sprite_enable            = s->vol_sprite_usage;
    pic_param->vol_fields.bits.sprite_warping_accuracy  = s->sprite_warping_accuracy;
    pic_param->vol_fields.bits.quant_type               = s->mpeg_quant;
    pic_param->vol_fields.bits.quarter_sample           = s->quarter_sample;
    pic_param->vol_fields.bits.data_partitioned         = s->data_partitioning;
    pic_param->vol_fields.bits.reversible_vlc           = s->rvlc;
    pic_param->vol_fields.bits.resync_marker_disable    = !s->resync_marker;
    pic_param->no_of_sprite_warping_points              = s->num_sprite_warping_points;
    /* VA API carries at most 3 sprite warping points */
    for (i = 0; i < s->num_sprite_warping_points && i < 3; i++) {
        pic_param->sprite_trajectory_du[i]              = s->sprite_traj[i][0];
        pic_param->sprite_trajectory_dv[i]              = s->sprite_traj[i][1];
    }
    pic_param->quant_precision                          = s->quant_precision;
    pic_param->vop_fields.value                         = 0; /* reset all bits */
    pic_param->vop_fields.bits.vop_coding_type          = s->pict_type - FF_I_TYPE;
    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == FF_B_TYPE ? s->next_picture.pict_type - FF_I_TYPE : 0;
    pic_param->vop_fields.bits.vop_rounding_type        = s->no_rounding;
    pic_param->vop_fields.bits.intra_dc_vlc_thr         = mpeg4_get_intra_dc_vlc_thr(s);
    pic_param->vop_fields.bits.top_field_first          = s->top_field_first;
    pic_param->vop_fields.bits.alternate_vertical_scan_flag = s->alternate_scan;
    pic_param->vop_fcode_forward                        = s->f_code;
    pic_param->vop_fcode_backward                       = s->b_code;
    pic_param->vop_time_increment_resolution            = avctx->time_base.den;
    pic_param->num_macroblocks_in_gob                   = s->mb_width * ff_h263_get_gob_height(s);
    pic_param->num_gobs_in_vop                          = (s->mb_width * s->mb_height) / pic_param->num_macroblocks_in_gob;
    pic_param->TRB                                      = s->pb_time;
    pic_param->TRD                                      = s->pp_time;

    /* B-frames reference both neighbours; P-frames only the previous one */
    if (s->pict_type == FF_B_TYPE)
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture);
    if (s->pict_type != FF_I_TYPE)
        pic_param->forward_reference_picture  = ff_vaapi_get_surface_id(&s->last_picture);

    /* Fill in VAIQMatrixBufferMPEG4 */
    /* Only the first inverse quantisation method uses the weighthing matrices */
    if (pic_param->vol_fields.bits.quant_type) {
        iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG4));
        if (!iq_matrix)
            return -1;
        iq_matrix->load_intra_quant_mat         = 1;
        iq_matrix->load_non_intra_quant_mat     = 1;

        /* un-permute the matrices back to zigzag order for VA API */
        for (i = 0; i < 64; i++) {
            int n = s->dsp.idct_permutation[ff_zigzag_direct[i]];
            iq_matrix->intra_quant_mat[i]       = s->intra_matrix[n];
            iq_matrix->non_intra_quant_mat[i]   = s->inter_matrix[n];
        }
    }
    return 0;
}

/** Commit all queued VA buffers for this frame to the hardware. */
static int vaapi_mpeg4_end_frame(AVCodecContext *avctx)
{
    return ff_vaapi_common_end_frame(avctx->priv_data);
}

/** Queue one slice's parameters and data for hardware decoding. */
static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    MpegEncContext * const s = avctx->priv_data;
    VASliceParameterBufferMPEG4 *slice_param;

    dprintf(avctx, "vaapi_mpeg4_decode_slice(): buffer %p, size %d\n", buffer, size);

    /* video_plane_with_short_video_header() contains all GOBs
     * in-order, and this is what VA API (Intel backend) expects: only
     * a single slice param. So fake macroblock_number for FFmpeg so
     * that we don't call vaapi_mpeg4_decode_slice() again */
    if (avctx->codec->id == CODEC_ID_H263)
        size = s->gb.buffer_end - buffer;

    /* Fill in VASliceParameterBufferMPEG4 */
    slice_param = (VASliceParameterBufferMPEG4 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
    if (!slice_param)
        return -1;
    slice_param->macroblock_offset      = get_bits_count(&s->gb) % 8;
    slice_param->macroblock_number      = s->mb_y * s->mb_width + s->mb_x;
    slice_param->quant_scale            = s->qscale;

    /* for H.263 pretend decoding is finished (see comment above) */
    if (avctx->codec->id == CODEC_ID_H263)
        s->mb_y = s->mb_height;

    return 0;
}

#if CONFIG_MPEG4_VAAPI_HWACCEL
AVHWAccel mpeg4_vaapi_hwaccel = {
    .name           = "mpeg4_vaapi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_MPEG4,
    .pix_fmt        = PIX_FMT_VAAPI_VLD,
    .capabilities   = 0,
    .start_frame    = vaapi_mpeg4_start_frame,
    .end_frame      = vaapi_mpeg4_end_frame,
    .decode_slice   = vaapi_mpeg4_decode_slice,
    .priv_data_size = 0,
};
#endif

#if CONFIG_H263_VAAPI_HWACCEL
AVHWAccel h263_vaapi_hwaccel = {
    .name           = "h263_vaapi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_H263,
    .pix_fmt        = PIX_FMT_VAAPI_VLD,
    .capabilities   = 0,
    .start_frame    = vaapi_mpeg4_start_frame,
    .end_frame      = vaapi_mpeg4_end_frame,
    .decode_slice   = vaapi_mpeg4_decode_slice,
    .priv_data_size = 0,
};
#endif
123linslouis-android-video-cutter
jni/libavcodec/vaapi_mpeg4.c
C
asf20
7,720
/*
 * Header file for hardcoded QDM2 tables
 *
 * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef QDM2_TABLEGEN_H
#define QDM2_TABLEGEN_H

#include <stdint.h>
#include <math.h>
#include "../libavutil/attributes.h"

#define SOFTCLIP_THRESHOLD 27600
#define HARDCLIP_THRESHOLD 35716

#if CONFIG_HARDCODED_TABLES
/* tables are precomputed: the init functions become no-ops */
#define softclip_table_init()
#define rnd_table_init()
#define init_noise_samples()
#include "libavcodec/qdm2_tables.h"
#else
static uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1];
static float noise_table[4096];
static uint8_t random_dequant_index[256][5];
static uint8_t random_dequant_type24[128][3];
static float noise_samples[128];

/* Build the sine-shaped soft clipping curve mapping sample magnitudes in
 * [SOFTCLIP_THRESHOLD, HARDCLIP_THRESHOLD] into the 16-bit output range. */
static av_cold void softclip_table_init(void) {
    int i;
    double dfl = SOFTCLIP_THRESHOLD - 32767;
    float delta = 1.0 / -dfl;
    for (i = 0; i < HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1; i++)
        softclip_table[i] = SOFTCLIP_THRESHOLD - ((int)(sin((float)i * delta) * dfl) & 0x0000FFFF);
}

// random generated table
/* Fill noise_table with an LCG-driven noise sequence and derive the two
 * random dequantization index tables via repeated fixed-point division
 * (the 0x55555556 / 0x66666667 multiplies are reciprocal approximations
 * of /3 and /5 — exact integer steps must be preserved bit-for-bit). */
static av_cold void rnd_table_init(void) {
    int i,j;
    uint32_t ldw,hdw;
    uint64_t tmp64_1;
    uint64_t random_seed = 0;
    float delta = 1.0 / 16384.0;
    for(i = 0; i < 4096 ;i++) {
        random_seed = random_seed * 214013 + 2531011;
        noise_table[i] = (delta * (float)(((int32_t)random_seed >> 16) & 0x00007FFF)- 1.0) * 1.3;
    }

    for (i = 0; i < 256 ;i++) {
        random_seed = 81;
        ldw = i;
        for (j = 0; j < 5 ;j++) {
            random_dequant_index[i][j] = (uint8_t)((ldw / random_seed) & 0xFF);
            ldw = (uint32_t)ldw % (uint32_t)random_seed;
            tmp64_1 = (random_seed * 0x55555556);
            hdw = (uint32_t)(tmp64_1 >> 32);
            random_seed = (uint64_t)(hdw + (ldw >> 31));
        }
    }
    for (i = 0; i < 128 ;i++) {
        random_seed = 25;
        ldw = i;
        for (j = 0; j < 3 ;j++) {
            random_dequant_type24[i][j] = (uint8_t)((ldw / random_seed) & 0xFF);
            ldw = (uint32_t)ldw % (uint32_t)random_seed;
            tmp64_1 = (random_seed * 0x66666667);
            hdw = (uint32_t)(tmp64_1 >> 33);
            random_seed = hdw + (ldw >> 31);
        }
    }
}

/* Fill noise_samples with a second, shorter LCG noise sequence
 * (same multiplier/increment as above, signed int state). */
static av_cold void init_noise_samples(void) {
    int i;
    int random_seed = 0;
    float delta = 1.0 / 16384.0;
    for (i = 0; i < 128;i++) {
        random_seed = random_seed * 214013 + 2531011;
        noise_samples[i] = (delta * (float)((random_seed >> 16) & 0x00007fff) - 1.0);
    }
}
#endif /* CONFIG_HARDCODED_TABLES */

#endif /* QDM2_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/qdm2_tablegen.h
C
asf20
3,329
/*
 * Copyright (c) 2004 Gildas Bazin
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "dcadsp.h"

/*
 * LFE FIR interpolation, C reference version.
 *
 * Each decimated input sample produces 2*decifactor output samples: the
 * first decifactor of them use the 256 coefficients walked forward, the
 * second decifactor use the same coefficients walked backward.  Results
 * are scaled by 'scale' and offset by 'bias'.
 */
static void dca_lfe_fir_c(float *out, const float *in, const float *coefs,
                          int decifactor, float scale, float bias)
{
    const int num_taps = 256 / decifactor;
    const float *fwd = coefs;       /* walks forward from coefs[0]   */
    const float *rev = coefs + 256; /* walks backward from coefs[256] */
    float *lo = out;                /* first half of the output span  */
    float *hi = out + decifactor;   /* second half of the output span */
    int i, t;

    for (i = 0; i < decifactor; i++) {
        float acc_fwd = 0.0;
        float acc_rev = 0.0;

        /* accumulate taps in the same order as the scalar reference so
         * float rounding stays identical */
        for (t = 0; t < num_taps; t++) {
            float sample = in[-t];
            acc_fwd += sample * *fwd++;
            acc_rev += sample * *--rev;
        }
        lo[i] = (acc_fwd * scale) + bias;
        hi[i] = (acc_rev * scale) + bias;
    }
}

/* Install the C implementation, then let the ARM init override it. */
void ff_dcadsp_init(DCADSPContext *s)
{
    s->lfe_fir = dca_lfe_fir_c;
    if (ARCH_ARM)
        ff_dcadsp_init_arm(s);
}
123linslouis-android-video-cutter
jni/libavcodec/dcadsp.c
C
asf20
1,667
/* * DCA compatible decoder - huffman tables * Copyright (C) 2004 Gildas Bazin * Copyright (C) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_DCAHUFF_H #define AVCODEC_DCAHUFF_H #include <stdint.h> #include <stdlib.h> #define TMODE_COUNT 4 static const uint8_t tmode_vlc_bits[TMODE_COUNT] = { 3, 3, 3, 2 }; static const uint16_t tmode_codes[TMODE_COUNT][4] = { { 0x0000, 0x0002, 0x0006, 0x0007 }, { 0x0002, 0x0006, 0x0007, 0x0000 }, { 0x0006, 0x0007, 0x0000, 0x0002 }, { 0x0000, 0x0001, 0x0002, 0x0003 } }; static const uint8_t tmode_bits[TMODE_COUNT][4] = { { 1, 2, 3, 3 }, { 2, 3, 3, 1 }, { 3, 3, 1, 2 }, { 2, 2, 2, 2 } }; #define BITALLOC_12_COUNT 5 #define BITALLOC_12_VLC_BITS 9 static const uint8_t bitalloc_12_vlc_bits[BITALLOC_12_COUNT] = { 9, 7, 7, 9, 9 }; static const uint16_t bitalloc_12_codes[BITALLOC_12_COUNT][12] = { { 0x0000, 0x0002, 0x0006, 0x000E, 0x001E, 0x003E, 0x00FF, 0x00FE, 0x01FB, 0x01FA, 0x01F9, 0x01F8, }, { 0x0001, 0x0000, 0x0002, 0x000F, 0x000C, 0x001D, 0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0034, }, { 0x0000, 0x0007, 0x0005, 0x0004, 0x0002, 0x000D, 0x000C, 0x0006, 0x000F, 0x001D, 0x0039, 0x0038, }, { 0x0003, 0x0002, 0x0000, 0x0002, 0x0006, 0x000E, 0x001E, 0x003E, 0x007E, 0x00FE, 0x01FF, 0x01FE, }, { 0x0001, 0x0000, 
0x0002, 0x0006, 0x000E, 0x003F, 0x003D, 0x007C, 0x0079, 0x0078, 0x00FB, 0x00FA, } }; static const uint8_t bitalloc_12_bits[BITALLOC_12_COUNT][12] = { { 1, 2, 3, 4, 5, 6, 8, 8, 9, 9, 9, 9 }, { 1, 2, 3, 5, 5, 6, 7, 7, 7, 7, 7, 7 }, { 2, 3, 3, 3, 3, 4, 4, 4, 5, 6, 7, 7 }, { 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10 }, { 1, 2, 3, 4, 5, 7, 7, 8, 8, 8, 9, 9 } }; #define SCALES_COUNT 5 #define SCALES_VLC_BITS 9 static const uint16_t scales_codes[SCALES_COUNT][129] = { { 0x3AB0, 0x3AB2, 0x3AB4, 0x3AB6, 0x3AB8, 0x3ABA, 0x3ABC, 0x3ABE, 0x3AC0, 0x3AC2, 0x3AC4, 0x3AC6, 0x3AC8, 0x3ACA, 0x3ACC, 0x3ACE, 0x3AD0, 0x3AD2, 0x3AD4, 0x3AD6, 0x3AD8, 0x3ADA, 0x3ADC, 0x3ADE, 0x3AE0, 0x3AE2, 0x3AE4, 0x3AE6, 0x3AE8, 0x3AEA, 0x3AEC, 0x3AEE, 0x3AF0, 0x3AF2, 0x3AF4, 0x3AF6, 0x3AF8, 0x3AFA, 0x3AFC, 0x3AFE, 0x0540, 0x0542, 0x0544, 0x0546, 0x0548, 0x054A, 0x054C, 0x054E, 0x0558, 0x055E, 0x02AD, 0x0154, 0x0754, 0x03A8, 0x0056, 0x0028, 0x00E8, 0x004A, 0x000B, 0x003B, 0x0013, 0x0003, 0x000F, 0x0005, 0x0001, 0x0006, 0x0000, 0x0008, 0x001C, 0x0004, 0x0024, 0x004B, 0x00E9, 0x0029, 0x0057, 0x03A9, 0x0755, 0x0155, 0x02AE, 0x055F, 0x0559, 0x054F, 0x054D, 0x054B, 0x0549, 0x0547, 0x0545, 0x0543, 0x0541, 0x3AFF, 0x3AFD, 0x3AFB, 0x3AF9, 0x3AF7, 0x3AF5, 0x3AF3, 0x3AF1, 0x3AEF, 0x3AED, 0x3AEB, 0x3AE9, 0x3AE7, 0x3AE5, 0x3AE3, 0x3AE1, 0x3ADF, 0x3ADD, 0x3ADB, 0x3AD9, 0x3AD7, 0x3AD5, 0x3AD3, 0x3AD1, 0x3ACF, 0x3ACD, 0x3ACB, 0x3AC9, 0x3AC7, 0x3AC5, 0x3AC3, 0x3AC1, 0x3ABF, 0x3ABD, 0x3ABB, 0x3AB9, 0x3AB7, 0x3AB5, 0x3AB3, 0x3AB1, }, { 0x0F60, 0x0F62, 0x0F64, 0x0F66, 0x0F68, 0x0F6A, 0x0F6C, 0x0F6E, 0x0F70, 0x0F72, 0x0F74, 0x0F76, 0x0F78, 0x0F7A, 0x0F7C, 0x0F7E, 0x0F80, 0x0F82, 0x0F84, 0x0F86, 0x0F88, 0x0F8A, 0x0F8C, 0x0F8E, 0x0F90, 0x0F92, 0x0F94, 0x0F96, 0x0F98, 0x0F9A, 0x0F9C, 0x0F9E, 0x0FA0, 0x0FA2, 0x0FA4, 0x0FA6, 0x0FA8, 0x0FAA, 0x0FAC, 0x0FAE, 0x0FB0, 0x0FB2, 0x0FB4, 0x0FB6, 0x0FB8, 0x0FBA, 0x0FBC, 0x0FBE, 0x07A0, 0x07A2, 0x03D2, 0x01EA, 0x00FC, 0x007F, 0x001C, 0x000C, 0x0004, 0x0034, 0x0010, 0x001B, 0x0009, 0x000B, 
0x000E, 0x0001, 0x0003, 0x0002, 0x000F, 0x000C, 0x000A, 0x0000, 0x0011, 0x0035, 0x0005, 0x000D, 0x001D, 0x003C, 0x00FD, 0x01EB, 0x03D3, 0x07A3, 0x07A1, 0x0FBF, 0x0FBD, 0x0FBB, 0x0FB9, 0x0FB7, 0x0FB5, 0x0FB3, 0x0FB1, 0x0FAF, 0x0FAD, 0x0FAB, 0x0FA9, 0x0FA7, 0x0FA5, 0x0FA3, 0x0FA1, 0x0F9F, 0x0F9D, 0x0F9B, 0x0F99, 0x0F97, 0x0F95, 0x0F93, 0x0F91, 0x0F8F, 0x0F8D, 0x0F8B, 0x0F89, 0x0F87, 0x0F85, 0x0F83, 0x0F81, 0x0F7F, 0x0F7D, 0x0F7B, 0x0F79, 0x0F77, 0x0F75, 0x0F73, 0x0F71, 0x0F6F, 0x0F6D, 0x0F6B, 0x0F69, 0x0F67, 0x0F65, 0x0F63, 0x0F61, }, { 0x51D0, 0x51D2, 0x51D4, 0x51D6, 0x51D8, 0x51DA, 0x51DC, 0x51DE, 0x51E0, 0x51E2, 0x51E4, 0x51E6, 0x51E8, 0x51EA, 0x51EC, 0x51EE, 0x51F0, 0x51F2, 0x51F4, 0x51F6, 0x51F8, 0x51FA, 0x51FC, 0x51FE, 0x70C0, 0x70C2, 0x70C4, 0x70C6, 0x70C8, 0x70CA, 0x70CC, 0x70CE, 0x70EC, 0x10EA, 0x3868, 0x3877, 0x0876, 0x1C35, 0x0434, 0x0A34, 0x0E1B, 0x021B, 0x051B, 0x070F, 0x010F, 0x0380, 0x0080, 0x0140, 0x01C1, 0x0041, 0x00A1, 0x00E2, 0x0022, 0x0052, 0x0072, 0x0012, 0x002A, 0x003A, 0x000A, 0x0016, 0x001E, 0x0006, 0x000C, 0x0000, 0x0004, 0x0001, 0x000D, 0x0007, 0x001F, 0x0017, 0x000B, 0x003B, 0x002B, 0x0013, 0x0073, 0x0053, 0x0023, 0x00E3, 0x00A2, 0x0042, 0x01C2, 0x0141, 0x0081, 0x0381, 0x028C, 0x010C, 0x051C, 0x021C, 0x0E1C, 0x0A35, 0x0435, 0x1C3A, 0x0877, 0x0874, 0x3869, 0x10EB, 0x70ED, 0x70CF, 0x70CD, 0x70CB, 0x70C9, 0x70C7, 0x70C5, 0x70C3, 0x70C1, 0x51FF, 0x51FD, 0x51FB, 0x51F9, 0x51F7, 0x51F5, 0x51F3, 0x51F1, 0x51EF, 0x51ED, 0x51EB, 0x51E9, 0x51E7, 0x51E5, 0x51E3, 0x51E1, 0x51DF, 0x51DD, 0x51DB, 0x51D9, 0x51D7, 0x51D5, 0x51D3, 0x51D1, }, { 0x6F64, 0x6F66, 0x6F68, 0x6F6A, 0x6F6C, 0x6F6E, 0x6F70, 0x6F72, 0x6F74, 0x6F76, 0x6F78, 0x6F7A, 0x6F7C, 0x6F7E, 0x6F80, 0x6F82, 0x6F84, 0x6F86, 0x6F88, 0x6F8A, 0x6F8C, 0x6F8E, 0x6F90, 0x6F92, 0x6F94, 0x6F96, 0x6F98, 0x6F9A, 0x6F9C, 0x6F9E, 0x6FA0, 0x6FA2, 0x6FA4, 0x6FA6, 0x6FA8, 0x6FAA, 0x6FAC, 0x6FAE, 0x6FB0, 0x6FB2, 0x6FB4, 0x6FB6, 0x17B4, 0x37DC, 0x0BDB, 0x1BEF, 0x05EE, 0x0DF8, 0x02F8, 0x06FD, 0x017D, 0x037F, 
0x00BF, 0x0040, 0x00C0, 0x0021, 0x0061, 0x0011, 0x0031, 0x0009, 0x0019, 0x0006, 0x000E, 0x0004, 0x0000, 0x0005, 0x000F, 0x0007, 0x001A, 0x000A, 0x0036, 0x0016, 0x006E, 0x002E, 0x00C1, 0x0041, 0x01BC, 0x00BC, 0x037A, 0x017A, 0x02F9, 0x0DF9, 0x05EF, 0x05EC, 0x1BD8, 0x37DD, 0x17B5, 0x6FB7, 0x6FB5, 0x6FB3, 0x6FB1, 0x6FAF, 0x6FAD, 0x6FAB, 0x6FA9, 0x6FA7, 0x6FA5, 0x6FA3, 0x6FA1, 0x6F9F, 0x6F9D, 0x6F9B, 0x6F99, 0x6F97, 0x6F95, 0x6F93, 0x6F91, 0x6F8F, 0x6F8D, 0x6F8B, 0x6F89, 0x6F87, 0x6F85, 0x6F83, 0x6F81, 0x6F7F, 0x6F7D, 0x6F7B, 0x6F79, 0x6F77, 0x6F75, 0x6F73, 0x6F71, 0x6F6F, 0x6F6D, 0x6F6B, 0x6F69, 0x6F67, 0x6F65, }, { 0xDF54, 0xDF56, 0xDFC8, 0xDFCA, 0xDFCC, 0xDFCE, 0xDFD0, 0xDFD2, 0xDFD4, 0xDFD6, 0xDFD8, 0xDFDA, 0xDFDC, 0xDFDE, 0xDFE0, 0xDFE2, 0x0FE8, 0x2FEA, 0x6FA8, 0x6FF6, 0x07F5, 0x07F7, 0x37D2, 0x37F9, 0x03F8, 0x0BF8, 0x0BFB, 0x1BEB, 0x01FA, 0x05FA, 0x09FA, 0x0DFA, 0x0DFF, 0x00FF, 0x02FF, 0x06FB, 0x007C, 0x017C, 0x027C, 0x027F, 0x003C, 0x00BC, 0x013C, 0x01BC, 0x001C, 0x005C, 0x009C, 0x00DC, 0x000C, 0x002C, 0x004C, 0x006C, 0x0004, 0x0014, 0x0024, 0x0034, 0x0000, 0x0008, 0x0010, 0x0018, 0x001E, 0x0002, 0x0006, 0x000A, 0x000E, 0x000B, 0x0007, 0x0003, 0x001F, 0x0019, 0x0011, 0x0009, 0x0001, 0x0035, 0x0025, 0x0015, 0x0005, 0x006D, 0x004D, 0x002D, 0x000D, 0x00DD, 0x009D, 0x005D, 0x001D, 0x01BD, 0x013D, 0x00BD, 0x003D, 0x037C, 0x027D, 0x017D, 0x007D, 0x06FC, 0x04FC, 0x02FC, 0x00FC, 0x0DFB, 0x09FB, 0x05FB, 0x01FB, 0x1BF8, 0x1BE8, 0x0BF9, 0x03F9, 0x37FA, 0x37D3, 0x17F4, 0x07F6, 0x6FF7, 0x6FA9, 0x2FEB, 0x0FE9, 0xDFE3, 0xDFE1, 0xDFDF, 0xDFDD, 0xDFDB, 0xDFD9, 0xDFD7, 0xDFD5, 0xDFD3, 0xDFD1, 0xDFCF, 0xDFCD, 0xDFCB, 0xDFC9, 0xDF57, 0xDF55, } }; static const uint8_t scales_bits[SCALES_COUNT][129] = { { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 11, 11, 10, 9, 8, 8, 7, 6, 6, 5, 4, 4, 3, 2, 3, 3, 4, 5, 5, 6, 7, 8, 8, 9, 10, 
11, 11, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, }, { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 11, 10, 8, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 6, 7, 8, 9, 11, 12, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, }, { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 12, 12, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, }, { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 12, 12, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, }, { 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 
9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, } }; static const uint16_t bitalloc_3_codes[3] = { 0x0003, 0x0000, 0x0002, }; static const uint8_t bitalloc_3_bits[3] = { 2, 1, 2, }; static const uint16_t bitalloc_5_codes_a[5] = { 0x000F, 0x0006, 0x0000, 0x0002, 0x000E, }; static const uint16_t bitalloc_5_codes_b[5] = { 0x0007, 0x0001, 0x0002, 0x0000, 0x0006, }; static const uint16_t bitalloc_5_codes_c[5] = { 0x0007, 0x0005, 0x0000, 0x0004, 0x0006, }; static const uint8_t bitalloc_5_bits_a[5] = { 4, 3, 1, 2, 4, }; static const uint8_t bitalloc_5_bits_b[5] = { 3, 2, 2, 2, 3, }; static const uint8_t bitalloc_5_bits_c[5] = { 3, 3, 1, 3, 3, }; static const uint16_t bitalloc_7_codes_a[7] = { 0x001E, 0x000E, 0x0005, 0x0000, 0x0006, 0x0004, 0x001F, }; static const uint16_t bitalloc_7_codes_b[7] = { 0x0014, 0x000B, 0x0000, 0x0003, 0x0001, 0x0004, 0x0015, }; static const uint16_t bitalloc_7_codes_c[7] = { 0x0000, 0x0002, 0x0001, 0x0003, 0x0002, 0x0003, 0x0001, }; static const uint8_t bitalloc_7_bits_a[7] = { 5, 4, 3, 1, 3, 3, 5, }; static const uint8_t bitalloc_7_bits_b[7] = { 5, 4, 2, 2, 2, 3, 5, }; static const uint8_t bitalloc_7_bits_c[7] = { 4, 4, 2, 2, 2, 4, 4, }; static const uint16_t bitalloc_9_codes_a[9] = { 0x0030, 0x0019, 0x0009, 0x0005, 0x0000, 0x0007, 0x000D, 0x0008, 0x0031, }; static const uint16_t bitalloc_9_codes_b[9] = { 0x0018, 0x001A, 0x0002, 0x0007, 0x0002, 0x0000, 0x0003, 0x001B, 0x0019, }; static const uint16_t bitalloc_9_codes_c[9] = { 0x001C, 0x000F, 0x0002, 0x0007, 0x0002, 0x0000, 0x0006, 0x0006, 0x001D, }; static const uint8_t bitalloc_9_bits_a[9] = { 6, 5, 4, 3, 1, 3, 4, 4, 6, }; static const uint8_t bitalloc_9_bits_b[9] = { 5, 5, 3, 3, 2, 2, 3, 5, 5, }; static const uint8_t bitalloc_9_bits_c[9] = { 6, 5, 3, 3, 2, 2, 3, 4, 6, }; static const uint16_t bitalloc_13_codes_a[13] = { 0x0070, 0x002E, 0x0039, 0x001D, 0x000C, 0x000F, 0x0000, 
0x0004, 0x000D, 0x000A, 0x0016, 0x002F, 0x0071, }; static const uint16_t bitalloc_13_codes_b[13] = { 0x0038, 0x0010, 0x001D, 0x0007, 0x000F, 0x0005, 0x0000, 0x0006, 0x0002, 0x0009, 0x0006, 0x0011, 0x0039, }; static const uint16_t bitalloc_13_codes_c[13] = { 0x0004, 0x001A, 0x0003, 0x000E, 0x0000, 0x0003, 0x0005, 0x0004, 0x0002, 0x000F, 0x000C, 0x001B, 0x0005, }; static const uint8_t bitalloc_13_bits_a[13] = { 7, 6, 6, 5, 4, 4, 1, 3, 4, 4, 5, 6, 7, }; static const uint8_t bitalloc_13_bits_b[13] = { 6, 5, 5, 4, 4, 3, 2, 3, 3, 4, 4, 5, 6, }; static const uint8_t bitalloc_13_bits_c[13] = { 5, 5, 4, 4, 3, 3, 3, 3, 3, 4, 4, 5, 5, }; static const uint16_t bitalloc_17_codes_a[17] = { 0x0154, 0x00AB, 0x002B, 0x000B, 0x0003, 0x000A, 0x0001, 0x0006, 0x0001, 0x0007, 0x0004, 0x000B, 0x0000, 0x0004, 0x0014, 0x0054, 0x0155, }; static const uint16_t bitalloc_17_codes_b[17] = { 0x007C, 0x003F, 0x0019, 0x000D, 0x001C, 0x0008, 0x000F, 0x0005, 0x0000, 0x0006, 0x0002, 0x0009, 0x001D, 0x000E, 0x001E, 0x0018, 0x007D, }; static const uint16_t bitalloc_17_codes_c[17] = { 0x002C, 0x0017, 0x0005, 0x001C, 0x0003, 0x000A, 0x000F, 0x0003, 0x0006, 0x0004, 0x0000, 0x000B, 0x0004, 0x001D, 0x000A, 0x0004, 0x002D, }; static const uint16_t bitalloc_17_codes_d[17] = { 0x0100, 0x0102, 0x0082, 0x0042, 0x0022, 0x0012, 0x000A, 0x0006, 0x0000, 0x0007, 0x000B, 0x0013, 0x0023, 0x0043, 0x0083, 0x0103, 0x0101, }; static const uint16_t bitalloc_17_codes_e[17] = { 0x00E8, 0x00F6, 0x0075, 0x0034, 0x003B, 0x001B, 0x001F, 0x0004, 0x0000, 0x0005, 0x000C, 0x001C, 0x003C, 0x0035, 0x007A, 0x00F7, 0x00E9, }; static const uint16_t bitalloc_17_codes_f[17] = { 0x0004, 0x0003, 0x001E, 0x0001, 0x0001, 0x000E, 0x0001, 0x0004, 0x0006, 0x0005, 0x0002, 0x000F, 0x0006, 0x000E, 0x001F, 0x0000, 0x0005, }; static const uint16_t bitalloc_17_codes_g[17] = { 0x0060, 0x007E, 0x0031, 0x0019, 0x000D, 0x0004, 0x0000, 0x0006, 0x0002, 0x0007, 0x0001, 0x0005, 0x000E, 0x001E, 0x003E, 0x007F, 0x0061, }; static const uint8_t 
bitalloc_17_bits_a[17] = { 12, 11, 9, 7, 5, 4, 3, 3, 2, 3, 3, 4, 4, 6, 8, 10, 12, }; static const uint8_t bitalloc_17_bits_b[17] = { 8, 7, 6, 5, 5, 4, 4, 3, 2, 3, 3, 4, 5, 5, 6, 6, 8, }; static const uint8_t bitalloc_17_bits_c[17] = { 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 4, 4, 5, 5, 5, 7, }; static const uint8_t bitalloc_17_bits_d[17] = { 9, 9, 8, 7, 6, 5, 4, 3, 1, 3, 4, 5, 6, 7, 8, 9, 9, }; static const uint8_t bitalloc_17_bits_e[17] = { 8, 8, 7, 6, 6, 5, 5, 3, 1, 3, 4, 5, 6, 6, 7, 8, 8, }; static const uint8_t bitalloc_17_bits_f[17] = { 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 4, 5, 6, 6, 8, }; static const uint8_t bitalloc_17_bits_g[17] = { 8, 8, 7, 6, 5, 4, 3, 3, 2, 3, 3, 4, 5, 6, 7, 8, 8, }; static const uint16_t bitalloc_25_codes_a[25] = { 0x2854, 0x142B, 0x050B, 0x0143, 0x00A2, 0x0052, 0x002E, 0x0015, 0x0004, 0x000E, 0x0000, 0x0003, 0x0006, 0x0004, 0x0001, 0x000F, 0x0005, 0x0016, 0x002F, 0x0053, 0x00A3, 0x00A0, 0x0284, 0x0A14, 0x2855, }; static const uint16_t bitalloc_25_codes_b[25] = { 0x001C, 0x000F, 0x0005, 0x0000, 0x0030, 0x0036, 0x000E, 0x0019, 0x0001, 0x0008, 0x000E, 0x0001, 0x0005, 0x0002, 0x000F, 0x0009, 0x0006, 0x001A, 0x000F, 0x0037, 0x0031, 0x0001, 0x0006, 0x0004, 0x001D, }; static const uint16_t bitalloc_25_codes_c[25] = { 0x004C, 0x0027, 0x006D, 0x0028, 0x0037, 0x000E, 0x0015, 0x0000, 0x0005, 0x0008, 0x000B, 0x000E, 0x0001, 0x000F, 0x000C, 0x0009, 0x0006, 0x0001, 0x001A, 0x000F, 0x0008, 0x0029, 0x0012, 0x006C, 0x004D, }; static const uint16_t bitalloc_25_codes_d[25] = { 0x0780, 0x0782, 0x03C2, 0x01E2, 0x00FE, 0x0079, 0x003D, 0x001C, 0x000C, 0x0004, 0x0000, 0x0006, 0x0002, 0x0007, 0x0001, 0x0005, 0x000D, 0x001D, 0x003E, 0x007E, 0x00FF, 0x01E3, 0x03C3, 0x0783, 0x0781, }; static const uint16_t bitalloc_25_codes_e[25] = { 0x003C, 0x0092, 0x0018, 0x001F, 0x004E, 0x000D, 0x0025, 0x0004, 0x0010, 0x0000, 0x000A, 0x0002, 0x0003, 0x0003, 0x000B, 0x0001, 0x0011, 0x0005, 0x0026, 0x000E, 0x004F, 0x0048, 0x0019, 0x0093, 0x003D, }; static const uint16_t 
bitalloc_25_codes_f[25] = { 0x0324, 0x0193, 0x00CE, 0x0065, 0x0024, 0x000C, 0x0013, 0x0004, 0x0007, 0x000A, 0x000D, 0x000F, 0x0001, 0x0000, 0x000E, 0x000B, 0x0008, 0x0005, 0x0018, 0x000D, 0x0025, 0x0066, 0x00CF, 0x00C8, 0x0325, }; static const uint16_t bitalloc_25_codes_g[25] = { 0x03A8, 0x03AE, 0x01D5, 0x0094, 0x0014, 0x004B, 0x000B, 0x003B, 0x0013, 0x0003, 0x000F, 0x0005, 0x0001, 0x0006, 0x0000, 0x0008, 0x001C, 0x0004, 0x0024, 0x0074, 0x0015, 0x0095, 0x01D6, 0x03AF, 0x03A9, }; static const uint8_t bitalloc_25_bits_a[25] = { 14, 13, 11, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 8, 10, 12, 14, }; static const uint8_t bitalloc_25_bits_b[25] = { 9, 8, 7, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 9, }; static const uint8_t bitalloc_25_bits_c[25] = { 8, 7, 7, 6, 6, 5, 5, 4, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8, }; static const uint8_t bitalloc_25_bits_d[25] = { 12, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 3, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, }; static const uint8_t bitalloc_25_bits_e[25] = { 8, 8, 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, }; static const uint8_t bitalloc_25_bits_f[25] = { 10, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 8, 8, 10, }; static const uint8_t bitalloc_25_bits_g[25] = { 10, 10, 9, 8, 7, 7, 6, 6, 5, 4, 4, 3, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, }; static const uint16_t bitalloc_33_codes_a[33] = { 0x1580, 0x1582, 0x0AC2, 0x0562, 0x02B2, 0x015E, 0x00AD, 0x0054, 0x001C, 0x003C, 0x000F, 0x001F, 0x0008, 0x000B, 0x000D, 0x0000, 0x0002, 0x0001, 0x000E, 0x000C, 0x0009, 0x0006, 0x0014, 0x003D, 0x001D, 0x0055, 0x00AE, 0x015F, 0x02B3, 0x0563, 0x0AC3, 0x1583, 0x1581, }; static const uint16_t bitalloc_33_codes_b[33] = { 0x030C, 0x0187, 0x006D, 0x0028, 0x0037, 0x0066, 0x0015, 0x0031, 0x0000, 0x000B, 0x0012, 0x001A, 0x0001, 0x0007, 0x000A, 0x000E, 0x0001, 0x000F, 0x000B, 0x0008, 0x0004, 0x001B, 0x0013, 0x000C, 0x0001, 0x0032, 0x001A, 0x0067, 0x0060, 0x0029, 
0x00C2, 0x006C, 0x030D, }; static const uint16_t bitalloc_33_codes_c[33] = { 0x00CC, 0x0067, 0x0005, 0x0070, 0x0003, 0x001A, 0x0039, 0x003F, 0x000A, 0x0012, 0x0018, 0x001D, 0x0001, 0x0003, 0x0007, 0x000A, 0x000D, 0x000B, 0x0008, 0x0004, 0x0002, 0x001E, 0x0019, 0x0013, 0x000B, 0x0000, 0x003E, 0x001B, 0x0018, 0x0071, 0x0032, 0x0004, 0x00CD, }; static const uint16_t bitalloc_33_codes_d[33] = { 0x3AF8, 0x3AFA, 0x1D7E, 0x0EBC, 0x075C, 0x03AC, 0x01D4, 0x0094, 0x0014, 0x004B, 0x000B, 0x003B, 0x0013, 0x0003, 0x000F, 0x0005, 0x0001, 0x0006, 0x0000, 0x0008, 0x001C, 0x0004, 0x0024, 0x0074, 0x0015, 0x0095, 0x01D5, 0x03AD, 0x075D, 0x0EBD, 0x1D7F, 0x3AFB, 0x3AF9, }; static const uint16_t bitalloc_33_codes_e[33] = { 0x01C8, 0x01E6, 0x0064, 0x00E2, 0x00E5, 0x0030, 0x0033, 0x0073, 0x007A, 0x001A, 0x003A, 0x0002, 0x001A, 0x001F, 0x0007, 0x0001, 0x0002, 0x0002, 0x000C, 0x0000, 0x001B, 0x0003, 0x003B, 0x001B, 0x007B, 0x0078, 0x0070, 0x0031, 0x00F2, 0x00E3, 0x0065, 0x01E7, 0x01C9, }; static const uint16_t bitalloc_33_codes_f[33] = { 0x0724, 0x0393, 0x01CE, 0x00E5, 0x002C, 0x0008, 0x0017, 0x003E, 0x0005, 0x0014, 0x001D, 0x0000, 0x0003, 0x0006, 0x0008, 0x000B, 0x000D, 0x000C, 0x0009, 0x0007, 0x0004, 0x0001, 0x001E, 0x0015, 0x000A, 0x003F, 0x0038, 0x0009, 0x002D, 0x00E6, 0x01CF, 0x01C8, 0x0725, }; static const uint16_t bitalloc_33_codes_g[33] = { 0x0284, 0x0042, 0x0140, 0x0143, 0x003E, 0x00BE, 0x0011, 0x0051, 0x0009, 0x0029, 0x0005, 0x0015, 0x0000, 0x0008, 0x000E, 0x0002, 0x0006, 0x0003, 0x000F, 0x0009, 0x0001, 0x0016, 0x0006, 0x002E, 0x000E, 0x005E, 0x001E, 0x00BF, 0x003F, 0x0020, 0x0141, 0x0043, 0x0285, }; static const uint8_t bitalloc_33_bits_a[33] = { 13, 13, 12, 11, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 13, }; static const uint8_t bitalloc_33_bits_b[33] = { 10, 9, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 7, 7, 7, 8, 8, 10, }; static const uint8_t bitalloc_33_bits_c[33] = { 9, 8, 7, 7, 6, 6, 6, 6, 5, 
5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 9, }; static const uint8_t bitalloc_33_bits_d[33] = { 14, 14, 13, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 4, 4, 3, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14, 14, }; static const uint8_t bitalloc_33_bits_e[33] = { 9, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 3, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, }; static const uint8_t bitalloc_33_bits_f[33] = { 11, 10, 9, 8, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 11, }; static const uint8_t bitalloc_33_bits_g[33] = { 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, 9, 9, 10, }; static const uint16_t bitalloc_65_codes_a[65] = { 0x9E5C, 0x9E5E, 0x4F2C, 0x2794, 0x13C4, 0x1E44, 0x09E3, 0x0F23, 0x04F3, 0x0792, 0x027E, 0x03CE, 0x013D, 0x01E5, 0x009C, 0x00CC, 0x0040, 0x0058, 0x0067, 0x001E, 0x0021, 0x002D, 0x003D, 0x0007, 0x0011, 0x0014, 0x0017, 0x001A, 0x001C, 0x001F, 0x0001, 0x0004, 0x0006, 0x0005, 0x0002, 0x0000, 0x001D, 0x001B, 0x0018, 0x0015, 0x0012, 0x000E, 0x0006, 0x0032, 0x0026, 0x001F, 0x0078, 0x0059, 0x0041, 0x00CD, 0x009D, 0x01E6, 0x013E, 0x03CF, 0x027F, 0x0793, 0x0790, 0x04F0, 0x09E4, 0x1E45, 0x13C5, 0x2795, 0x4F2D, 0x9E5F, 0x9E5D, }; static const uint16_t bitalloc_65_codes_b[65] = { 0x0A8C, 0x0547, 0x01B5, 0x0008, 0x00DB, 0x0152, 0x0005, 0x000B, 0x008E, 0x00AE, 0x00E4, 0x0003, 0x0037, 0x0039, 0x0055, 0x006C, 0x0073, 0x0003, 0x0015, 0x001D, 0x0028, 0x0030, 0x0037, 0x003E, 0x0006, 0x000B, 0x000F, 0x0012, 0x0016, 0x0019, 0x001D, 0x0001, 0x0004, 0x0002, 0x001E, 0x001A, 0x0017, 0x0013, 0x0010, 0x000C, 0x0007, 0x003F, 0x0038, 0x0031, 0x0029, 0x0022, 0x001A, 0x0014, 0x0000, 0x006D, 0x0056, 0x0046, 0x0038, 0x0004, 0x00E5, 0x00AF, 0x008F, 0x006C, 0x000A, 0x0153, 0x0150, 0x0009, 0x02A2, 0x01B4, 0x0A8D, }; static const uint16_t bitalloc_65_codes_c[65] = { 0x045C, 0x022F, 0x03F5, 0x01BC, 0x01FB, 0x0059, 0x00D0, 0x00DF, 0x000A, 0x002D, 0x002F, 0x0052, 0x0069, 0x0078, 
0x007F, 0x000A, 0x0010, 0x001C, 0x0023, 0x002A, 0x0035, 0x003A, 0x003D, 0x0000, 0x0003, 0x0006, 0x0009, 0x000C, 0x000F, 0x0012, 0x0016, 0x0018, 0x001C, 0x0019, 0x0017, 0x0013, 0x0010, 0x000D, 0x000A, 0x0007, 0x0004, 0x0001, 0x003E, 0x003B, 0x0036, 0x002B, 0x0028, 0x001D, 0x0011, 0x000B, 0x0004, 0x0079, 0x006E, 0x0053, 0x0044, 0x002E, 0x000B, 0x00FC, 0x00D1, 0x008A, 0x0058, 0x01BD, 0x0116, 0x03F4, 0x045D, }; static const uint16_t bitalloc_65_codes_d[65] = { 0x70B0, 0x70B2, 0x70B4, 0x2852, 0x385B, 0x142E, 0x1C2E, 0x0A15, 0x0E14, 0x0214, 0x0704, 0x0104, 0x010B, 0x0383, 0x0083, 0x0143, 0x01C3, 0x0043, 0x00A2, 0x00E2, 0x0022, 0x0052, 0x0072, 0x0012, 0x002A, 0x003A, 0x000A, 0x0016, 0x001E, 0x0006, 0x000C, 0x0000, 0x0004, 0x0001, 0x000D, 0x0007, 0x001F, 0x0017, 0x000B, 0x003B, 0x002B, 0x0013, 0x0073, 0x0053, 0x0023, 0x00E3, 0x00A3, 0x00A0, 0x0040, 0x01C0, 0x0084, 0x0384, 0x0284, 0x0105, 0x0705, 0x0215, 0x0E15, 0x0A16, 0x1C2F, 0x142F, 0x1428, 0x2853, 0x70B5, 0x70B3, 0x70B1, }; static const uint16_t bitalloc_65_codes_e[65] = { 0x032C, 0x0332, 0x0378, 0x037E, 0x008C, 0x014A, 0x0188, 0x0197, 0x019E, 0x01BD, 0x0044, 0x0047, 0x00AA, 0x00C5, 0x00CD, 0x00DC, 0x001C, 0x002C, 0x0053, 0x0063, 0x0068, 0x0008, 0x000F, 0x0017, 0x002B, 0x0035, 0x0005, 0x0009, 0x0016, 0x001C, 0x0006, 0x000F, 0x0004, 0x0000, 0x0007, 0x001D, 0x0017, 0x000A, 0x0006, 0x0036, 0x0030, 0x0028, 0x0010, 0x0009, 0x0069, 0x0064, 0x0054, 0x002D, 0x001D, 0x00DD, 0x00CE, 0x00CA, 0x00AB, 0x00A4, 0x0045, 0x01BE, 0x019F, 0x0198, 0x0189, 0x014B, 0x008D, 0x037F, 0x0379, 0x0333, 0x032D, }; static const uint16_t bitalloc_65_codes_f[65] = { 0x0FE0, 0x0FE2, 0x0FE8, 0x0FEA, 0x0FEC, 0x0FEE, 0x0FF0, 0x0FF2, 0x0FF4, 0x2FF2, 0x07F2, 0x07FB, 0x03F6, 0x0BFA, 0x0BFD, 0x01FF, 0x05FF, 0x02FC, 0x007C, 0x017C, 0x003C, 0x00BC, 0x001C, 0x005C, 0x000C, 0x002C, 0x0004, 0x0014, 0x0000, 0x0008, 0x000E, 0x0002, 0x0006, 0x0003, 0x000F, 0x0009, 0x0001, 0x0015, 0x0005, 0x002D, 0x000D, 0x005D, 0x001D, 0x00BD, 0x003D, 0x017D, 0x007D, 0x02FD, 0x00FC, 
0x05FC, 0x01FA, 0x0BFB, 0x03F7, 0x17F8, 0x07F3, 0x2FF3, 0x0FF5, 0x0FF3, 0x0FF1, 0x0FEF, 0x0FED, 0x0FEB, 0x0FE9, 0x0FE3, 0x0FE1, }; static const uint16_t bitalloc_65_codes_g[65] = { 0x010C, 0x038A, 0x0608, 0x0786, 0x0084, 0x0087, 0x0302, 0x0305, 0x0040, 0x00E0, 0x00E3, 0x0183, 0x001E, 0x005E, 0x009E, 0x00DE, 0x00F1, 0x0011, 0x0039, 0x0061, 0x0079, 0x0009, 0x001D, 0x0031, 0x003D, 0x0005, 0x000F, 0x0019, 0x001F, 0x0003, 0x0006, 0x000A, 0x000E, 0x000B, 0x0008, 0x0004, 0x0000, 0x001A, 0x0012, 0x000A, 0x0002, 0x0036, 0x0026, 0x0016, 0x0006, 0x006E, 0x004E, 0x002E, 0x000E, 0x00DF, 0x009F, 0x005F, 0x001F, 0x01E0, 0x0180, 0x00E1, 0x0041, 0x03C2, 0x0303, 0x01C4, 0x0085, 0x0787, 0x0609, 0x038B, 0x010D, }; static const uint8_t bitalloc_65_bits_a[65] = { 16, 16, 15, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 13, 13, 14, 15, 16, 16, }; static const uint8_t bitalloc_65_bits_b[65] = { 12, 11, 10, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 10, 10, 12, }; static const uint8_t bitalloc_65_bits_c[65] = { 11, 10, 10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 10, 11, }; static const uint8_t bitalloc_65_bits_d[65] = { 15, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 15, 15, 15, }; static const uint8_t bitalloc_65_bits_e[65] = { 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 
10, }; static const uint8_t bitalloc_65_bits_f[65] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 11, 11, 10, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, }; static const uint8_t bitalloc_65_bits_g[65] = { 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, }; static const uint16_t bitalloc_129_codes_a[129] = { 0x0660, 0x0666, 0x06EC, 0x0722, 0x0760, 0x076E, 0x004C, 0x004E, 0x00F4, 0x010A, 0x0148, 0x0156, 0x01D4, 0x01F2, 0x0331, 0x0370, 0x0377, 0x0396, 0x03B1, 0x0024, 0x0064, 0x007B, 0x008A, 0x00A5, 0x00D4, 0x00EB, 0x00FA, 0x019A, 0x01B9, 0x01C9, 0x01D9, 0x0010, 0x0030, 0x0033, 0x0043, 0x0053, 0x006B, 0x007A, 0x00CA, 0x00D2, 0x00DE, 0x00E6, 0x00F6, 0x000E, 0x001F, 0x0023, 0x002B, 0x003B, 0x003F, 0x0067, 0x0070, 0x0077, 0x0005, 0x000D, 0x0012, 0x001B, 0x002C, 0x0035, 0x003A, 0x0004, 0x000B, 0x0017, 0x001F, 0x0009, 0x0008, 0x000A, 0x0000, 0x0018, 0x000C, 0x0005, 0x003C, 0x0036, 0x002D, 0x001C, 0x0013, 0x000E, 0x0006, 0x007A, 0x0071, 0x0068, 0x0064, 0x003C, 0x0034, 0x0028, 0x0020, 0x000F, 0x00F7, 0x00E7, 0x00DF, 0x00D3, 0x00CB, 0x007B, 0x0074, 0x0054, 0x0044, 0x003C, 0x0031, 0x0011, 0x01DA, 0x01CA, 0x01BA, 0x019B, 0x00FB, 0x00F8, 0x00D5, 0x00AA, 0x008B, 0x0084, 0x0065, 0x0025, 0x03B6, 0x0397, 0x0390, 0x0371, 0x0332, 0x01F3, 0x01D5, 0x0157, 0x0149, 0x010B, 0x00F5, 0x004F, 0x004D, 0x076F, 0x0761, 0x0723, 0x06ED, 0x0667, 0x0661, }; static const uint16_t bitalloc_129_codes_b[129] = { 0x29DC, 0x14EF, 0x0455, 0x0E9C, 0x022B, 0x0489, 0x0740, 0x074F, 0x0172, 0x0245, 0x0247, 0x030A, 0x03A1, 0x001C, 0x008B, 0x00D6, 0x010C, 0x0148, 0x014F, 0x0186, 0x01D1, 0x0008, 0x000F, 0x0046, 0x005D, 0x0078, 0x0087, 0x0096, 0x00A5, 0x00BC, 0x00D8, 0x00DE, 0x00F6, 0x0005, 0x0014, 0x0024, 0x002F, 0x003A, 
0x003D, 0x0049, 0x0050, 0x0058, 0x005F, 0x0066, 0x006D, 0x0075, 0x007C, 0x0004, 0x000B, 0x0013, 0x0018, 0x001B, 0x001F, 0x0022, 0x0026, 0x002A, 0x002D, 0x0031, 0x0034, 0x0038, 0x003B, 0x003F, 0x0003, 0x0006, 0x000A, 0x0007, 0x0004, 0x0000, 0x003C, 0x0039, 0x0035, 0x0032, 0x002E, 0x002B, 0x0027, 0x0023, 0x0020, 0x001C, 0x0019, 0x0016, 0x0010, 0x0005, 0x007D, 0x007A, 0x006E, 0x0067, 0x0060, 0x0059, 0x0051, 0x004A, 0x0042, 0x003B, 0x0034, 0x0025, 0x0015, 0x0006, 0x00F7, 0x00DF, 0x00D9, 0x00BD, 0x00A6, 0x0097, 0x0090, 0x0079, 0x006A, 0x0047, 0x0044, 0x0009, 0x01D2, 0x0187, 0x0184, 0x0149, 0x010D, 0x00D7, 0x00B8, 0x001D, 0x03A6, 0x030B, 0x029C, 0x0246, 0x0173, 0x0114, 0x0741, 0x053A, 0x0488, 0x0E9D, 0x0A76, 0x0454, 0x29DD, }; static const uint16_t bitalloc_129_codes_c[129] = { 0x0E5C, 0x072F, 0x001D, 0x0724, 0x000F, 0x010D, 0x0324, 0x0393, 0x03E9, 0x0080, 0x0087, 0x00FA, 0x0164, 0x0193, 0x01DE, 0x01F5, 0x0010, 0x002A, 0x0041, 0x0064, 0x0073, 0x008E, 0x00A4, 0x00B3, 0x00D6, 0x00E5, 0x00F4, 0x00FB, 0x0002, 0x0009, 0x0013, 0x001E, 0x0026, 0x002C, 0x0033, 0x003F, 0x0041, 0x004C, 0x0053, 0x005E, 0x0065, 0x0070, 0x0073, 0x0078, 0x007B, 0x007E, 0x0002, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0014, 0x0017, 0x001A, 0x001D, 0x0021, 0x0024, 0x0027, 0x002A, 0x002D, 0x0030, 0x0033, 0x0036, 0x003A, 0x0037, 0x0034, 0x0031, 0x002E, 0x002B, 0x0028, 0x0025, 0x0022, 0x001E, 0x001B, 0x0018, 0x0015, 0x0012, 0x000E, 0x000C, 0x0008, 0x0006, 0x0003, 0x007F, 0x007C, 0x0079, 0x0076, 0x0071, 0x006A, 0x005F, 0x0058, 0x004D, 0x0046, 0x0040, 0x0038, 0x002D, 0x0027, 0x001F, 0x0014, 0x0012, 0x0003, 0x0000, 0x00F5, 0x00EE, 0x00D7, 0x00C8, 0x00A5, 0x008F, 0x007C, 0x0065, 0x0042, 0x002B, 0x0011, 0x0002, 0x01DF, 0x01C8, 0x0165, 0x00FB, 0x00E4, 0x0081, 0x0006, 0x03E8, 0x0325, 0x01CA, 0x010C, 0x0725, 0x0396, 0x001C, 0x0E5D, }; static const uint16_t bitalloc_129_codes_d[129] = { 0xA598, 0xA59A, 0xA59C, 0xA59E, 0xC598, 0xE586, 0x3ACC, 0x52CA, 0x62CD, 0x0D48, 0x1D67, 0x2978, 0x3167, 0x3966, 0x06A5, 0x0EBC, 
0x14BD, 0x1CB1, 0x0350, 0x0353, 0x075F, 0x0A5F, 0x0C5E, 0x0E5E, 0x01AE, 0x03AD, 0x052D, 0x062D, 0x072D, 0x00D5, 0x01D4, 0x0294, 0x0314, 0x0394, 0x0014, 0x0094, 0x0114, 0x0174, 0x01B4, 0x01F4, 0x000B, 0x004B, 0x008B, 0x00BB, 0x00DB, 0x00FB, 0x001B, 0x003B, 0x0053, 0x0063, 0x0073, 0x0003, 0x0013, 0x0023, 0x002F, 0x0037, 0x003F, 0x0007, 0x000F, 0x0015, 0x0019, 0x001D, 0x0001, 0x0005, 0x0009, 0x0006, 0x0002, 0x001E, 0x001A, 0x0016, 0x0010, 0x0008, 0x0000, 0x0038, 0x0030, 0x0028, 0x001C, 0x000C, 0x007C, 0x006C, 0x005C, 0x0044, 0x0024, 0x0004, 0x00E4, 0x00C4, 0x00A4, 0x0074, 0x0034, 0x01F5, 0x01B5, 0x0175, 0x0115, 0x0095, 0x0015, 0x0395, 0x0315, 0x0295, 0x01D5, 0x00D6, 0x072E, 0x062E, 0x052E, 0x03AE, 0x01AF, 0x0E5F, 0x0C5F, 0x0C58, 0x0A58, 0x0758, 0x0351, 0x1CB2, 0x18B2, 0x0EBD, 0x0EB2, 0x3967, 0x3960, 0x2979, 0x2964, 0x0D49, 0x72C2, 0x52CB, 0x3ACD, 0xE587, 0xC599, 0xA59F, 0xA59D, 0xA59B, 0xA599, }; static const uint16_t bitalloc_129_codes_e[129] = { 0xA13C, 0xC720, 0xA13F, 0xA13E, 0xA13D, 0xE722, 0x5090, 0x6393, 0x7392, 0x2849, 0x31CE, 0x39CE, 0x1425, 0x18E5, 0x1CE5, 0x0844, 0x0A1C, 0x0C7C, 0x036C, 0x0423, 0x050F, 0x063F, 0x01B7, 0x0216, 0x0285, 0x031D, 0x039D, 0x0109, 0x0140, 0x0180, 0x01C8, 0x01CF, 0x007A, 0x008A, 0x00A2, 0x00C1, 0x00E5, 0x0014, 0x0037, 0x0043, 0x004E, 0x0056, 0x0061, 0x006C, 0x007C, 0x000B, 0x001C, 0x001F, 0x0023, 0x0025, 0x0029, 0x002C, 0x002E, 0x0032, 0x0034, 0x0037, 0x003A, 0x003C, 0x003F, 0x0001, 0x0003, 0x0006, 0x0008, 0x000A, 0x000C, 0x000B, 0x0009, 0x0007, 0x0004, 0x0002, 0x0000, 0x003D, 0x003B, 0x0038, 0x0035, 0x0033, 0x002F, 0x002D, 0x002A, 0x0026, 0x0024, 0x0020, 0x001D, 0x001A, 0x007D, 0x006D, 0x0062, 0x0057, 0x004F, 0x0044, 0x003C, 0x0015, 0x00E6, 0x00C6, 0x00A3, 0x008B, 0x007B, 0x006C, 0x01C9, 0x0181, 0x0141, 0x010A, 0x00DA, 0x031E, 0x0286, 0x0217, 0x0210, 0x0738, 0x0638, 0x0508, 0x036D, 0x0C7D, 0x0A1D, 0x0845, 0x1CE6, 0x18E6, 0x1426, 0x39CF, 0x31CF, 0x284E, 0x7393, 0x7390, 0x5091, 0xE723, 0xC724, 0xC725, 0xC722, 0xC723, 0xC721, }; 
static const uint16_t bitalloc_129_codes_f[129] = { 0x762C, 0x3B17, 0x1555, 0x0608, 0x0AAB, 0x0FF2, 0x0305, 0x0307, 0x0763, 0x0046, 0x010C, 0x01BC, 0x02AB, 0x03B6, 0x03FD, 0x0080, 0x0087, 0x00DF, 0x0156, 0x01D9, 0x01F8, 0x01FF, 0x002A, 0x0041, 0x0061, 0x0094, 0x00D4, 0x00EA, 0x00F2, 0x00FD, 0x0009, 0x000B, 0x001A, 0x0026, 0x0031, 0x0040, 0x004B, 0x006B, 0x0073, 0x0077, 0x007A, 0x007C, 0x0000, 0x0002, 0x0006, 0x0008, 0x000B, 0x000E, 0x0011, 0x0014, 0x0016, 0x0019, 0x001C, 0x001E, 0x0021, 0x0023, 0x0026, 0x0028, 0x002B, 0x002D, 0x002F, 0x0031, 0x0033, 0x0036, 0x0038, 0x0037, 0x0034, 0x0032, 0x0030, 0x002E, 0x002C, 0x0029, 0x0027, 0x0024, 0x0022, 0x001F, 0x001D, 0x001A, 0x0017, 0x0015, 0x0012, 0x000F, 0x000C, 0x0009, 0x0007, 0x0003, 0x0001, 0x007D, 0x007B, 0x0078, 0x0074, 0x0072, 0x0054, 0x0041, 0x0036, 0x0027, 0x001B, 0x0014, 0x000A, 0x00FE, 0x00F3, 0x00EB, 0x00D5, 0x0095, 0x006E, 0x0042, 0x002B, 0x0010, 0x01F9, 0x01DA, 0x0157, 0x0154, 0x00C0, 0x0081, 0x0022, 0x03B7, 0x03B0, 0x01BD, 0x010D, 0x0047, 0x07F8, 0x0554, 0x0306, 0x0FF3, 0x0EC4, 0x0609, 0x1D8A, 0x1554, 0x762D, }; static const uint16_t bitalloc_129_codes_g[129] = { 0x1E20, 0x1E5E, 0x031C, 0x051A, 0x0718, 0x0916, 0x0B14, 0x0D12, 0x0F11, 0x0090, 0x018F, 0x028E, 0x038D, 0x048C, 0x058B, 0x068A, 0x0789, 0x0049, 0x00C8, 0x0148, 0x01C7, 0x0247, 0x02C6, 0x0346, 0x03C5, 0x0025, 0x0065, 0x00A5, 0x00E4, 0x0124, 0x0164, 0x01A4, 0x01E3, 0x0013, 0x0033, 0x0053, 0x0073, 0x0093, 0x00B3, 0x00D3, 0x00F3, 0x000A, 0x001A, 0x002A, 0x003A, 0x004A, 0x005A, 0x006A, 0x007A, 0x0006, 0x000E, 0x0016, 0x001E, 0x0026, 0x002E, 0x0036, 0x003E, 0x0004, 0x0008, 0x000C, 0x0010, 0x0014, 0x0018, 0x001C, 0x0000, 0x001D, 0x0019, 0x0015, 0x0011, 0x000D, 0x0009, 0x0005, 0x003F, 0x0037, 0x002F, 0x0027, 0x001F, 0x0017, 0x000F, 0x0007, 0x007B, 0x006B, 0x005B, 0x004B, 0x003B, 0x002B, 0x001B, 0x000B, 0x0008, 0x00F0, 0x00D0, 0x00B0, 0x0090, 0x0070, 0x0050, 0x0030, 0x01E4, 0x01A5, 0x0165, 0x0125, 0x00E5, 0x00E2, 0x00A2, 0x0062, 0x03CA, 0x0347, 0x02C7, 
0x02C4, 0x0244, 0x0149, 0x00C9, 0x00C6, 0x0796, 0x068B, 0x0688, 0x048D, 0x048A, 0x028F, 0x028C, 0x0091, 0x0F2E, 0x0D13, 0x0B15, 0x0917, 0x0719, 0x051B, 0x031D, 0x1E5F, 0x1E21, }; static const uint8_t bitalloc_129_bits_a[129] = { 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, }; static const uint8_t bitalloc_129_bits_b[129] = { 14, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 14, }; static const uint8_t bitalloc_129_bits_c[129] = { 13, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 13, }; static const uint8_t bitalloc_129_bits_d[129] = { 16, 16, 16, 16, 16, 16, 15, 15, 15, 14, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12, 
12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 16, 16, 16, 16, 16, 16, }; static const uint8_t bitalloc_129_bits_e[129] = { 16, 16, 16, 16, 16, 16, 15, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16, 16, 16, 16, 16, }; static const uint8_t bitalloc_129_bits_f[129] = { 15, 14, 13, 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 15, }; static const uint8_t bitalloc_129_bits_g[129] = { 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 13, 13, }; static const uint8_t bitalloc_sizes[10] = { 3, 5, 7, 9, 13, 17, 25, 33, 65, 129 }; static const int8_t bitalloc_offsets[10] = { -1, -2, -3, -4, -6, -8, -12, -16, -32, -64 }; static const uint8_t bitalloc_maxbits[10][7] = { { 2 }, { 4, 3, 3 }, { 5, 5, 4 }, { 6, 5, 6 }, { 7, 6, 5 }, { 9, 8, 7, 9, 8, 8, 8 }, { 9, 9, 8, 9, 8, 9, 9 }, { 9, 9, 9, 9, 9, 9, 9 }, { 9, 9, 9, 9, 9, 9, 9 }, { 9, 9, 9, 9, 9, 9, 9 } }; static const uint16_t* const 
bitalloc_codes[10][8] = { { bitalloc_3_codes, NULL }, { bitalloc_5_codes_a, bitalloc_5_codes_b, bitalloc_5_codes_c, NULL }, { bitalloc_7_codes_a, bitalloc_7_codes_b, bitalloc_7_codes_c, NULL }, { bitalloc_9_codes_a, bitalloc_9_codes_b, bitalloc_9_codes_c, NULL }, { bitalloc_13_codes_a, bitalloc_13_codes_b, bitalloc_13_codes_c, NULL }, { bitalloc_17_codes_a, bitalloc_17_codes_b, bitalloc_17_codes_c, bitalloc_17_codes_d, bitalloc_17_codes_e, bitalloc_17_codes_f, bitalloc_17_codes_g, NULL }, { bitalloc_25_codes_a, bitalloc_25_codes_b, bitalloc_25_codes_c, bitalloc_25_codes_d, bitalloc_25_codes_e, bitalloc_25_codes_f, bitalloc_25_codes_g, NULL }, { bitalloc_33_codes_a, bitalloc_33_codes_b, bitalloc_33_codes_c, bitalloc_33_codes_d, bitalloc_33_codes_e, bitalloc_33_codes_f, bitalloc_33_codes_g, NULL }, { bitalloc_65_codes_a, bitalloc_65_codes_b, bitalloc_65_codes_c, bitalloc_65_codes_d, bitalloc_65_codes_e, bitalloc_65_codes_f, bitalloc_65_codes_g, NULL }, { bitalloc_129_codes_a, bitalloc_129_codes_b, bitalloc_129_codes_c, bitalloc_129_codes_d, bitalloc_129_codes_e, bitalloc_129_codes_f, bitalloc_129_codes_g, NULL } }; static const uint8_t* const bitalloc_bits[10][8] = { { bitalloc_3_bits, NULL }, { bitalloc_5_bits_a, bitalloc_5_bits_b, bitalloc_5_bits_c, NULL }, { bitalloc_7_bits_a, bitalloc_7_bits_b, bitalloc_7_bits_c, NULL }, { bitalloc_9_bits_a, bitalloc_9_bits_b, bitalloc_9_bits_c, NULL }, { bitalloc_13_bits_a, bitalloc_13_bits_b, bitalloc_13_bits_c, NULL }, { bitalloc_17_bits_a, bitalloc_17_bits_b, bitalloc_17_bits_c, bitalloc_17_bits_d, bitalloc_17_bits_e, bitalloc_17_bits_f, bitalloc_17_bits_g, NULL }, { bitalloc_25_bits_a, bitalloc_25_bits_b, bitalloc_25_bits_c, bitalloc_25_bits_d, bitalloc_25_bits_e, bitalloc_25_bits_f, bitalloc_25_bits_g, NULL }, { bitalloc_33_bits_a, bitalloc_33_bits_b, bitalloc_33_bits_c, bitalloc_33_bits_d, bitalloc_33_bits_e, bitalloc_33_bits_f, bitalloc_33_bits_g, NULL }, { bitalloc_65_bits_a, bitalloc_65_bits_b, bitalloc_65_bits_c, 
bitalloc_65_bits_d, bitalloc_65_bits_e, bitalloc_65_bits_f, bitalloc_65_bits_g, NULL }, { bitalloc_129_bits_a, bitalloc_129_bits_b, bitalloc_129_bits_c, bitalloc_129_bits_d, bitalloc_129_bits_e, bitalloc_129_bits_f, bitalloc_129_bits_g, NULL } }; #endif /* AVCODEC_DCAHUFF_H */
123linslouis-android-video-cutter
jni/libavcodec/dcahuff.h
C
asf20
44,691
/* * JPEG-LS common code * Copyright (c) 2003 Michael Niedermayer * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * JPEG-LS common code. */ #include "jpegls.h" void ff_jpegls_init_state(JLSState *state){ int i; state->twonear = state->near * 2 + 1; state->range = ((state->maxval + state->twonear - 1) / state->twonear) + 1; // QBPP = ceil(log2(RANGE)) for(state->qbpp = 0; (1 << state->qbpp) < state->range; state->qbpp++); if(state->bpp < 8) state->limit = 16 + 2 * state->bpp - state->qbpp; else state->limit = (4 * state->bpp) - state->qbpp; for(i = 0; i < 367; i++) { state->A[i] = FFMAX((state->range + 32) >> 6, 2); state->N[i] = 1; } } /** * Custom value clipping function used in T1, T2, T3 calculation */ static inline int iso_clip(int v, int vmin, int vmax){ if(v > vmax || v < vmin) return vmin; else return v; } void ff_jpegls_reset_coding_parameters(JLSState *s, int reset_all){ const int basic_t1= 3; const int basic_t2= 7; const int basic_t3= 21; int factor; if(s->maxval==0 || reset_all) s->maxval= (1 << s->bpp) - 1; if(s->maxval >=128){ factor= (FFMIN(s->maxval, 4095) + 128)>>8; if(s->T1==0 || reset_all) s->T1= iso_clip(factor*(basic_t1-2) + 2 + 3*s->near, s->near+1, s->maxval); if(s->T2==0 || reset_all) s->T2= 
iso_clip(factor*(basic_t2-3) + 3 + 5*s->near, s->T1, s->maxval); if(s->T3==0 || reset_all) s->T3= iso_clip(factor*(basic_t3-4) + 4 + 7*s->near, s->T2, s->maxval); }else{ factor= 256 / (s->maxval + 1); if(s->T1==0 || reset_all) s->T1= iso_clip(FFMAX(2, basic_t1/factor + 3*s->near), s->near+1, s->maxval); if(s->T2==0 || reset_all) s->T2= iso_clip(FFMAX(3, basic_t2/factor + 5*s->near), s->T1, s->maxval); if(s->T3==0 || reset_all) s->T3= iso_clip(FFMAX(4, basic_t3/factor + 7*s->near), s->T2, s->maxval); } if(s->reset==0 || reset_all) s->reset= 64; // av_log(NULL, AV_LOG_DEBUG, "[JPEG-LS RESET] T=%i,%i,%i\n", s->T1, s->T2, s->T3); }
123linslouis-android-video-cutter
jni/libavcodec/jpegls.c
C
asf20
2,957
/* * (c) 2001 Fabrice Bellard * 2007 Marc Hoffman <marc.hoffman@analog.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * DCT test (c) 2001 Fabrice Bellard * Started from sample code by Juan J. Sierralta P. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <sys/time.h> #include <unistd.h> #include <math.h> #include "libavutil/common.h" #include "libavutil/lfg.h" #include "simple_idct.h" #include "aandcttab.h" #include "faandct.h" #include "faanidct.h" #include "x86/idct_xvid.h" #include "dctref.h" #undef printf void ff_mmx_idct(DCTELEM *data); void ff_mmxext_idct(DCTELEM *data); void odivx_idct_c(short *block); // BFIN void ff_bfin_idct(DCTELEM *block); void ff_bfin_fdct(DCTELEM *block); // ALTIVEC void fdct_altivec(DCTELEM *block); //void idct_altivec(DCTELEM *block);?? 
no routine // ARM void ff_j_rev_dct_arm(DCTELEM *data); void ff_simple_idct_arm(DCTELEM *data); void ff_simple_idct_armv5te(DCTELEM *data); void ff_simple_idct_armv6(DCTELEM *data); void ff_simple_idct_neon(DCTELEM *data); void ff_simple_idct_axp(DCTELEM *data); struct algo { const char *name; enum { FDCT, IDCT } is_idct; void (* func) (DCTELEM *block); void (* ref) (DCTELEM *block); enum formattag { NO_PERM,MMX_PERM, MMX_SIMPLE_PERM, SCALE_PERM, SSE2_PERM, PARTTRANS_PERM } format; int mm_support; }; #ifndef FAAN_POSTSCALE #define FAAN_SCALE SCALE_PERM #else #define FAAN_SCALE NO_PERM #endif static int cpu_flags; struct algo algos[] = { {"REF-DBL", 0, ff_ref_fdct, ff_ref_fdct, NO_PERM}, {"FAAN", 0, ff_faandct, ff_ref_fdct, FAAN_SCALE}, {"FAANI", 1, ff_faanidct, ff_ref_idct, NO_PERM}, {"IJG-AAN-INT", 0, fdct_ifast, ff_ref_fdct, SCALE_PERM}, {"IJG-LLM-INT", 0, ff_jpeg_fdct_islow, ff_ref_fdct, NO_PERM}, {"REF-DBL", 1, ff_ref_idct, ff_ref_idct, NO_PERM}, {"INT", 1, j_rev_dct, ff_ref_idct, MMX_PERM}, {"SIMPLE-C", 1, ff_simple_idct, ff_ref_idct, NO_PERM}, #if HAVE_MMX {"MMX", 0, ff_fdct_mmx, ff_ref_fdct, NO_PERM, FF_MM_MMX}, #if HAVE_MMX2 {"MMX2", 0, ff_fdct_mmx2, ff_ref_fdct, NO_PERM, FF_MM_MMX2}, {"SSE2", 0, ff_fdct_sse2, ff_ref_fdct, NO_PERM, FF_MM_SSE2}, #endif #if CONFIG_GPL {"LIBMPEG2-MMX", 1, ff_mmx_idct, ff_ref_idct, MMX_PERM, FF_MM_MMX}, {"LIBMPEG2-MMX2", 1, ff_mmxext_idct, ff_ref_idct, MMX_PERM, FF_MM_MMX2}, #endif {"SIMPLE-MMX", 1, ff_simple_idct_mmx, ff_ref_idct, MMX_SIMPLE_PERM, FF_MM_MMX}, {"XVID-MMX", 1, ff_idct_xvid_mmx, ff_ref_idct, NO_PERM, FF_MM_MMX}, {"XVID-MMX2", 1, ff_idct_xvid_mmx2, ff_ref_idct, NO_PERM, FF_MM_MMX2}, {"XVID-SSE2", 1, ff_idct_xvid_sse2, ff_ref_idct, SSE2_PERM, FF_MM_SSE2}, #endif #if HAVE_ALTIVEC {"altivecfdct", 0, fdct_altivec, ff_ref_fdct, NO_PERM, FF_MM_ALTIVEC}, #endif #if ARCH_BFIN {"BFINfdct", 0, ff_bfin_fdct, ff_ref_fdct, NO_PERM}, {"BFINidct", 1, ff_bfin_idct, ff_ref_idct, NO_PERM}, #endif #if ARCH_ARM {"SIMPLE-ARM", 1, 
ff_simple_idct_arm, ff_ref_idct, NO_PERM }, {"INT-ARM", 1, ff_j_rev_dct_arm, ff_ref_idct, MMX_PERM }, #if HAVE_ARMV5TE {"SIMPLE-ARMV5TE", 1, ff_simple_idct_armv5te, ff_ref_idct, NO_PERM }, #endif #if HAVE_ARMV6 {"SIMPLE-ARMV6", 1, ff_simple_idct_armv6, ff_ref_idct, MMX_PERM }, #endif #if HAVE_NEON {"SIMPLE-NEON", 1, ff_simple_idct_neon, ff_ref_idct, PARTTRANS_PERM }, #endif #endif /* ARCH_ARM */ #if ARCH_ALPHA {"SIMPLE-ALPHA", 1, ff_simple_idct_axp, ff_ref_idct, NO_PERM }, #endif { 0 } }; #define AANSCALE_BITS 12 uint8_t cropTbl[256 + 2 * MAX_NEG_CROP]; static int64_t gettime(void) { struct timeval tv; gettimeofday(&tv,NULL); return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; } #define NB_ITS 20000 #define NB_ITS_SPEED 50000 static short idct_mmx_perm[64]; static short idct_simple_mmx_perm[64]={ 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, }; static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7}; static void idct_mmx_init(void) { int i; /* the mmx/mmxext idct uses a reordered input, so we patch scan tables */ for (i = 0; i < 64; i++) { idct_mmx_perm[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2); // idct_simple_mmx_perm[i] = simple_block_permute_op(i); } } DECLARE_ALIGNED(16, static DCTELEM, block)[64]; DECLARE_ALIGNED(8, static DCTELEM, block1)[64]; DECLARE_ALIGNED(8, static DCTELEM, block_org)[64]; static inline void mmx_emms(void) { #if HAVE_MMX if (cpu_flags & FF_MM_MMX) __asm__ volatile ("emms\n\t"); #endif } static void dct_error(const char *name, int is_idct, void (*fdct_func)(DCTELEM *block), void (*fdct_ref)(DCTELEM *block), int form, int test) { int it, i, scale; int err_inf, v; int64_t err2, ti, ti1, 
it1; int64_t sysErr[64], sysErrMax=0; int maxout=0; int blockSumErrMax=0, blockSumErr; AVLFG prng; av_lfg_init(&prng, 1); err_inf = 0; err2 = 0; for(i=0; i<64; i++) sysErr[i]=0; for(it=0;it<NB_ITS;it++) { for(i=0;i<64;i++) block1[i] = 0; switch(test){ case 0: for(i=0;i<64;i++) block1[i] = (av_lfg_get(&prng) % 512) -256; if (is_idct){ ff_ref_fdct(block1); for(i=0;i<64;i++) block1[i]>>=3; } break; case 1:{ int num = av_lfg_get(&prng) % 10 + 1; for(i=0;i<num;i++) block1[av_lfg_get(&prng) % 64] = av_lfg_get(&prng) % 512 -256; }break; case 2: block1[0] = av_lfg_get(&prng) % 4096 - 2048; block1[63]= (block1[0]&1)^1; break; } #if 0 // simulate mismatch control { int sum=0; for(i=0;i<64;i++) sum+=block1[i]; if((sum&1)==0) block1[63]^=1; } #endif for(i=0; i<64; i++) block_org[i]= block1[i]; if (form == MMX_PERM) { for(i=0;i<64;i++) block[idct_mmx_perm[i]] = block1[i]; } else if (form == MMX_SIMPLE_PERM) { for(i=0;i<64;i++) block[idct_simple_mmx_perm[i]] = block1[i]; } else if (form == SSE2_PERM) { for(i=0; i<64; i++) block[(i&0x38) | idct_sse2_row_perm[i&7]] = block1[i]; } else if (form == PARTTRANS_PERM) { for(i=0; i<64; i++) block[(i&0x24) | ((i&3)<<3) | ((i>>3)&3)] = block1[i]; } else { for(i=0; i<64; i++) block[i]= block1[i]; } #if 0 // simulate mismatch control for tested IDCT but not the ref { int sum=0; for(i=0;i<64;i++) sum+=block[i]; if((sum&1)==0) block[63]^=1; } #endif fdct_func(block); mmx_emms(); if (form == SCALE_PERM) { for(i=0; i<64; i++) { scale = 8*(1 << (AANSCALE_BITS + 11)) / ff_aanscales[i]; block[i] = (block[i] * scale /*+ (1<<(AANSCALE_BITS-1))*/) >> AANSCALE_BITS; } } fdct_ref(block1); blockSumErr=0; for(i=0;i<64;i++) { v = abs(block[i] - block1[i]); if (v > err_inf) err_inf = v; err2 += v * v; sysErr[i] += block[i] - block1[i]; blockSumErr += v; if( abs(block[i])>maxout) maxout=abs(block[i]); } if(blockSumErrMax < blockSumErr) blockSumErrMax= blockSumErr; #if 0 // print different matrix pairs if(blockSumErr){ printf("\n"); for(i=0; i<64; i++){ 
if((i&7)==0) printf("\n"); printf("%4d ", block_org[i]); } for(i=0; i<64; i++){ if((i&7)==0) printf("\n"); printf("%4d ", block[i] - block1[i]); } } #endif } for(i=0; i<64; i++) sysErrMax= FFMAX(sysErrMax, FFABS(sysErr[i])); #if 1 // dump systematic errors for(i=0; i<64; i++){ if(i%8==0) printf("\n"); printf("%7d ", (int)sysErr[i]); } printf("\n"); #endif printf("%s %s: err_inf=%d err2=%0.8f syserr=%0.8f maxout=%d blockSumErr=%d\n", is_idct ? "IDCT" : "DCT", name, err_inf, (double)err2 / NB_ITS / 64.0, (double)sysErrMax / NB_ITS, maxout, blockSumErrMax); #if 1 //Speed test /* speed test */ for(i=0;i<64;i++) block1[i] = 0; switch(test){ case 0: for(i=0;i<64;i++) block1[i] = av_lfg_get(&prng) % 512 -256; if (is_idct){ ff_ref_fdct(block1); for(i=0;i<64;i++) block1[i]>>=3; } break; case 1:{ case 2: block1[0] = av_lfg_get(&prng) % 512 -256; block1[1] = av_lfg_get(&prng) % 512 -256; block1[2] = av_lfg_get(&prng) % 512 -256; block1[3] = av_lfg_get(&prng) % 512 -256; }break; } if (form == MMX_PERM) { for(i=0;i<64;i++) block[idct_mmx_perm[i]] = block1[i]; } else if(form == MMX_SIMPLE_PERM) { for(i=0;i<64;i++) block[idct_simple_mmx_perm[i]] = block1[i]; } else { for(i=0; i<64; i++) block[i]= block1[i]; } ti = gettime(); it1 = 0; do { for(it=0;it<NB_ITS_SPEED;it++) { for(i=0; i<64; i++) block[i]= block1[i]; // memcpy(block, block1, sizeof(DCTELEM) * 64); // do not memcpy especially not fastmemcpy because it does movntq !!! fdct_func(block); } it1 += NB_ITS_SPEED; ti1 = gettime() - ti; } while (ti1 < 1000000); mmx_emms(); printf("%s %s: %0.1f kdct/s\n", is_idct ? 
"IDCT" : "DCT", name, (double)it1 * 1000.0 / (double)ti1); #endif } DECLARE_ALIGNED(8, static uint8_t, img_dest)[64]; DECLARE_ALIGNED(8, static uint8_t, img_dest1)[64]; static void idct248_ref(uint8_t *dest, int linesize, int16_t *block) { static int init; static double c8[8][8]; static double c4[4][4]; double block1[64], block2[64], block3[64]; double s, sum, v; int i, j, k; if (!init) { init = 1; for(i=0;i<8;i++) { sum = 0; for(j=0;j<8;j++) { s = (i==0) ? sqrt(1.0/8.0) : sqrt(1.0/4.0); c8[i][j] = s * cos(M_PI * i * (j + 0.5) / 8.0); sum += c8[i][j] * c8[i][j]; } } for(i=0;i<4;i++) { sum = 0; for(j=0;j<4;j++) { s = (i==0) ? sqrt(1.0/4.0) : sqrt(1.0/2.0); c4[i][j] = s * cos(M_PI * i * (j + 0.5) / 4.0); sum += c4[i][j] * c4[i][j]; } } } /* butterfly */ s = 0.5 * sqrt(2.0); for(i=0;i<4;i++) { for(j=0;j<8;j++) { block1[8*(2*i)+j] = (block[8*(2*i)+j] + block[8*(2*i+1)+j]) * s; block1[8*(2*i+1)+j] = (block[8*(2*i)+j] - block[8*(2*i+1)+j]) * s; } } /* idct8 on lines */ for(i=0;i<8;i++) { for(j=0;j<8;j++) { sum = 0; for(k=0;k<8;k++) sum += c8[k][j] * block1[8*i+k]; block2[8*i+j] = sum; } } /* idct4 */ for(i=0;i<8;i++) { for(j=0;j<4;j++) { /* top */ sum = 0; for(k=0;k<4;k++) sum += c4[k][j] * block2[8*(2*k)+i]; block3[8*(2*j)+i] = sum; /* bottom */ sum = 0; for(k=0;k<4;k++) sum += c4[k][j] * block2[8*(2*k+1)+i]; block3[8*(2*j+1)+i] = sum; } } /* clamp and store the result */ for(i=0;i<8;i++) { for(j=0;j<8;j++) { v = block3[8*i+j]; if (v < 0) v = 0; else if (v > 255) v = 255; dest[i * linesize + j] = (int)rint(v); } } } static void idct248_error(const char *name, void (*idct248_put)(uint8_t *dest, int line_size, int16_t *block)) { int it, i, it1, ti, ti1, err_max, v; AVLFG prng; av_lfg_init(&prng, 1); /* just one test to see if code is correct (precision is less important here) */ err_max = 0; for(it=0;it<NB_ITS;it++) { /* XXX: use forward transform to generate values */ for(i=0;i<64;i++) block1[i] = av_lfg_get(&prng) % 256 - 128; block1[0] += 1024; for(i=0; i<64; i++) 
block[i]= block1[i]; idct248_ref(img_dest1, 8, block); for(i=0; i<64; i++) block[i]= block1[i]; idct248_put(img_dest, 8, block); for(i=0;i<64;i++) { v = abs((int)img_dest[i] - (int)img_dest1[i]); if (v == 255) printf("%d %d\n", img_dest[i], img_dest1[i]); if (v > err_max) err_max = v; } #if 0 printf("ref=\n"); for(i=0;i<8;i++) { int j; for(j=0;j<8;j++) { printf(" %3d", img_dest1[i*8+j]); } printf("\n"); } printf("out=\n"); for(i=0;i<8;i++) { int j; for(j=0;j<8;j++) { printf(" %3d", img_dest[i*8+j]); } printf("\n"); } #endif } printf("%s %s: err_inf=%d\n", 1 ? "IDCT248" : "DCT248", name, err_max); ti = gettime(); it1 = 0; do { for(it=0;it<NB_ITS_SPEED;it++) { for(i=0; i<64; i++) block[i]= block1[i]; // memcpy(block, block1, sizeof(DCTELEM) * 64); // do not memcpy especially not fastmemcpy because it does movntq !!! idct248_put(img_dest, 8, block); } it1 += NB_ITS_SPEED; ti1 = gettime() - ti; } while (ti1 < 1000000); mmx_emms(); printf("%s %s: %0.1f kdct/s\n", 1 ? "IDCT248" : "DCT248", name, (double)it1 * 1000.0 / (double)ti1); } static void help(void) { printf("dct-test [-i] [<test-number>]\n" "test-number 0 -> test with random matrixes\n" " 1 -> test with random sparse matrixes\n" " 2 -> do 3. 
test from mpeg4 std\n" "-i test IDCT implementations\n" "-4 test IDCT248 implementations\n"); } int main(int argc, char **argv) { int test_idct = 0, test_248_dct = 0; int c,i; int test=1; cpu_flags = mm_support(); ff_ref_dct_init(); idct_mmx_init(); for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i; for(i=0;i<MAX_NEG_CROP;i++) { cropTbl[i] = 0; cropTbl[i + MAX_NEG_CROP + 256] = 255; } for(;;) { c = getopt(argc, argv, "ih4"); if (c == -1) break; switch(c) { case 'i': test_idct = 1; break; case '4': test_248_dct = 1; break; default : case 'h': help(); return 0; } } if(optind <argc) test= atoi(argv[optind]); printf("ffmpeg DCT/IDCT test\n"); if (test_248_dct) { idct248_error("SIMPLE-C", ff_simple_idct248_put); } else { for (i=0;algos[i].name;i++) if (algos[i].is_idct == test_idct && !(~cpu_flags & algos[i].mm_support)) { dct_error (algos[i].name, algos[i].is_idct, algos[i].func, algos[i].ref, algos[i].format, test); } } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/dct-test.c
C
asf20
16,744
/* * Generate a header file for hardcoded DV tables * * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #ifndef CONFIG_SMALL #error CONFIG_SMALL must be defined to generate tables #endif #include "dv_tablegen.h" #include "tableprint.h" #include <inttypes.h> WRITE_1D_FUNC_ARGV(vlc_pair, struct dv_vlc_pair, 7, "{0x%"PRIx32", %"PRId8"}", data[i].vlc, data[i].size) WRITE_2D_FUNC(vlc_pair, struct dv_vlc_pair) int main(void) { dv_vlc_map_tableinit(); write_fileheader(); printf("static const struct dv_vlc_pair dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE] = {\n"); write_vlc_pair_2d_array(dv_vlc_map, DV_VLC_MAP_RUN_SIZE, DV_VLC_MAP_LEV_SIZE); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/dv_tablegen.c
C
asf20
1,563
/* * Dirac parser * * Copyright (c) 2007-2008 Marco Gerards <marco@gnu.org> * Copyright (c) 2008 BBC, Anuradha Suraparaju <asuraparaju@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Dirac Parser * @author Marco Gerards <marco@gnu.org> */ #include "libavutil/intreadwrite.h" #include "parser.h" #define DIRAC_PARSE_INFO_PREFIX 0x42424344 /** * Finds the end of the current frame in the bitstream. 
* @return the position of the first byte of the next frame or -1 */ typedef struct DiracParseContext { int state; int is_synced; int sync_offset; int header_bytes_needed; int overread_index; int buffer_size; int index; uint8_t *buffer; int dirac_unit_size; uint8_t *dirac_unit; } DiracParseContext; static int find_frame_end(DiracParseContext *pc, const uint8_t *buf, int buf_size) { uint32_t state = pc->state; int i = 0; if (!pc->is_synced) { for (i = 0; i < buf_size; i++) { state = (state << 8) | buf[i]; if (state == DIRAC_PARSE_INFO_PREFIX) { state = -1; pc->is_synced = 1; pc->header_bytes_needed = 9; pc->sync_offset = i; break; } } } if (pc->is_synced) { pc->sync_offset = 0; for (; i < buf_size; i++) { if (state == DIRAC_PARSE_INFO_PREFIX) { if ((buf_size-i) >= pc->header_bytes_needed) { pc->state = -1; return i + pc->header_bytes_needed; } else { pc->header_bytes_needed = 9-(buf_size-i); break; } } else state = (state << 8) | buf[i]; } } pc->state = state; return -1; } typedef struct DiracParseUnit { int next_pu_offset; int prev_pu_offset; uint8_t pu_type; } DiracParseUnit; static int unpack_parse_unit(DiracParseUnit *pu, DiracParseContext *pc, int offset) { uint8_t *start = pc->buffer + offset; uint8_t *end = pc->buffer + pc->index; if (start < pc->buffer || (start+13 > end)) return 0; pu->pu_type = start[4]; pu->next_pu_offset = AV_RB32(start+5); pu->prev_pu_offset = AV_RB32(start+9); if (pu->pu_type == 0x10 && pu->next_pu_offset == 0) pu->next_pu_offset = 13; return 1; } static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx, int next, const uint8_t **buf, int *buf_size) { int parse_timing_info = (s->pts == AV_NOPTS_VALUE && s->dts == AV_NOPTS_VALUE); DiracParseContext *pc = s->priv_data; if (pc->overread_index) { memcpy(pc->buffer, pc->buffer + pc->overread_index, pc->index - pc->overread_index); pc->index -= pc->overread_index; pc->overread_index = 0; if (*buf_size == 0 && pc->buffer[4] == 0x10) { *buf = pc->buffer; *buf_size = 
pc->index; return 0; } } if ( next == -1) { /* Found a possible frame start but not a frame end */ void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, pc->index + (*buf_size - pc->sync_offset)); pc->buffer = new_buffer; memcpy(pc->buffer+pc->index, (*buf + pc->sync_offset), *buf_size - pc->sync_offset); pc->index += *buf_size - pc->sync_offset; return -1; } else { /* Found a possible frame start and a possible frame end */ DiracParseUnit pu1, pu; void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, pc->index + next); pc->buffer = new_buffer; memcpy(pc->buffer + pc->index, *buf, next); pc->index += next; /* Need to check if we have a valid Parse Unit. We can't go by the * sync pattern 'BBCD' alone because arithmetic coding of the residual * and motion data can cause the pattern triggering a false start of * frame. So check if the previous parse offset of the next parse unit * is equal to the next parse offset of the current parse unit then * we can be pretty sure that we have a valid parse unit */ if (!unpack_parse_unit(&pu1, pc, pc->index - 13) || !unpack_parse_unit(&pu, pc, pc->index - 13 - pu1.prev_pu_offset) || pu.next_pu_offset != pu1.prev_pu_offset) { pc->index -= 9; *buf_size = next-9; pc->header_bytes_needed = 9; return -1; } /* All non-frame data must be accompanied by frame data. This is to * ensure that pts is set correctly. 
So if the current parse unit is * not frame data, wait for frame data to come along */ pc->dirac_unit = pc->buffer + pc->index - 13 - pu1.prev_pu_offset - pc->dirac_unit_size; pc->dirac_unit_size += pu.next_pu_offset; if ((pu.pu_type&0x08) != 0x08) { pc->header_bytes_needed = 9; *buf_size = next; return -1; } /* Get the picture number to set the pts and dts*/ if (parse_timing_info) { uint8_t *cur_pu = pc->buffer + pc->index - 13 - pu1.prev_pu_offset; int pts = AV_RB32(cur_pu + 13); if (s->last_pts == 0 && s->last_dts == 0) s->dts = pts - 1; else s->dts = s->last_dts+1; s->pts = pts; if (!avctx->has_b_frames && (cur_pu[4] & 0x03)) avctx->has_b_frames = 1; } if (avctx->has_b_frames && s->pts == s->dts) s->pict_type = FF_B_TYPE; /* Finally have a complete Dirac data unit */ *buf = pc->dirac_unit; *buf_size = pc->dirac_unit_size; pc->dirac_unit_size = 0; pc->overread_index = pc->index-13; pc->header_bytes_needed = 9; } return next; } static int dirac_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { DiracParseContext *pc = s->priv_data; int next; *poutbuf = NULL; *poutbuf_size = 0; if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { next = buf_size; *poutbuf = buf; *poutbuf_size = buf_size; /* Assume that data has been packetized into an encapsulation unit. */ } else { next = find_frame_end(pc, buf, buf_size); if (!pc->is_synced && next == -1) { /* No frame start found yet. So throw away the entire buffer. */ return buf_size; } if (dirac_combine_frame(s, avctx, next, &buf, &buf_size) < 0) { return buf_size; } } *poutbuf = buf; *poutbuf_size = buf_size; return next; } static void dirac_parse_close(AVCodecParserContext *s) { DiracParseContext *pc = s->priv_data; if (pc->buffer_size > 0) av_free(pc->buffer); } AVCodecParser dirac_parser = { { CODEC_ID_DIRAC }, sizeof(DiracParseContext), NULL, dirac_parse, dirac_parse_close, };
123linslouis-android-video-cutter
jni/libavcodec/dirac_parser.c
C
asf20
8,240
/* * Copyright (c) 2002 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "h263.h" #include "mathops.h" #include "msmpeg4.h" #include "msmpeg4data.h" #include "intrax8.h" #include "wmv2.h" static void parse_mb_skip(Wmv2Context * w){ int mb_x, mb_y; MpegEncContext * const s= &w->s; uint32_t * const mb_type= s->current_picture_ptr->mb_type; w->skip_type= get_bits(&s->gb, 2); switch(w->skip_type){ case SKIP_TYPE_NONE: for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0; } } break; case SKIP_TYPE_MPEG: for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0; } } break; case SKIP_TYPE_ROW: for(mb_y=0; mb_y<s->mb_height; mb_y++){ if(get_bits1(&s->gb)){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; } }else{ for(mb_x=0; mb_x<s->mb_width; mb_x++){ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? 
MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0; } } } break; case SKIP_TYPE_COL: for(mb_x=0; mb_x<s->mb_width; mb_x++){ if(get_bits1(&s->gb)){ for(mb_y=0; mb_y<s->mb_height; mb_y++){ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; } }else{ for(mb_y=0; mb_y<s->mb_height; mb_y++){ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0; } } } break; } } static int decode_ext_header(Wmv2Context *w){ MpegEncContext * const s= &w->s; GetBitContext gb; int fps; int code; if(s->avctx->extradata_size<4) return -1; init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8); fps = get_bits(&gb, 5); s->bit_rate = get_bits(&gb, 11)*1024; w->mspel_bit = get_bits1(&gb); s->loop_filter = get_bits1(&gb); w->abt_flag = get_bits1(&gb); w->j_type_bit = get_bits1(&gb); w->top_left_mv_flag= get_bits1(&gb); w->per_mb_rl_bit = get_bits1(&gb); code = get_bits(&gb, 3); if(code==0) return -1; s->slice_height = s->mb_height / code; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, loop_filter:%d, slices:%d\n", fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, s->loop_filter, code); } return 0; } int ff_wmv2_decode_picture_header(MpegEncContext * s) { Wmv2Context * const w= (Wmv2Context*)s; int code; #if 0 { int i; for(i=0; i<s->gb.size*8; i++) printf("%d", get_bits1(&s->gb)); // get_bits1(&s->gb); printf("END\n"); return -1; } #endif if(s->picture_number==0) decode_ext_header(w); s->pict_type = get_bits1(&s->gb) + 1; if(s->pict_type == FF_I_TYPE){ code = get_bits(&s->gb, 7); av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code); } s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); if(s->qscale <= 0) return -1; return 0; } int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s) { Wmv2Context * const w= (Wmv2Context*)s; if (s->pict_type == 
FF_I_TYPE) { if(w->j_type_bit) w->j_type= get_bits1(&s->gb); else w->j_type= 0; //FIXME check if(!w->j_type){ if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb); else s->per_mb_rl_table= 0; if(!s->per_mb_rl_table){ s->rl_chroma_table_index = decode012(&s->gb); s->rl_table_index = decode012(&s->gb); } s->dc_table_index = get_bits1(&s->gb); } s->inter_intra_pred= 0; s->no_rounding = 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n", s->qscale, s->rl_chroma_table_index, s->rl_table_index, s->dc_table_index, s->per_mb_rl_table, w->j_type); } }else{ int cbp_index; w->j_type=0; parse_mb_skip(w); cbp_index= decode012(&s->gb); if(s->qscale <= 10){ int map[3]= {0,2,1}; w->cbp_table_index= map[cbp_index]; }else if(s->qscale <= 20){ int map[3]= {1,0,2}; w->cbp_table_index= map[cbp_index]; }else{ int map[3]= {2,1,0}; w->cbp_table_index= map[cbp_index]; } if(w->mspel_bit) s->mspel= get_bits1(&s->gb); else s->mspel= 0; //FIXME check if(w->abt_flag){ w->per_mb_abt= get_bits1(&s->gb)^1; if(!w->per_mb_abt){ w->abt_type= decode012(&s->gb); } } if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb); else s->per_mb_rl_table= 0; if(!s->per_mb_rl_table){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } s->dc_table_index = get_bits1(&s->gb); s->mv_table_index = get_bits1(&s->gb); s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE); s->no_rounding ^= 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n", s->rl_table_index, s->rl_chroma_table_index, s->dc_table_index, s->mv_table_index, s->per_mb_rl_table, s->qscale, s->mspel, w->per_mb_abt, w->abt_type, w->cbp_table_index, s->inter_intra_pred); } } s->esc3_level_length= 0; s->esc3_run_length= 0; s->picture_number++; //FIXME ? 
if(w->j_type){ ff_intrax8_decode_picture(&w->x8, 2*s->qscale, (s->qscale-1)|1 ); return 1; } return 0; } static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){ MpegEncContext * const s= &w->s; int ret; ret= ff_msmpeg4_decode_motion(s, mx_ptr, my_ptr); if(ret<0) return -1; if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel) w->hshift= get_bits1(&s->gb); else w->hshift= 0; //printf("%d %d ", *mx_ptr, *my_ptr); return 0; } static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){ MpegEncContext * const s= &w->s; int xy, wrap, diff, type; int16_t *A, *B, *C, *mot_val; wrap = s->b8_stride; xy = s->block_index[0]; mot_val = s->current_picture.motion_val[0][xy]; A = s->current_picture.motion_val[0][xy - 1]; B = s->current_picture.motion_val[0][xy - wrap]; C = s->current_picture.motion_val[0][xy + 2 - wrap]; if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag) diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1])); else diff=0; if(diff >= 8) type= get_bits1(&s->gb); else type= 2; if(type == 0){ *px= A[0]; *py= A[1]; }else if(type == 1){ *px= B[0]; *py= B[1]; }else{ /* special case for first (slice) line */ if (s->first_slice_line) { *px = A[0]; *py = A[1]; } else { *px = mid_pred(A[0], B[0], C[0]); *py = mid_pred(A[1], B[1], C[1]); } } return mot_val; } static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){ MpegEncContext * const s= &w->s; static const int sub_cbp_table[3]= {2,3,1}; int sub_cbp; if(!cbp){ s->block_last_index[n] = -1; return 0; } if(w->per_block_abt) w->abt_type= decode012(&s->gb); #if 0 if(w->per_block_abt) printf("B%d", w->abt_type); #endif w->abt_type_table[n]= w->abt_type; if(w->abt_type){ // const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated; const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable; // const uint8_t *scantable= w->abt_type-1 ? 
w->abt_scantable[1].permutated : w->abt_scantable[0].scantable; sub_cbp= sub_cbp_table[ decode012(&s->gb) ]; // printf("S%d", sub_cbp); if(sub_cbp&1){ if (ff_msmpeg4_decode_block(s, block, n, 1, scantable) < 0) return -1; } if(sub_cbp&2){ if (ff_msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0) return -1; } s->block_last_index[n] = 63; return 0; }else{ return ff_msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated); } } int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { Wmv2Context * const w= (Wmv2Context*)s; int cbp, code, i; uint8_t *coded_val; if(w->j_type) return 0; if (s->pict_type == FF_P_TYPE) { if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){ /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; w->hshift=0; return 0; } code = get_vlc2(&s->gb, ff_mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3); if (code < 0) return -1; s->mb_intra = (~code & 0x40) >> 6; cbp = code & 0x3f; } else { s->mb_intra = 1; code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2); if (code < 0){ av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y); return -1; } /* predict coded block pattern */ cbp = 0; for(i=0;i<6;i++) { int val = ((code >> (5 - i)) & 1); if (i < 4) { int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_val); val = val ^ pred; *coded_val = val; } cbp |= val << (5 - i); } } if (!s->mb_intra) { int mx, my; //printf("P at %d %d\n", s->mb_x, s->mb_y); wmv2_pred_motion(w, &mx, &my); if(cbp){ s->dsp.clear_blocks(s->block[0]); if(s->per_mb_rl_table){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } if(w->abt_flag && w->per_mb_abt){ w->per_block_abt= get_bits1(&s->gb); if(!w->per_block_abt) w->abt_type= decode012(&s->gb); }else w->per_block_abt=0; } if (wmv2_decode_motion(w, 
&mx, &my) < 0) return -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; for (i = 0; i < 6; i++) { if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0) { av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } } else { //if(s->pict_type==FF_P_TYPE) // printf("%d%d ", s->inter_intra_pred, cbp); //printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24)); s->ac_pred = get_bits1(&s->gb); if(s->inter_intra_pred){ s->h263_aic_dir= get_vlc2(&s->gb, ff_inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1); // printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y); } if(s->per_mb_rl_table && cbp){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; } s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } } return 0; } static av_cold int wmv2_decode_init(AVCodecContext *avctx){ Wmv2Context * const w= avctx->priv_data; if(avctx->idct_algo==FF_IDCT_AUTO){ avctx->idct_algo=FF_IDCT_WMV2; } if(ff_msmpeg4_decode_init(avctx) < 0) return -1; ff_wmv2_common_init(w); ff_intrax8_common_init(&w->x8,&w->s); return 0; } static av_cold int wmv2_decode_end(AVCodecContext *avctx) { Wmv2Context *w = avctx->priv_data; ff_intrax8_common_end(&w->x8); return ff_h263_decode_end(avctx); } AVCodec wmv2_decoder = { "wmv2", AVMEDIA_TYPE_VIDEO, CODEC_ID_WMV2, sizeof(Wmv2Context), wmv2_decode_init, NULL, wmv2_decode_end, ff_h263_decode_frame, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"), .pix_fmts= ff_pixfmt_list_420, };
123linslouis-android-video-cutter
jni/libavcodec/wmv2dec.c
C
asf20
14,855
/* * NuppelVideo decoder * Copyright (c) 2006 Reimar Doeffinger * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stdlib.h> #include "libavutil/bswap.h" #include "libavutil/lzo.h" #include "avcodec.h" #include "dsputil.h" #include "rtjpeg.h" typedef struct { AVFrame pic; int codec_frameheader; int quality; int width, height; unsigned int decomp_size; unsigned char* decomp_buf; uint32_t lq[64], cq[64]; RTJpegContext rtj; DSPContext dsp; } NuvContext; static const uint8_t fallback_lquant[] = { 16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56, 14, 17, 22, 29, 51, 87, 80, 62, 18, 22, 37, 56, 68, 109, 103, 77, 24, 35, 55, 64, 81, 104, 113, 92, 49, 64, 78, 87, 103, 121, 120, 101, 72, 92, 95, 98, 112, 100, 103, 99 }; static const uint8_t fallback_cquant[] = { 17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99, 47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99 }; /** * \brief copy frame data from buffer to AVFrame, handling stride. 
* \param f destination AVFrame * \param src source buffer, does not use any line-stride * \param width width of the video frame * \param height height of the video frame */ static void copy_frame(AVFrame *f, const uint8_t *src, int width, int height) { AVPicture pic; avpicture_fill(&pic, src, PIX_FMT_YUV420P, width, height); av_picture_copy((AVPicture *)f, &pic, PIX_FMT_YUV420P, width, height); } /** * \brief extract quantization tables from codec data into our context */ static int get_quant(AVCodecContext *avctx, NuvContext *c, const uint8_t *buf, int size) { int i; if (size < 2 * 64 * 4) { av_log(avctx, AV_LOG_ERROR, "insufficient rtjpeg quant data\n"); return -1; } for (i = 0; i < 64; i++, buf += 4) c->lq[i] = AV_RL32(buf); for (i = 0; i < 64; i++, buf += 4) c->cq[i] = AV_RL32(buf); return 0; } /** * \brief set quantization tables from a quality value */ static void get_quant_quality(NuvContext *c, int quality) { int i; quality = FFMAX(quality, 1); for (i = 0; i < 64; i++) { c->lq[i] = (fallback_lquant[i] << 7) / quality; c->cq[i] = (fallback_cquant[i] << 7) / quality; } } static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) { NuvContext *c = avctx->priv_data; width = (width + 1) & ~1; height = (height + 1) & ~1; if (quality >= 0) get_quant_quality(c, quality); if (width != c->width || height != c->height) { if (avcodec_check_dimensions(avctx, height, width) < 0) return 0; avctx->width = c->width = width; avctx->height = c->height = height; c->decomp_size = c->height * c->width * 3 / 2; c->decomp_buf = av_realloc(c->decomp_buf, c->decomp_size + AV_LZO_OUTPUT_PADDING); if (!c->decomp_buf) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return 0; } rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq); } else if (quality != c->quality) rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq); return 1; } static int decode_frame(AVCodecContext *avctx, void *data, int 
*data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; NuvContext *c = avctx->priv_data; AVFrame *picture = data; int orig_size = buf_size; int keyframe; int result; enum {NUV_UNCOMPRESSED = '0', NUV_RTJPEG = '1', NUV_RTJPEG_IN_LZO = '2', NUV_LZO = '3', NUV_BLACK = 'N', NUV_COPY_LAST = 'L'} comptype; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "coded frame too small\n"); return -1; } // codec data (rtjpeg quant tables) if (buf[0] == 'D' && buf[1] == 'R') { int ret; // skip rest of the frameheader. buf = &buf[12]; buf_size -= 12; ret = get_quant(avctx, c, buf, buf_size); if (ret < 0) return ret; rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq); return orig_size; } if (buf[0] != 'V' || buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n"); return -1; } comptype = buf[1]; switch (comptype) { case NUV_RTJPEG_IN_LZO: case NUV_RTJPEG: keyframe = !buf[2]; break; case NUV_COPY_LAST: keyframe = 0; break; default: keyframe = 1; break; } // skip rest of the frameheader. 
buf = &buf[12]; buf_size -= 12; if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) { int outlen = c->decomp_size, inlen = buf_size; if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen)) av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n"); buf = c->decomp_buf; buf_size = c->decomp_size; } if (c->codec_frameheader) { int w, h, q; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n"); return -1; } w = AV_RL16(&buf[6]); h = AV_RL16(&buf[8]); q = buf[10]; if (!codec_reinit(avctx, w, h, q)) return -1; buf = &buf[12]; buf_size -= 12; } if (keyframe && c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); c->pic.reference = 3; c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; result = avctx->reget_buffer(avctx, &c->pic); if (result < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } c->pic.pict_type = keyframe ? FF_I_TYPE : FF_P_TYPE; c->pic.key_frame = keyframe; // decompress/copy/whatever data switch (comptype) { case NUV_LZO: case NUV_UNCOMPRESSED: { int height = c->height; if (buf_size < c->width * height * 3 / 2) { av_log(avctx, AV_LOG_ERROR, "uncompressed frame too short\n"); height = buf_size / c->width / 3 * 2; } copy_frame(&c->pic, buf, c->width, height); break; } case NUV_RTJPEG_IN_LZO: case NUV_RTJPEG: { rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size); break; } case NUV_BLACK: { memset(c->pic.data[0], 0, c->width * c->height); memset(c->pic.data[1], 128, c->width * c->height / 4); memset(c->pic.data[2], 128, c->width * c->height / 4); break; } case NUV_COPY_LAST: { /* nothing more to do here */ break; } default: av_log(avctx, AV_LOG_ERROR, "unknown compression\n"); return -1; } *picture = c->pic; *data_size = sizeof(AVFrame); return orig_size; } static av_cold int decode_init(AVCodecContext *avctx) { NuvContext *c = avctx->priv_data; avctx->pix_fmt = PIX_FMT_YUV420P; c->pic.data[0] = NULL; c->decomp_buf = 
NULL; c->quality = -1; c->width = 0; c->height = 0; c->codec_frameheader = avctx->codec_tag == MKTAG('R', 'J', 'P', 'G'); if (avctx->extradata_size) get_quant(avctx, c, avctx->extradata, avctx->extradata_size); dsputil_init(&c->dsp, avctx); if (!codec_reinit(avctx, avctx->width, avctx->height, -1)) return 1; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { NuvContext *c = avctx->priv_data; av_freep(&c->decomp_buf); if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); return 0; } AVCodec nuv_decoder = { "nuv", AVMEDIA_TYPE_VIDEO, CODEC_ID_NUV, sizeof(NuvContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("NuppelVideo/RTJPEG"), };
123linslouis-android-video-cutter
jni/libavcodec/nuv.c
C
asf20
9,107
/* * Quicktime Animation (RLE) Video Decoder * Copyright (C) 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * QT RLE Video Decoder by Mike Melanson (melanson@pcisys.net) * For more information about the QT RLE format, visit: * http://www.pcisys.net/~melanson/codecs/ * * The QT RLE decoder has seven modes of operation: * 1, 2, 4, 8, 16, 24, and 32 bits per pixel. For modes 1, 2, 4, and 8 * the decoder outputs PAL8 colorspace data. 16-bit data yields RGB555 * data. 24-bit data is RGB24 and 32-bit data is RGB32. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" typedef struct QtrleContext { AVCodecContext *avctx; AVFrame frame; const unsigned char *buf; int size; } QtrleContext; #define CHECK_STREAM_PTR(n) \ if ((stream_ptr + n) > s->size) { \ av_log (s->avctx, AV_LOG_INFO, "Problem: stream_ptr out of bounds (%d >= %d)\n", \ stream_ptr + n, s->size); \ return; \ } #define CHECK_PIXEL_PTR(n) \ if ((pixel_ptr + n > pixel_limit) || (pixel_ptr + n < 0)) { \ av_log (s->avctx, AV_LOG_INFO, "Problem: pixel_ptr = %d, pixel_limit = %d\n", \ pixel_ptr + n, pixel_limit); \ return; \ } \ static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change) { int rle_code; int pixel_ptr = 0; int row_inc = s->frame.linesize[0]; unsigned char pi0, pi1; /* 2 8-pixel values */ unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; int skip; while (lines_to_change) { CHECK_STREAM_PTR(2); skip = s->buf[stream_ptr++]; rle_code = (signed char)s->buf[stream_ptr++]; if (rle_code == 0) break; if(skip & 0x80) { lines_to_change--; row_ptr += row_inc; pixel_ptr = row_ptr + 2 * (skip & 0x7f); } else pixel_ptr += 2 * skip; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; /* get the next 2 bytes from the stream, treat them as groups * of 8 pixels, and output them rle_code times */ CHECK_STREAM_PTR(2); pi0 = s->buf[stream_ptr++]; pi1 = s->buf[stream_ptr++]; CHECK_PIXEL_PTR(rle_code * 2); while (rle_code--) { rgb[pixel_ptr++] = pi0; rgb[pixel_ptr++] = pi1; } } else { /* copy the same pixel directly to output 2 times */ rle_code *= 2; CHECK_STREAM_PTR(rle_code); CHECK_PIXEL_PTR(rle_code); while (rle_code--) rgb[pixel_ptr++] = s->buf[stream_ptr++]; } } } static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change, int bpp) { int rle_code, i; 
int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char pi[16]; /* 16 palette indices */ unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; int num_pixels = (bpp == 4) ? 8 : 16; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (num_pixels * (s->buf[stream_ptr++] - 1)); while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (num_pixels * (s->buf[stream_ptr++] - 1)); CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; /* get the next 4 bytes from the stream, treat them as palette * indexes, and output them rle_code times */ CHECK_STREAM_PTR(4); for (i = num_pixels-1; i >= 0; i--) { pi[num_pixels-1-i] = (s->buf[stream_ptr] >> ((i*bpp) & 0x07)) & ((1<<bpp)-1); stream_ptr+= ((i & ((num_pixels>>2)-1)) == 0); } CHECK_PIXEL_PTR(rle_code * num_pixels); while (rle_code--) { for (i = 0; i < num_pixels; i++) rgb[pixel_ptr++] = pi[i]; } } else { /* copy the same pixel directly to output 4 times */ rle_code *= 4; CHECK_STREAM_PTR(rle_code); CHECK_PIXEL_PTR(rle_code*(num_pixels>>2)); while (rle_code--) { if(bpp == 4) { rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x0f; rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x0f; } else { rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 6) & 0x03; rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x03; rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 2) & 0x03; rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x03; } } } } row_ptr += row_inc; } } static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change) { int rle_code; int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char pi1, pi2, pi3, pi4; /* 4 palette indexes */ unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; while 
(lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (4 * (s->buf[stream_ptr++] - 1)); while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (4 * (s->buf[stream_ptr++] - 1)); CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; /* get the next 4 bytes from the stream, treat them as palette * indexes, and output them rle_code times */ CHECK_STREAM_PTR(4); pi1 = s->buf[stream_ptr++]; pi2 = s->buf[stream_ptr++]; pi3 = s->buf[stream_ptr++]; pi4 = s->buf[stream_ptr++]; CHECK_PIXEL_PTR(rle_code * 4); while (rle_code--) { rgb[pixel_ptr++] = pi1; rgb[pixel_ptr++] = pi2; rgb[pixel_ptr++] = pi3; rgb[pixel_ptr++] = pi4; } } else { /* copy the same pixel directly to output 4 times */ rle_code *= 4; CHECK_STREAM_PTR(rle_code); CHECK_PIXEL_PTR(rle_code); while (rle_code--) { rgb[pixel_ptr++] = s->buf[stream_ptr++]; } } } row_ptr += row_inc; } } static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change) { int rle_code; int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned short rgb16; unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 2; while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (s->buf[stream_ptr++] - 1) * 2; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; CHECK_STREAM_PTR(2); rgb16 = AV_RB16(&s->buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(rle_code * 2); while (rle_code--) { *(unsigned short *)(&rgb[pixel_ptr]) = rgb16; pixel_ptr += 2; } } else { 
CHECK_STREAM_PTR(rle_code * 2); CHECK_PIXEL_PTR(rle_code * 2); /* copy pixels directly to output */ while (rle_code--) { rgb16 = AV_RB16(&s->buf[stream_ptr]); stream_ptr += 2; *(unsigned short *)(&rgb[pixel_ptr]) = rgb16; pixel_ptr += 2; } } } row_ptr += row_inc; } } static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change) { int rle_code; int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char r, g, b; unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 3; while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (s->buf[stream_ptr++] - 1) * 3; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; CHECK_STREAM_PTR(3); r = s->buf[stream_ptr++]; g = s->buf[stream_ptr++]; b = s->buf[stream_ptr++]; CHECK_PIXEL_PTR(rle_code * 3); while (rle_code--) { rgb[pixel_ptr++] = r; rgb[pixel_ptr++] = g; rgb[pixel_ptr++] = b; } } else { CHECK_STREAM_PTR(rle_code * 3); CHECK_PIXEL_PTR(rle_code * 3); /* copy pixels directly to output */ while (rle_code--) { rgb[pixel_ptr++] = s->buf[stream_ptr++]; rgb[pixel_ptr++] = s->buf[stream_ptr++]; rgb[pixel_ptr++] = s->buf[stream_ptr++]; } } } row_ptr += row_inc; } } static void qtrle_decode_32bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change) { int rle_code; int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char a, r, g, b; unsigned int argb; unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 4; while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* 
there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (s->buf[stream_ptr++] - 1) * 4; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; CHECK_STREAM_PTR(4); a = s->buf[stream_ptr++]; r = s->buf[stream_ptr++]; g = s->buf[stream_ptr++]; b = s->buf[stream_ptr++]; argb = (a << 24) | (r << 16) | (g << 8) | (b << 0); CHECK_PIXEL_PTR(rle_code * 4); while (rle_code--) { *(unsigned int *)(&rgb[pixel_ptr]) = argb; pixel_ptr += 4; } } else { CHECK_STREAM_PTR(rle_code * 4); CHECK_PIXEL_PTR(rle_code * 4); /* copy pixels directly to output */ while (rle_code--) { a = s->buf[stream_ptr++]; r = s->buf[stream_ptr++]; g = s->buf[stream_ptr++]; b = s->buf[stream_ptr++]; argb = (a << 24) | (r << 16) | (g << 8) | (b << 0); *(unsigned int *)(&rgb[pixel_ptr]) = argb; pixel_ptr += 4; } } } row_ptr += row_inc; } } static av_cold int qtrle_decode_init(AVCodecContext *avctx) { QtrleContext *s = avctx->priv_data; s->avctx = avctx; switch (avctx->bits_per_coded_sample) { case 1: case 33: avctx->pix_fmt = PIX_FMT_MONOWHITE; break; case 2: case 4: case 8: case 34: case 36: case 40: avctx->pix_fmt = PIX_FMT_PAL8; break; case 16: avctx->pix_fmt = PIX_FMT_RGB555; break; case 24: avctx->pix_fmt = PIX_FMT_RGB24; break; case 32: avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n", avctx->bits_per_coded_sample); break; } s->frame.data[0] = NULL; return 0; } static int qtrle_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QtrleContext *s = avctx->priv_data; int header, start_line; int stream_ptr, height, row_ptr; int has_palette = 0; s->buf = buf; s->size = buf_size; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE; if 
(avctx->reget_buffer(avctx, &s->frame)) { av_log (s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } /* check if this frame is even supposed to change */ if (s->size < 8) goto done; /* start after the chunk size */ stream_ptr = 4; /* fetch the header */ header = AV_RB16(&s->buf[stream_ptr]); stream_ptr += 2; /* if a header is present, fetch additional decoding parameters */ if (header & 0x0008) { if(s->size < 14) goto done; start_line = AV_RB16(&s->buf[stream_ptr]); stream_ptr += 4; height = AV_RB16(&s->buf[stream_ptr]); stream_ptr += 4; } else { start_line = 0; height = s->avctx->height; } row_ptr = s->frame.linesize[0] * start_line; switch (avctx->bits_per_coded_sample) { case 1: case 33: qtrle_decode_1bpp(s, stream_ptr, row_ptr, height); break; case 2: case 34: qtrle_decode_2n4bpp(s, stream_ptr, row_ptr, height, 2); has_palette = 1; break; case 4: case 36: qtrle_decode_2n4bpp(s, stream_ptr, row_ptr, height, 4); has_palette = 1; break; case 8: case 40: qtrle_decode_8bpp(s, stream_ptr, row_ptr, height); has_palette = 1; break; case 16: qtrle_decode_16bpp(s, stream_ptr, row_ptr, height); break; case 24: qtrle_decode_24bpp(s, stream_ptr, row_ptr, height); break; case 32: qtrle_decode_32bpp(s, stream_ptr, row_ptr, height); break; default: av_log (s->avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n", avctx->bits_per_coded_sample); break; } if(has_palette) { /* make the palette available on the way out */ memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE); if (s->avctx->palctrl->palette_changed) { s->frame.palette_has_changed = 1; s->avctx->palctrl->palette_changed = 0; } } done: *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; /* always report that the buffer was completely consumed */ return buf_size; } static av_cold int qtrle_decode_end(AVCodecContext *avctx) { QtrleContext *s = avctx->priv_data; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec qtrle_decoder = { "qtrle", 
AVMEDIA_TYPE_VIDEO, CODEC_ID_QTRLE, sizeof(QtrleContext), qtrle_decode_init, NULL, qtrle_decode_end, qtrle_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"), };
123linslouis-android-video-cutter
jni/libavcodec/qtrle.c
C
asf20
17,777
/*
 * audio encoder psychoacoustic model
 * Copyright (C) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_PSYMODEL_H
#define AVCODEC_PSYMODEL_H

#include "avcodec.h"

/** maximum possible number of bands */
#define PSY_MAX_BANDS 128

/**
 * single band psychoacoustic information
 */
typedef struct FFPsyBand {
    int   bits;
    float energy;
    float threshold;
    float distortion;
    float perceptual_weight;
} FFPsyBand;

/**
 * windowing related information
 */
typedef struct FFPsyWindowInfo {
    int window_type[3];    ///< window type (short/long/transitional, etc.) - current, previous and next
    int window_shape;      ///< window shape (sine/KBD/whatever)
    int num_windows;       ///< number of windows in a frame
    int grouping[8];       ///< window grouping (for e.g. AAC)
    int *window_sizes;     ///< sequence of window sizes inside one frame (for e.g. WMA)
} FFPsyWindowInfo;

/**
 * context used by psychoacoustic model
 */
typedef struct FFPsyContext {
    AVCodecContext *avctx;            ///< encoder context
    const struct FFPsyModel *model;   ///< encoder-specific model functions

    FFPsyBand *psy_bands;             ///< frame bands information

    uint8_t **bands;                  ///< scalefactor band sizes for possible frame sizes
    int     *num_bands;               ///< number of scalefactor bands for possible frame sizes
    int num_lens;                     ///< number of scalefactor band sets

    void* model_priv_data;            ///< psychoacoustic model implementation private data
} FFPsyContext;

/**
 * codec-specific psychoacoustic model implementation
 */
typedef struct FFPsyModel {
    const char *name;
    int  (*init)   (FFPsyContext *apc);
    FFPsyWindowInfo (*window)(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type);
    void (*analyze)(FFPsyContext *ctx, int channel, const float *coeffs, FFPsyWindowInfo *wi);
    void (*end)    (FFPsyContext *apc);
} FFPsyModel;

/**
 * Initialize psychoacoustic model.
 *
 * @param ctx        model context
 * @param avctx      codec context
 * @param num_lens   number of possible frame lengths
 * @param bands      scalefactor band lengths for all frame lengths
 * @param num_bands  number of scalefactor bands for all frame lengths
 *
 * @return zero if successful, a negative value if not
 */
av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx,
                        int num_lens,
                        const uint8_t **bands, const int* num_bands);

/**
 * Suggest window sequence for channel.
 *
 * @param ctx       model context
 * @param audio     samples for the current frame
 * @param la        lookahead samples (NULL when unavailable)
 * @param channel   number of channel element to analyze
 * @param prev_type previous window type
 *
 * @return suggested window information in a structure
 */
FFPsyWindowInfo ff_psy_suggest_window(FFPsyContext *ctx,
                                      const int16_t *audio, const int16_t *la,
                                      int channel, int prev_type);

/**
 * Perform psychoacoustic analysis and set band info (threshold, energy).
 *
 * @param ctx     model context
 * @param channel audio channel number
 * @param coeffs  pointer to the transformed coefficients
 * @param wi      window information
 */
void ff_psy_set_band_info(FFPsyContext *ctx, int channel, const float *coeffs,
                          FFPsyWindowInfo *wi);

/**
 * Cleanup model context at the end.
 *
 * @param ctx model context
 */
av_cold void ff_psy_end(FFPsyContext *ctx);

/**************************************************************************
 *                       Audio preprocessing stuff.                       *
 *       This should be moved into some audio filter eventually.          *
 **************************************************************************/
struct FFPsyPreprocessContext;

/**
 * psychoacoustic model audio preprocessing initialization
 */
av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *avctx);

/**
 * Preprocess several channels in the audio frame in order to compress it better.
 *
 * @param ctx      preprocessing context
 * @param audio    samples to preprocess
 * @param dest     place to put filtered samples
 * @param tag      channel number
 * @param channels number of channels to preprocess (some additional work may be done on stereo pair)
 */
void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx,
                       const int16_t *audio, int16_t *dest,
                       int tag, int channels);

/**
 * Cleanup audio preprocessing module.
 */
av_cold void ff_psy_preprocess_end(struct FFPsyPreprocessContext *ctx);

#endif /* AVCODEC_PSYMODEL_H */
123linslouis-android-video-cutter
jni/libavcodec/psymodel.h
C
asf20
5,560
/*
 * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 DSP functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264DSP_H
#define AVCODEC_H264DSP_H

#include <stdint.h>
#include "dsputil.h"

//typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
typedef void (*h264_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int offset);
typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset);

/**
 * Context for storing H.264 DSP functions
 */
typedef struct H264DSPContext{
    /* weighted MC */
    h264_weight_func weight_h264_pixels_tab[10];
    h264_biweight_func biweight_h264_pixels_tab[10];

    /* loop filter */
    void (*h264_v_loop_filter_luma)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_luma)(uint8_t *pix/*align 4 */, int stride, int alpha, int beta, int8_t *tc0);
    /* v/h_loop_filter_luma_intra: align 16 */
    void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
    void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
    void (*h264_v_loop_filter_chroma)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_chroma)(uint8_t *pix/*align 4*/, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
    void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
    // h264_loop_filter_strength: simd only. the C version is inlined in h264.c
    void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                      int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field);

    /* IDCT */
    /* NOTE!!! if you implement any of h264_idct8_add, h264_idct8_add4
       then you must implement all of them.
       NOTE!!! if you implement any of h264_idct_add, h264_idct_add16,
       h264_idct_add16intra, h264_idct_add8 then you must implement all of them.
       The reason for the above is that no 2 functions out of one list may use a
       different coefficient permutation. */
    void (*h264_idct_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
    void (*h264_idct8_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
    void (*h264_idct_dc_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
    void (*h264_idct8_dc_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
    void (*h264_dct)(DCTELEM block[4][4]);
    void (*h264_idct_add16)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
    void (*h264_idct8_add4)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
    void (*h264_idct_add8)(uint8_t **dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
    void (*h264_idct_add16intra)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
}H264DSPContext;

/* generic init plus per-architecture SIMD overrides */
void ff_h264dsp_init(H264DSPContext *c);
void ff_h264dsp_init_arm(H264DSPContext *c);
void ff_h264dsp_init_ppc(H264DSPContext *c);
void ff_h264dsp_init_x86(H264DSPContext *c);

#endif /* AVCODEC_H264DSP_H */
123linslouis-android-video-cutter
jni/libavcodec/h264dsp.h
C
asf20
4,377
/*
 * copyright (C) 2006 Corey Hickey
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_LIBXVID_INTERNAL_H
#define AVCODEC_LIBXVID_INTERNAL_H

/**
 * @file
 * common functions for use with the Xvid wrappers
 */

/**
 * Create a temporary file for use by the Xvid wrappers.
 *
 * @param prefix   string used as the leading part of the temporary file name
 * @param filename on success receives the path of the created file;
 *                 NOTE(review): presumably heap-allocated and owned by the
 *                 caller — confirm against the definition in the libxvid
 *                 wrapper source
 * @return presumably an open file descriptor on success and a negative value
 *         on failure — TODO confirm in the implementation
 */
int av_tempfile(char *prefix, char **filename);

#endif /* AVCODEC_LIBXVID_INTERNAL_H */
123linslouis-android-video-cutter
jni/libavcodec/libxvid_internal.h
C
asf20
1,032
/* * QPEG codec * Copyright (c) 2004 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * QPEG codec. */ #include "avcodec.h" typedef struct QpegContext{ AVCodecContext *avctx; AVFrame pic; uint8_t *refdata; } QpegContext; static void qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, int stride, int width, int height) { int i; int code; int c0, c1; int run, copy; int filled = 0; int rows_to_go; rows_to_go = height; height--; dst = dst + height * stride; while((size > 0) && (rows_to_go > 0)) { code = *src++; size--; run = copy = 0; if(code == 0xFC) /* end-of-picture code */ break; if(code >= 0xF8) { /* very long run */ c0 = *src++; c1 = *src++; size -= 2; run = ((code & 0x7) << 16) + (c0 << 8) + c1 + 2; } else if (code >= 0xF0) { /* long run */ c0 = *src++; size--; run = ((code & 0xF) << 8) + c0 + 2; } else if (code >= 0xE0) { /* short run */ run = (code & 0x1F) + 2; } else if (code >= 0xC0) { /* very long copy */ c0 = *src++; c1 = *src++; size -= 2; copy = ((code & 0x3F) << 16) + (c0 << 8) + c1 + 1; } else if (code >= 0x80) { /* long copy */ c0 = *src++; size--; copy = ((code & 0x7F) << 8) + c0 + 1; } else { /* short copy */ copy = code + 1; } /* perform actual run or copy */ if(run) { int p; p = *src++; size--; for(i = 0; i < run; i++) { 
dst[filled++] = p; if (filled >= width) { filled = 0; dst -= stride; rows_to_go--; if(rows_to_go <= 0) break; } } } else { size -= copy; for(i = 0; i < copy; i++) { dst[filled++] = *src++; if (filled >= width) { filled = 0; dst -= stride; rows_to_go--; if(rows_to_go <= 0) break; } } } } } static const int qpeg_table_h[16] = { 0x00, 0x20, 0x20, 0x20, 0x18, 0x10, 0x10, 0x20, 0x10, 0x08, 0x18, 0x08, 0x08, 0x18, 0x10, 0x04}; static const int qpeg_table_w[16] = { 0x00, 0x20, 0x18, 0x08, 0x18, 0x10, 0x20, 0x10, 0x08, 0x10, 0x20, 0x20, 0x08, 0x10, 0x18, 0x04}; /* Decodes delta frames */ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, int stride, int width, int height, int delta, const uint8_t *ctable, uint8_t *refdata) { int i, j; int code; int filled = 0; int orig_height; /* copy prev frame */ for(i = 0; i < height; i++) memcpy(refdata + (i * width), dst + (i * stride), width); orig_height = height; height--; dst = dst + height * stride; while((size > 0) && (height >= 0)) { code = *src++; size--; if(delta) { /* motion compensation */ while((code & 0xF0) == 0xF0) { if(delta == 1) { int me_idx; int me_w, me_h, me_x, me_y; uint8_t *me_plane; int corr, val; /* get block size by index */ me_idx = code & 0xF; me_w = qpeg_table_w[me_idx]; me_h = qpeg_table_h[me_idx]; /* extract motion vector */ corr = *src++; size--; val = corr >> 4; if(val > 7) val -= 16; me_x = val; val = corr & 0xF; if(val > 7) val -= 16; me_y = val; /* check motion vector */ if ((me_x + filled < 0) || (me_x + me_w + filled > width) || (height - me_y - me_h < 0) || (height - me_y > orig_height) || (filled + me_w > width) || (height - me_h < 0)) av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n", me_x, me_y, me_w, me_h, filled, height); else { /* do motion compensation */ me_plane = refdata + (filled + me_x) + (height - me_y) * width; for(j = 0; j < me_h; j++) { for(i = 0; i < me_w; i++) dst[filled + i - (j * stride)] = me_plane[i - (j * width)]; } 
} } code = *src++; size--; } } if(code == 0xE0) /* end-of-picture code */ break; if(code > 0xE0) { /* run code: 0xE1..0xFF */ int p; code &= 0x1F; p = *src++; size--; for(i = 0; i <= code; i++) { dst[filled++] = p; if(filled >= width) { filled = 0; dst -= stride; height--; } } } else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */ code &= 0x1F; for(i = 0; i <= code; i++) { dst[filled++] = *src++; if(filled >= width) { filled = 0; dst -= stride; height--; } } size -= code + 1; } else if(code >= 0x80) { /* skip code: 0x80..0xBF */ int skip; code &= 0x3F; /* codes 0x80 and 0x81 are actually escape codes, skip value minus constant is in the next byte */ if(!code) skip = (*src++) + 64; else if(code == 1) skip = (*src++) + 320; else skip = code; filled += skip; while( filled >= width) { filled -= width; dst -= stride; height--; if(height < 0) break; } } else { /* zero code treated as one-pixel skip */ if(code) dst[filled++] = ctable[code & 0x7F]; else filled++; if(filled >= width) { filled = 0; dst -= stride; height--; } } } } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QpegContext * const a = avctx->priv_data; AVFrame * const p= (AVFrame*)&a->pic; uint8_t* outdata; int delta; if(p->data[0]) avctx->release_buffer(avctx, p); p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } outdata = a->pic.data[0]; if(buf[0x85] == 0x10) { qpeg_decode_intra(buf+0x86, outdata, buf_size - 0x86, a->pic.linesize[0], avctx->width, avctx->height); } else { delta = buf[0x85]; qpeg_decode_inter(buf+0x86, outdata, buf_size - 0x86, a->pic.linesize[0], avctx->width, avctx->height, delta, buf + 4, a->refdata); } /* make the palette available on the way out */ memcpy(a->pic.data[1], a->avctx->palctrl->palette, AVPALETTE_SIZE); if (a->avctx->palctrl->palette_changed) { a->pic.palette_has_changed = 1; 
a->avctx->palctrl->palette_changed = 0; } *data_size = sizeof(AVFrame); *(AVFrame*)data = a->pic; return buf_size; } static av_cold int decode_init(AVCodecContext *avctx){ QpegContext * const a = avctx->priv_data; if (!avctx->palctrl) { av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n"); return -1; } a->avctx = avctx; avctx->pix_fmt= PIX_FMT_PAL8; a->refdata = av_malloc(avctx->width * avctx->height); return 0; } static av_cold int decode_end(AVCodecContext *avctx){ QpegContext * const a = avctx->priv_data; AVFrame * const p= (AVFrame*)&a->pic; if(p->data[0]) avctx->release_buffer(avctx, p); av_free(a->refdata); return 0; } AVCodec qpeg_decoder = { "qpeg", AVMEDIA_TYPE_VIDEO, CODEC_ID_QPEG, sizeof(QpegContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Q-team QPEG"), };
123linslouis-android-video-cutter
jni/libavcodec/qpeg.c
C
asf20
9,757
/*
 * Interface to libfaac for aac encoding
 * Copyright (c) 2002 Gildas Bazin <gbazin@netcourrier.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Interface to libfaac for aac encoding.
 */

#include "avcodec.h"
#include <faac.h>

typedef struct FaacAudioContext {
    faacEncHandle faac_handle;   ///< libfaac encoder instance, owned by this context
} FaacAudioContext;

/**
 * Open the libfaac encoder and translate the AVCodecContext settings into a
 * faac configuration.
 *
 * Fixes over the previous revision: the faac handle and coded_frame are no
 * longer leaked when faacEncSetConfiguration() fails, and the results of
 * faacEncOpen(), avcodec_alloc_frame() and av_malloc() are checked.
 *
 * @return 0 on success, -1 on failure (all resources released)
 */
static av_cold int Faac_encode_init(AVCodecContext *avctx)
{
    FaacAudioContext *s = avctx->priv_data;
    faacEncConfigurationPtr faac_cfg;
    unsigned long samples_input, max_bytes_output;

    /* number of channels */
    if (avctx->channels < 1 || avctx->channels > 6) {
        av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
        return -1;
    }

    s->faac_handle = faacEncOpen(avctx->sample_rate,
                                 avctx->channels,
                                 &samples_input, &max_bytes_output);
    if (!s->faac_handle) {
        av_log(avctx, AV_LOG_ERROR, "faacEncOpen() failed\n");
        return -1;
    }

    /* check faac version */
    faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
    if (faac_cfg->version != FAAC_CFG_VERSION) {
        av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n",
               FAAC_CFG_VERSION, faac_cfg->version);
        goto error;
    }

    /* put the options in the configuration struct */
    switch(avctx->profile) {
    case FF_PROFILE_AAC_MAIN:
        faac_cfg->aacObjectType = MAIN;
        break;
    case FF_PROFILE_UNKNOWN:
    case FF_PROFILE_AAC_LOW:
        faac_cfg->aacObjectType = LOW;
        break;
    case FF_PROFILE_AAC_SSR:
        faac_cfg->aacObjectType = SSR;
        break;
    case FF_PROFILE_AAC_LTP:
        faac_cfg->aacObjectType = LTP;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n");
        goto error;
    }
    faac_cfg->mpegVersion   = MPEG4;
    faac_cfg->useTns        = 0;
    faac_cfg->allowMidside  = 1;
    faac_cfg->bitRate       = avctx->bit_rate / avctx->channels;
    faac_cfg->bandWidth     = avctx->cutoff;
    if (avctx->flags & CODEC_FLAG_QSCALE) {
        /* quality-based (VBR) mode: bitrate field must be zeroed */
        faac_cfg->bitRate   = 0;
        faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
    }
    faac_cfg->outputFormat  = 1;
    faac_cfg->inputFormat   = FAAC_INPUT_16BIT;

    avctx->frame_size = samples_input / avctx->channels;

    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        goto error;
    avctx->coded_frame->key_frame = 1;

    /* Set decoder specific info */
    avctx->extradata_size = 0;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        unsigned char *buffer = NULL;
        unsigned long decoder_specific_info_size;

        if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
                                           &decoder_specific_info_size)) {
            avctx->extradata = av_malloc(decoder_specific_info_size +
                                         FF_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata) {
#undef free
                free(buffer);
#define free please_use_av_free
                goto error;
            }
            avctx->extradata_size = decoder_specific_info_size;
            memcpy(avctx->extradata, buffer, avctx->extradata_size);
            faac_cfg->outputFormat = 0;
        }
        /* buffer was allocated by libfaac with plain malloc(), so it must be
           released with plain free() despite the project-wide ban */
#undef free
        free(buffer);
#define free please_use_av_free
    }

    if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
        av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
        goto error;
    }

    return 0;

error:
    /* release everything acquired above; Faac_encode_close() is not called
       when init fails, so cleanup must happen here */
    av_freep(&avctx->coded_frame);
    av_freep(&avctx->extradata);
    avctx->extradata_size = 0;
    faacEncClose(s->faac_handle);
    s->faac_handle = NULL;
    return -1;
}

/**
 * Encode one frame of interleaved 16-bit samples.
 *
 * @return number of bytes written to frame, or a negative libfaac error
 */
static int Faac_encode_frame(AVCodecContext *avctx,
                             unsigned char *frame, int buf_size, void *data)
{
    FaacAudioContext *s = avctx->priv_data;
    int bytes_written;

    bytes_written = faacEncEncode(s->faac_handle,
                                  data,
                                  avctx->frame_size * avctx->channels,
                                  frame,
                                  buf_size);

    return bytes_written;
}

/** Free the coded frame, extradata and the libfaac handle. */
static av_cold int Faac_encode_close(AVCodecContext *avctx)
{
    FaacAudioContext *s = avctx->priv_data;

    av_freep(&avctx->coded_frame);
    av_freep(&avctx->extradata);

    if (s->faac_handle)
        faacEncClose(s->faac_handle);

    return 0;
}

AVCodec libfaac_encoder = {
    "libfaac",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_AAC,
    sizeof(FaacAudioContext),
    Faac_encode_init,
    Faac_encode_frame,
    Faac_encode_close,
    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("libfaac AAC (Advanced Audio Codec)"),
};
123linslouis-android-video-cutter
jni/libavcodec/libfaac.c
C
asf20
5,124
/*
 * ADX ADPCM codecs
 * Copyright (c) 2001,2003 BERO
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "adx.h"

/**
 * @file
 * SEGA CRI adx codecs.
 *
 * Reference documents:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html
 * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
 */

static av_cold int adx_decode_init(AVCodecContext *avctx)
{
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

/**
 * Decode one 18-byte ADX block into 32 mono samples.
 *
 * Each input byte holds two signed 4-bit deltas; the first two bytes are the
 * block's big-endian scale factor. prev carries the 2-tap predictor state
 * across blocks.
 */
static void adx_decode(short *out, const unsigned char *in, PREV *prev)
{
    int scale = AV_RB16(in);
    int i;
    int s0, s1, s2, d;

    in += 2;
    s1 = prev->s1;
    s2 = prev->s2;
    for (i = 0; i < 16; i++) {
        /* high nibble, sign-extended via arithmetic shift of signed char */
        d  = (signed char)in[i] >> 4;
        s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2) >> 14;
        s2 = s1;
        s1 = av_clip_int16(s0);
        *out++ = s1;

        /* low nibble, sign-extended */
        d  = (signed char)(in[i] << 4) >> 4;
        s0 = (BASEVOL*d*scale + SCALE1*s1 - SCALE2*s2) >> 14;
        s2 = s1;
        s1 = av_clip_int16(s0);
        *out++ = s1;
    }
    prev->s1 = s1;
    prev->s2 = s2;
}

/**
 * Decode two consecutive 18-byte blocks (left then right channel) and
 * interleave the result into 32 stereo sample pairs.
 */
static void adx_decode_stereo(short *out, const unsigned char *in, PREV *prev)
{
    short tmp[32*2];
    int i;

    adx_decode(tmp,    in,    prev);
    adx_decode(tmp+32, in+18, prev+1);
    for (i = 0; i < 32; i++) {
        out[i*2]   = tmp[i];
        out[i*2+1] = tmp[i+32];
    }
}

/**
 * Parse the ADX file header and configure the codec context.
 *
 * The header starts with 0x80 and a big-endian data offset; the string
 * "(c)CRI" sits immediately before the sample data. All reads are now
 * bounds-checked against bufsize (the previous revision could read past the
 * end of a truncated buffer).
 *
 * @return data offset (header size) on success, 0 on failure
 */
static int adx_decode_header(AVCodecContext *avctx, const unsigned char *buf,
                             size_t bufsize)
{
    int offset;

    /* we read buf[0..11] below, and AV_RB32 needs 4 bytes */
    if (bufsize < 12)
        return 0;
    if (buf[0] != 0x80)
        return 0;
    offset = (AV_RB32(buf) ^ 0x80000000) + 4;
    /* offset must leave room for the "(c)CRI" trailer and stay in the buffer */
    if (offset < 6 || bufsize < (size_t)offset ||
        memcmp(buf + offset - 6, "(c)CRI", 6))
        return 0;
    /* only mono and stereo are implemented (see adx_decode_frame) */
    if (buf[7] != 1 && buf[7] != 2)
        return 0;
    avctx->channels    = buf[7];
    avctx->sample_rate = AV_RB32(buf + 8);
    avctx->bit_rate    = avctx->sample_rate * avctx->channels * 18 * 8 / 32;
    return offset;
}

/**
 * Decode an ADX packet into 16-bit PCM.
 *
 * Input that does not end on an 18*channels boundary is buffered in
 * c->dec_temp and completed on the next call. The previous revision copied
 * the buffered-block remainder without checking the packet size, which could
 * over-read the packet and drive `rest` negative; that path is now guarded.
 */
static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf0 = avpkt->data;
    int buf_size = avpkt->size;
    ADXContext *c = avctx->priv_data;
    short *samples = data;
    const uint8_t *buf = buf0;
    int rest = buf_size;

    if (!c->header_parsed) {
        int hdrsize = adx_decode_header(avctx, buf, rest);
        if (hdrsize == 0)
            return -1;
        c->header_parsed = 1;
        buf  += hdrsize;
        rest -= hdrsize;
    }

    /* 18 bytes of data are expanded into 32*2 bytes of audio,
       so guard against buffer overflows */
    if (rest / 18 > *data_size / 64)
        rest = (*data_size / 64) * 18;

    if (c->in_temp) {
        int copysize = 18 * avctx->channels - c->in_temp;
        if (copysize > rest) {
            /* still not enough bytes to finish the buffered block:
               append what we have and wait for the next packet */
            memcpy(c->dec_temp + c->in_temp, buf, rest);
            c->in_temp += rest;
            *data_size = 0;
            return buf_size;
        }
        memcpy(c->dec_temp + c->in_temp, buf, copysize);
        rest -= copysize;
        buf  += copysize;
        if (avctx->channels == 1) {
            adx_decode(samples, c->dec_temp, c->prev);
            samples += 32;
        } else {
            adx_decode_stereo(samples, c->dec_temp, c->prev);
            samples += 32*2;
        }
    }

    if (avctx->channels == 1) {
        while (rest >= 18) {
            adx_decode(samples, buf, c->prev);
            rest    -= 18;
            buf     += 18;
            samples += 32;
        }
    } else {
        while (rest >= 18*2) {
            adx_decode_stereo(samples, buf, c->prev);
            rest    -= 18*2;
            buf     += 18*2;
            samples += 32*2;
        }
    }

    /* stash any trailing partial block for the next call */
    c->in_temp = rest;
    if (rest) {
        memcpy(c->dec_temp, buf, rest);
        buf += rest;
    }
    *data_size = (uint8_t*)samples - (uint8_t*)data;
    return buf - buf0;
}

AVCodec adpcm_adx_decoder = {
    "adpcm_adx",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_ADPCM_ADX,
    sizeof(ADXContext),
    adx_decode_init,
    NULL,
    NULL,
    adx_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
};
123linslouis-android-video-cutter
jni/libavcodec/adxdec.c
C
asf20
4,663
/* * G.729 decoder * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include <inttypes.h> #include <limits.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include "avcodec.h" #include "libavutil/avutil.h" #include "get_bits.h" #include "g729.h" #include "lsp.h" #include "celp_math.h" #include "acelp_filters.h" #include "acelp_pitch_delay.h" #include "acelp_vectors.h" #include "g729data.h" /** * minimum quantized LSF value (3.2.4) * 0.005 in Q13 */ #define LSFQ_MIN 40 /** * maximum quantized LSF value (3.2.4) * 3.135 in Q13 */ #define LSFQ_MAX 25681 /** * minimum LSF distance (3.2.4) * 0.0391 in Q13 */ #define LSFQ_DIFF_MIN 321 /** * minimum gain pitch value (3.8, Equation 47) * 0.2 in (1.14) */ #define SHARP_MIN 3277 /** * maximum gain pitch value (3.8, Equation 47) * (EE) This does not comply with the specification. * Specification says about 0.8, which should be * 13107 in (1.14), but reference C code uses * 13017 (equals to 0.7945) instead of it. 
*/ #define SHARP_MAX 13017 typedef struct { uint8_t ac_index_bits[2]; ///< adaptive codebook index for second subframe (size in bits) uint8_t parity_bit; ///< parity bit for pitch delay uint8_t gc_1st_index_bits; ///< gain codebook (first stage) index (size in bits) uint8_t gc_2nd_index_bits; ///< gain codebook (second stage) index (size in bits) uint8_t fc_signs_bits; ///< number of pulses in fixed-codebook vector uint8_t fc_indexes_bits; ///< size (in bits) of fixed-codebook index entry } G729FormatDescription; typedef struct { int pitch_delay_int_prev; ///< integer part of previous subframe's pitch delay (4.1.3) /// (2.13) LSP quantizer outputs int16_t past_quantizer_output_buf[MA_NP + 1][10]; int16_t* past_quantizer_outputs[MA_NP + 1]; int16_t lsfq[10]; ///< (2.13) quantized LSF coefficients from previous frame int16_t lsp_buf[2][10]; ///< (0.15) LSP coefficients (previous and current frames) (3.2.5) int16_t *lsp[2]; ///< pointers to lsp_buf } G729Context; static const G729FormatDescription format_g729_8k = { .ac_index_bits = {8,5}, .parity_bit = 1, .gc_1st_index_bits = GC_1ST_IDX_BITS_8K, .gc_2nd_index_bits = GC_2ND_IDX_BITS_8K, .fc_signs_bits = 4, .fc_indexes_bits = 13, }; static const G729FormatDescription format_g729d_6k4 = { .ac_index_bits = {8,4}, .parity_bit = 0, .gc_1st_index_bits = GC_1ST_IDX_BITS_6K4, .gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4, .fc_signs_bits = 2, .fc_indexes_bits = 9, }; /** * \brief pseudo random number generator */ static inline uint16_t g729_prng(uint16_t value) { return 31821 * value + 13849; } /** * Get parity bit of bit 2..7 */ static inline int get_parity(uint8_t value) { return (0x6996966996696996ULL >> (value >> 2)) & 1; } static void lsf_decode(int16_t* lsfq, int16_t* past_quantizer_outputs[MA_NP + 1], int16_t ma_predictor, int16_t vq_1st, int16_t vq_2nd_low, int16_t vq_2nd_high) { int i,j; static const uint8_t min_distance[2]={10, 5}; //(2.13) int16_t* quantizer_output = past_quantizer_outputs[MA_NP]; for (i = 0; i < 5; i++) 
{ quantizer_output[i] = cb_lsp_1st[vq_1st][i ] + cb_lsp_2nd[vq_2nd_low ][i ]; quantizer_output[i + 5] = cb_lsp_1st[vq_1st][i + 5] + cb_lsp_2nd[vq_2nd_high][i + 5]; } for (j = 0; j < 2; j++) { for (i = 1; i < 10; i++) { int diff = (quantizer_output[i - 1] - quantizer_output[i] + min_distance[j]) >> 1; if (diff > 0) { quantizer_output[i - 1] -= diff; quantizer_output[i ] += diff; } } } for (i = 0; i < 10; i++) { int sum = quantizer_output[i] * cb_ma_predictor_sum[ma_predictor][i]; for (j = 0; j < MA_NP; j++) sum += past_quantizer_outputs[j][i] * cb_ma_predictor[ma_predictor][j][i]; lsfq[i] = sum >> 15; } /* Rotate past_quantizer_outputs. */ memmove(past_quantizer_outputs + 1, past_quantizer_outputs, MA_NP * sizeof(int16_t*)); past_quantizer_outputs[0] = quantizer_output; ff_acelp_reorder_lsf(lsfq, LSFQ_DIFF_MIN, LSFQ_MIN, LSFQ_MAX, 10); } static av_cold int decoder_init(AVCodecContext * avctx) { G729Context* ctx = avctx->priv_data; int i,k; if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels); return AVERROR(EINVAL); } /* Both 8kbit/s and 6.4kbit/s modes uses two subframes per frame. 
*/ avctx->frame_size = SUBFRAME_SIZE << 1; for (k = 0; k < MA_NP + 1; k++) { ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k]; for (i = 1; i < 11; i++) ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3; } ctx->lsp[0] = ctx->lsp_buf[0]; ctx->lsp[1] = ctx->lsp_buf[1]; memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t)); return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int16_t *out_frame = data; GetBitContext gb; G729FormatDescription format; int frame_erasure = 0; ///< frame erasure detected during decoding int bad_pitch = 0; ///< parity check failed int i; G729Context *ctx = avctx->priv_data; int16_t lp[2][11]; // (3.12) uint8_t ma_predictor; ///< switched MA predictor of LSP quantizer uint8_t quantizer_1st; ///< first stage vector of quantizer uint8_t quantizer_2nd_lo; ///< second stage lower vector of quantizer (size in bits) uint8_t quantizer_2nd_hi; ///< second stage higher vector of quantizer (size in bits) int pitch_delay_int; // pitch delay, integer part int pitch_delay_3x; // pitch delay, multiplied by 3 if (*data_size < SUBFRAME_SIZE << 2) { av_log(avctx, AV_LOG_ERROR, "Error processing packet: output buffer too small\n"); return AVERROR(EIO); } if (buf_size == 10) { format = format_g729_8k; av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s"); } else if (buf_size == 8) { format = format_g729d_6k4; av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s"); } else { av_log(avctx, AV_LOG_ERROR, "Packet size %d is unknown.\n", buf_size); return AVERROR_INVALIDDATA; } for (i=0; i < buf_size; i++) frame_erasure |= buf[i]; frame_erasure = !frame_erasure; init_get_bits(&gb, buf, buf_size); ma_predictor = get_bits(&gb, 1); quantizer_1st = get_bits(&gb, VQ_1ST_BITS); quantizer_2nd_lo = get_bits(&gb, VQ_2ND_BITS); quantizer_2nd_hi = get_bits(&gb, VQ_2ND_BITS); lsf_decode(ctx->lsfq, 
ctx->past_quantizer_outputs, ma_predictor, quantizer_1st, quantizer_2nd_lo, quantizer_2nd_hi); ff_acelp_lsf2lsp(ctx->lsp[1], ctx->lsfq, 10); ff_acelp_lp_decode(&lp[0][0], &lp[1][0], ctx->lsp[1], ctx->lsp[0], 10); FFSWAP(int16_t*, ctx->lsp[1], ctx->lsp[0]); for (i = 0; i < 2; i++) { uint8_t ac_index; ///< adaptive codebook index uint8_t pulses_signs; ///< fixed-codebook vector pulse signs int fc_indexes; ///< fixed-codebook indexes uint8_t gc_1st_index; ///< gain codebook (first stage) index uint8_t gc_2nd_index; ///< gain codebook (second stage) index ac_index = get_bits(&gb, format.ac_index_bits[i]); if(!i && format.parity_bit) bad_pitch = get_parity(ac_index) == get_bits1(&gb); fc_indexes = get_bits(&gb, format.fc_indexes_bits); pulses_signs = get_bits(&gb, format.fc_signs_bits); gc_1st_index = get_bits(&gb, format.gc_1st_index_bits); gc_2nd_index = get_bits(&gb, format.gc_2nd_index_bits); if(!i) { if (bad_pitch) pitch_delay_3x = 3 * ctx->pitch_delay_int_prev; else pitch_delay_3x = ff_acelp_decode_8bit_to_1st_delay3(ac_index); } else { int pitch_delay_min = av_clip(ctx->pitch_delay_int_prev - 5, PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9); if(packet_type == FORMAT_G729D_6K4) pitch_delay_3x = ff_acelp_decode_4bit_to_2nd_delay3(ac_index, pitch_delay_min); else pitch_delay_3x = ff_acelp_decode_5_6_bit_to_2nd_delay3(ac_index, pitch_delay_min); } /* Round pitch delay to nearest (used everywhere except ff_acelp_interpolate). 
*/ pitch_delay_int = (pitch_delay_3x + 1) / 3; ff_acelp_weighted_vector_sum(fc + pitch_delay_int, fc + pitch_delay_int, fc, 1 << 14, av_clip(ctx->gain_pitch, SHARP_MIN, SHARP_MAX), 0, 14, SUBFRAME_SIZE - pitch_delay_int); if (frame_erasure) { ctx->gain_pitch = (29491 * ctx->gain_pitch) >> 15; // 0.90 (0.15) ctx->gain_code = ( 2007 * ctx->gain_code ) >> 11; // 0.98 (0.11) gain_corr_factor = 0; } else { ctx->gain_pitch = cb_gain_1st_8k[gc_1st_index][0] + cb_gain_2nd_8k[gc_2nd_index][0]; gain_corr_factor = cb_gain_1st_8k[gc_1st_index][1] + cb_gain_2nd_8k[gc_2nd_index][1]; ff_acelp_weighted_vector_sum(ctx->exc + i * SUBFRAME_SIZE, ctx->exc + i * SUBFRAME_SIZE, fc, (!voicing && frame_erasure) ? 0 : ctx->gain_pitch, ( voicing && frame_erasure) ? 0 : ctx->gain_code, 1 << 13, 14, SUBFRAME_SIZE); ctx->pitch_delay_int_prev = pitch_delay_int; } *data_size = SUBFRAME_SIZE << 2; return buf_size; } AVCodec g729_decoder = { "g729", AVMEDIA_TYPE_AUDIO, CODEC_ID_G729, sizeof(G729Context), decoder_init, NULL, NULL, decode_frame, .long_name = NULL_IF_CONFIG_SMALL("G.729"), };
123linslouis-android-video-cutter
jni/libavcodec/g729dec.c
C
asf20
11,337
/** * @file * VP6 compatible video decoder * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_VP6DATA_H #define AVCODEC_VP6DATA_H #include "vp56data.h" static const uint8_t vp6_def_fdv_vector_model[2][8] = { { 247, 210, 135, 68, 138, 220, 239, 246 }, { 244, 184, 201, 44, 173, 221, 239, 253 }, }; static const uint8_t vp6_def_pdv_vector_model[2][7] = { { 225, 146, 172, 147, 214, 39, 156 }, { 204, 170, 119, 235, 140, 230, 228 }, }; static const uint8_t vp6_def_coeff_reorder[] = { 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6, 7, 7, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, }; static const uint8_t vp6_def_runv_coeff_model[2][14] = { { 198, 197, 196, 146, 198, 204, 169, 142, 130, 136, 149, 149, 191, 249 }, { 135, 201, 181, 154, 98, 117, 132, 126, 146, 169, 184, 240, 246, 254 }, }; static const uint8_t vp6_sig_dct_pct[2][2] = { { 237, 246 }, { 231, 243 }, }; static const uint8_t vp6_pdv_pct[2][7] = { { 253, 253, 254, 254, 254, 254, 254 }, { 245, 253, 254, 254, 254, 254, 254 }, }; static const uint8_t vp6_fdv_pct[2][8] = { { 254, 254, 254, 254, 254, 250, 250, 252 }, { 254, 254, 254, 254, 254, 
251, 251, 254 }, }; static const uint8_t vp6_dccv_pct[2][11] = { { 146, 255, 181, 207, 232, 243, 238, 251, 244, 250, 249 }, { 179, 255, 214, 240, 250, 255, 244, 255, 255, 255, 255 }, }; static const uint8_t vp6_coeff_reorder_pct[] = { 255, 132, 132, 159, 153, 151, 161, 170, 164, 162, 136, 110, 103, 114, 129, 118, 124, 125, 132, 136, 114, 110, 142, 135, 134, 123, 143, 126, 153, 183, 166, 161, 171, 180, 179, 164, 203, 218, 225, 217, 215, 206, 203, 217, 229, 241, 248, 243, 253, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }; static const uint8_t vp6_runv_pct[2][14] = { { 219, 246, 238, 249, 232, 239, 249, 255, 248, 253, 239, 244, 241, 248 }, { 198, 232, 251, 253, 219, 241, 253, 255, 248, 249, 244, 238, 251, 255 }, }; static const uint8_t vp6_ract_pct[3][2][6][11] = { { { { 227, 246, 230, 247, 244, 255, 255, 255, 255, 255, 255 }, { 255, 255, 209, 231, 231, 249, 249, 253, 255, 255, 255 }, { 255, 255, 225, 242, 241, 251, 253, 255, 255, 255, 255 }, { 255, 255, 241, 253, 252, 255, 255, 255, 255, 255, 255 }, { 255, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } }, { { 240, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 240, 253, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } }, { { { 206, 203, 227, 239, 247, 255, 253, 255, 255, 255, 255 }, { 207, 199, 220, 236, 243, 252, 252, 255, 255, 255, 255 }, { 212, 219, 230, 243, 244, 253, 252, 255, 255, 255, 255 }, { 236, 237, 247, 252, 253, 255, 255, 255, 255, 255, 255 }, { 240, 240, 248, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } }, { { 230, 233, 249, 255, 255, 255, 255, 255, 255, 255, 255 }, { 238, 238, 250, 255, 255, 255, 255, 255, 255, 255, 
255 }, { 248, 251, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } }, { { { 225, 239, 227, 231, 244, 253, 243, 255, 255, 253, 255 }, { 232, 234, 224, 228, 242, 249, 242, 252, 251, 251, 255 }, { 235, 249, 238, 240, 251, 255, 249, 255, 253, 253, 255 }, { 249, 253, 251, 250, 255, 255, 255, 255, 255, 255, 255 }, { 251, 250, 249, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } }, { { 243, 244, 250, 250, 255, 255, 255, 255, 255, 255, 255 }, { 249, 248, 250, 253, 255, 255, 255, 255, 255, 255, 255 }, { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } } }; static const int vp6_dccv_lc[3][5][2] = { { { 122, 133 }, { 0, 1 }, { 78, 171 }, { 139, 117 }, { 168, 79 } }, { { 133, 51 }, { 0, 1 }, { 169, 71 }, { 214, 44 }, { 210, 38 } }, { { 142, -16 }, { 0, 1 }, { 221, -30 }, { 246, -3 }, { 203, 17 } }, }; static const uint8_t vp6_coeff_groups[] = { 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, }; static const int16_t vp6_block_copy_filter[17][8][4] = { { { 0, 128, 0, 0 }, /* 0 */ { -3, 122, 9, 0 }, { -4, 109, 24, -1 }, { -5, 91, 45, -3 }, { -4, 68, 68, -4 }, { -3, 45, 91, -5 }, { -1, 24, 109, -4 }, { 0, 9, 122, -3 } }, { { 0, 128, 0, 0 }, /* 1 */ { -4, 124, 9, -1 }, { -5, 110, 25, -2 }, { -6, 91, 46, -3 }, { -5, 69, 69, -5 }, { -3, 46, 91, -6 }, { -2, 25, 110, -5 }, { -1, 9, 124, -4 } }, { { 0, 128, 0, 0 }, /* 2 */ { -4, 123, 10, -1 }, { -6, 110, 26, -2 }, { -7, 92, 47, -4 }, { -6, 70, 70, -6 }, { -4, 47, 92, -7 }, { -2, 26, 110, 
-6 }, { -1, 10, 123, -4 } }, { { 0, 128, 0, 0 }, /* 3 */ { -5, 124, 10, -1 }, { -7, 110, 27, -2 }, { -7, 91, 48, -4 }, { -6, 70, 70, -6 }, { -4, 48, 92, -8 }, { -2, 27, 110, -7 }, { -1, 10, 124, -5 } }, { { 0, 128, 0, 0 }, /* 4 */ { -6, 124, 11, -1 }, { -8, 111, 28, -3 }, { -8, 92, 49, -5 }, { -7, 71, 71, -7 }, { -5, 49, 92, -8 }, { -3, 28, 111, -8 }, { -1, 11, 124, -6 } }, { { 0, 128, 0, 0 }, /* 5 */ { -6, 123, 12, -1 }, { -9, 111, 29, -3 }, { -9, 93, 50, -6 }, { -8, 72, 72, -8 }, { -6, 50, 93, -9 }, { -3, 29, 111, -9 }, { -1, 12, 123, -6 } }, { { 0, 128, 0, 0 }, /* 6 */ { -7, 124, 12, -1 }, { -10, 111, 30, -3 }, { -10, 93, 51, -6 }, { -9, 73, 73, -9 }, { -6, 51, 93, -10 }, { -3, 30, 111, -10 }, { -1, 12, 124, -7 } }, { { 0, 128, 0, 0 }, /* 7 */ { -7, 123, 13, -1 }, { -11, 112, 31, -4 }, { -11, 94, 52, -7 }, { -10, 74, 74, -10 }, { -7, 52, 94, -11 }, { -4, 31, 112, -11 }, { -1, 13, 123, -7 } }, { { 0, 128, 0, 0 }, /* 8 */ { -8, 124, 13, -1 }, { -12, 112, 32, -4 }, { -12, 94, 53, -7 }, { -10, 74, 74, -10 }, { -7, 53, 94, -12 }, { -4, 32, 112, -12 }, { -1, 13, 124, -8 } }, { { 0, 128, 0, 0 }, /* 9 */ { -9, 124, 14, -1 }, { -13, 112, 33, -4 }, { -13, 95, 54, -8 }, { -11, 75, 75, -11 }, { -8, 54, 95, -13 }, { -4, 33, 112, -13 }, { -1, 14, 124, -9 } }, { { 0, 128, 0, 0 }, /* 10 */ { -9, 123, 15, -1 }, { -14, 113, 34, -5 }, { -14, 95, 55, -8 }, { -12, 76, 76, -12 }, { -8, 55, 95, -14 }, { -5, 34, 112, -13 }, { -1, 15, 123, -9 } }, { { 0, 128, 0, 0 }, /* 11 */ { -10, 124, 15, -1 }, { -14, 113, 34, -5 }, { -15, 96, 56, -9 }, { -13, 77, 77, -13 }, { -9, 56, 96, -15 }, { -5, 34, 113, -14 }, { -1, 15, 124, -10 } }, { { 0, 128, 0, 0 }, /* 12 */ { -10, 123, 16, -1 }, { -15, 113, 35, -5 }, { -16, 98, 56, -10 }, { -14, 78, 78, -14 }, { -10, 56, 98, -16 }, { -5, 35, 113, -15 }, { -1, 16, 123, -10 } }, { { 0, 128, 0, 0 }, /* 13 */ { -11, 124, 17, -2 }, { -16, 113, 36, -5 }, { -17, 98, 57, -10 }, { -14, 78, 78, -14 }, { -10, 57, 98, -17 }, { -5, 36, 113, -16 }, { -2, 17, 124, -11 } 
}, { { 0, 128, 0, 0 }, /* 14 */ { -12, 125, 17, -2 }, { -17, 114, 37, -6 }, { -18, 99, 58, -11 }, { -15, 79, 79, -15 }, { -11, 58, 99, -18 }, { -6, 37, 114, -17 }, { -2, 17, 125, -12 } }, { { 0, 128, 0, 0 }, /* 15 */ { -12, 124, 18, -2 }, { -18, 114, 38, -6 }, { -19, 99, 59, -11 }, { -16, 80, 80, -16 }, { -11, 59, 99, -19 }, { -6, 38, 114, -18 }, { -2, 18, 124, -12 } }, { { 0, 128, 0, 0 }, /* 16 */ { -4, 118, 16, -2 }, { -7, 106, 34, -5 }, { -8, 90, 53, -7 }, { -8, 72, 72, -8 }, { -7, 53, 90, -8 }, { -5, 34, 106, -7 }, { -2, 16, 118, -4 } }, }; static const VP56Tree vp6_pcr_tree[] = { { 8, 0}, { 4, 1}, { 2, 2}, {-1}, {-2}, { 2, 3}, {-3}, {-4}, { 8, 4}, { 4, 5}, { 2, 6}, {-5}, {-6}, { 2, 7}, {-7}, {-8}, {-0}, }; static const uint8_t vp6_coord_div[] = { 4, 4, 4, 4, 8, 8 }; static const uint8_t vp6_huff_coeff_map[] = { 13, 14, 11, 0, 1, 15, 16, 18, 2, 17, 3, 4, 19, 20, 5, 6, 21, 22, 7, 8, 9, 10 }; static const uint8_t vp6_huff_run_map[] = { 10, 13, 11, 12, 0, 1, 2, 3, 14, 8, 15, 16, 4, 5, 6, 7 }; #endif /* AVCODEC_VP6DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/vp6data.h
C
asf20
10,636
/* * SIPR decoder for the 16k mode * * Copyright (c) 2008 Vladimir Voroshilov * Copyright (c) 2009 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <math.h> #include "sipr.h" #include "libavutil/mathematics.h" #include "lsp.h" #include "celp_math.h" #include "acelp_vectors.h" #include "acelp_pitch_delay.h" #include "acelp_filters.h" #include "celp_filters.h" #include "sipr16kdata.h" /** * Convert an lsf vector into an lsp vector. 
* * @param lsf input lsf vector * @param lsp output lsp vector */ static void lsf2lsp(const float *lsf, double *lsp) { int i; for (i = 0; i < LP_FILTER_ORDER_16k; i++) lsp[i] = cosf(lsf[i]); } static void dequant(float *out, const int *idx, const float *cbs[]) { int i; for (i = 0; i < 4; i++) memcpy(out + 3*i, cbs[i] + 3*idx[i], 3*sizeof(float)); memcpy(out + 12, cbs[4] + 4*idx[4], 4*sizeof(float)); } static void lsf_decode_fp_16k(float* lsf_history, float* isp_new, const int* parm, int ma_pred) { int i; float isp_q[LP_FILTER_ORDER_16k]; dequant(isp_q, parm, lsf_codebooks_16k); for (i = 0; i < LP_FILTER_ORDER_16k; i++) { isp_new[i] = (1 - qu[ma_pred]) * isp_q[i] + qu[ma_pred] * lsf_history[i] + mean_lsf_16k[i]; } memcpy(lsf_history, isp_q, LP_FILTER_ORDER_16k * sizeof(float)); } static int dec_delay3_1st(int index) { if (index < 390) { return index + 88; } else return 3 * index - 690; } static int dec_delay3_2nd(int index, int pit_min, int pit_max, int pitch_lag_prev) { if (index < 62) { int pitch_delay_min = av_clip(pitch_lag_prev - 10, pit_min, pit_max - 19); return 3 * pitch_delay_min + index - 2; } else return 3 * pitch_lag_prev; } static void postfilter(float *out_data, float* synth, float* iir_mem, float* filt_mem[2], float* mem_preemph) { float buf[30 + LP_FILTER_ORDER_16k]; float *tmpbuf = buf + LP_FILTER_ORDER_16k; float s; int i; for (i = 0; i < LP_FILTER_ORDER_16k; i++) filt_mem[0][i] = iir_mem[i] * ff_pow_0_5[i]; memcpy(tmpbuf - LP_FILTER_ORDER_16k, mem_preemph, LP_FILTER_ORDER_16k*sizeof(*buf)); ff_celp_lp_synthesis_filterf(tmpbuf, filt_mem[1], synth, 30, LP_FILTER_ORDER_16k); memcpy(synth - LP_FILTER_ORDER_16k, mem_preemph, LP_FILTER_ORDER_16k * sizeof(*synth)); ff_celp_lp_synthesis_filterf(synth, filt_mem[0], synth, 30, LP_FILTER_ORDER_16k); memcpy(out_data + 30 - LP_FILTER_ORDER_16k, synth + 30 - LP_FILTER_ORDER_16k, LP_FILTER_ORDER_16k * sizeof(*synth)); ff_celp_lp_synthesis_filterf(out_data + 30, filt_mem[0], synth + 30, 2 * L_SUBFR_16k - 30, 
LP_FILTER_ORDER_16k); memcpy(mem_preemph, out_data + 2*L_SUBFR_16k - LP_FILTER_ORDER_16k, LP_FILTER_ORDER_16k * sizeof(*synth)); FFSWAP(float *, filt_mem[0], filt_mem[1]); for (i = 0, s = 0; i < 30; i++, s += 1.0/30) out_data[i] = tmpbuf[i] + s * (synth[i] - tmpbuf[i]); } /** * Floating point version of ff_acelp_lp_decode(). */ static void acelp_lp_decodef(float *lp_1st, float *lp_2nd, const double *lsp_2nd, const double *lsp_prev) { double lsp_1st[LP_FILTER_ORDER_16k]; int i; /* LSP values for first subframe (3.2.5 of G.729, Equation 24) */ for (i = 0; i < LP_FILTER_ORDER_16k; i++) lsp_1st[i] = (lsp_2nd[i] + lsp_prev[i]) * 0.5; ff_acelp_lspd2lpc(lsp_1st, lp_1st, LP_FILTER_ORDER_16k >> 1); /* LSP values for second subframe (3.2.5 of G.729) */ ff_acelp_lspd2lpc(lsp_2nd, lp_2nd, LP_FILTER_ORDER_16k >> 1); } /** * Floating point version of ff_acelp_decode_gain_code(). */ static float acelp_decode_gain_codef(float gain_corr_factor, const float *fc_v, float mr_energy, const float *quant_energy, const float *ma_prediction_coeff, int subframe_size, int ma_pred_order) { mr_energy += ff_dot_productf(quant_energy, ma_prediction_coeff, ma_pred_order); mr_energy = gain_corr_factor * exp(M_LN10 / 20. 
* mr_energy) / sqrt((0.01 + ff_dot_productf(fc_v, fc_v, subframe_size))); return mr_energy; } #define DIVIDE_BY_3(x) ((x) * 10923 >> 15) void ff_sipr_decode_frame_16k(SiprContext *ctx, SiprParameters *params, float *out_data) { int frame_size = SUBFRAME_COUNT_16k * L_SUBFR_16k; float *synth = ctx->synth_buf + LP_FILTER_ORDER_16k; float lsf_new[LP_FILTER_ORDER_16k]; double lsp_new[LP_FILTER_ORDER_16k]; float Az[2][LP_FILTER_ORDER_16k]; float fixed_vector[L_SUBFR_16k]; float pitch_fac, gain_code; int i; int pitch_delay_3x; float *excitation = ctx->excitation + 292; lsf_decode_fp_16k(ctx->lsf_history, lsf_new, params->vq_indexes, params->ma_pred_switch); ff_set_min_dist_lsf(lsf_new, LSFQ_DIFF_MIN / 2, LP_FILTER_ORDER_16k); lsf2lsp(lsf_new, lsp_new); acelp_lp_decodef(Az[0], Az[1], lsp_new, ctx->lsp_history_16k); memcpy(ctx->lsp_history_16k, lsp_new, LP_FILTER_ORDER_16k * sizeof(double)); memcpy(synth - LP_FILTER_ORDER_16k, ctx->synth, LP_FILTER_ORDER_16k * sizeof(*synth)); for (i = 0; i < SUBFRAME_COUNT_16k; i++) { int i_subfr = i * L_SUBFR_16k; AMRFixed f; float gain_corr_factor; int pitch_delay_int; int pitch_delay_frac; if (!i) { pitch_delay_3x = dec_delay3_1st(params->pitch_delay[i]); } else pitch_delay_3x = dec_delay3_2nd(params->pitch_delay[i], PITCH_MIN, PITCH_MAX, ctx->pitch_lag_prev); pitch_fac = gain_pitch_cb_16k[params->gp_index[i]]; f.pitch_fac = FFMIN(pitch_fac, 1.0); f.pitch_lag = DIVIDE_BY_3(pitch_delay_3x+1); ctx->pitch_lag_prev = f.pitch_lag; pitch_delay_int = DIVIDE_BY_3(pitch_delay_3x + 2); pitch_delay_frac = pitch_delay_3x + 2 - 3*pitch_delay_int; ff_acelp_interpolatef(&excitation[i_subfr], &excitation[i_subfr] - pitch_delay_int + 1, sinc_win, 3, pitch_delay_frac + 1, LP_FILTER_ORDER, L_SUBFR_16k); memset(fixed_vector, 0, sizeof(fixed_vector)); ff_decode_10_pulses_35bits(params->fc_indexes[i], &f, ff_fc_4pulses_8bits_tracks_13, 5, 4); ff_set_fixed_vector(fixed_vector, &f, 1.0, L_SUBFR_16k); gain_corr_factor = gain_cb_16k[params->gc_index[i]]; 
gain_code = gain_corr_factor * acelp_decode_gain_codef(sqrt(L_SUBFR_16k), fixed_vector, 19.0 - 15.0/(0.05*M_LN10/M_LN2), pred_16k, ctx->energy_history, L_SUBFR_16k, 2); ctx->energy_history[1] = ctx->energy_history[0]; ctx->energy_history[0] = 20.0 * log10f(gain_corr_factor); ff_weighted_vector_sumf(&excitation[i_subfr], &excitation[i_subfr], fixed_vector, pitch_fac, gain_code, L_SUBFR_16k); ff_celp_lp_synthesis_filterf(synth + i_subfr, Az[i], &excitation[i_subfr], L_SUBFR_16k, LP_FILTER_ORDER_16k); } memcpy(ctx->synth, synth + frame_size - LP_FILTER_ORDER_16k, LP_FILTER_ORDER_16k * sizeof(*synth)); memmove(ctx->excitation, ctx->excitation + 2 * L_SUBFR_16k, (L_INTERPOL+PITCH_MAX) * sizeof(float)); postfilter(out_data, synth, ctx->iir_mem, ctx->filt_mem, ctx->mem_preemph); memcpy(ctx->iir_mem, Az[1], LP_FILTER_ORDER_16k * sizeof(float)); } void ff_sipr_init_16k(SiprContext *ctx) { int i; for (i = 0; i < LP_FILTER_ORDER_16k; i++) ctx->lsp_history_16k[i] = cos((i + 1) * M_PI/(LP_FILTER_ORDER_16k + 1)); ctx->filt_mem[0] = ctx->filt_buf[0]; ctx->filt_mem[1] = ctx->filt_buf[1]; ctx->pitch_lag_prev = 180; }
123linslouis-android-video-cutter
jni/libavcodec/sipr16k.c
C
asf20
9,220
/*
 * PC Paintbrush PCX (.pcx) image decoder
 * Copyright (c) 2007, 2008 Ivo van Poorten
 *
 * This decoder does not support CGA palettes. I am unable to find samples
 * and Netpbm cannot generate them.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"

/* Per-instance decoder state: a single reusable output frame. */
typedef struct PCXContext {
    AVFrame picture;
} PCXContext;

/* Initialize the decoder: reset the frame and publish it as coded_frame. */
static av_cold int pcx_init(AVCodecContext *avctx) {
    PCXContext *s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->picture);
    avctx->coded_frame= &s->picture;

    return 0;
}

/**
 * Decode one scanline of PCX RLE data into dst, or copy it verbatim when
 * the file is not compressed.
 *
 * NOTE(review): src is never bounds-checked against the end of the input
 * packet, so truncated or corrupted input makes this read past the buffer.
 * A safe version needs an end pointer (or a checked bytestream reader).
 *
 * @param src                current read position in the input packet
 * @param dst                destination, at least bytes_per_scanline bytes
 * @param bytes_per_scanline number of output bytes to produce
 * @param compressed         non-zero if the header declared RLE data
 * @return advanced src pointer
 */
static const uint8_t *pcx_rle_decode(const uint8_t *src, uint8_t *dst,
                                     unsigned int bytes_per_scanline,
                                     int compressed) {
    unsigned int i = 0;
    unsigned char run, value;

    if (compressed) {
        while (i<bytes_per_scanline) {
            run = 1;
            value = *src++;
            /* A byte with both top bits set is a run marker: the low six
             * bits give the run length, the following byte the value.
             * (run may be 0; the inner loop then writes nothing.) */
            if (value >= 0xc0) {
                run = value & 0x3f;
                value = *src++;
            }
            /* Runs are clipped at the scanline boundary. */
            while (i<bytes_per_scanline && run--)
                dst[i++] = value;
        }
    } else {
        memcpy(dst, src, bytes_per_scanline);
        src += bytes_per_scanline;
    }
    return src;
}

/**
 * Read pallen big-endian RGB triplets from *src into a 32-bit palette and
 * zero the remaining entries (the destination always holds 256 entries).
 * Advances *src past the consumed bytes.
 */
static void pcx_palette(const uint8_t **src, uint32_t *dst, unsigned int pallen) {
    unsigned int i;

    for (i=0; i<pallen; i++)
        *dst++ = bytestream_get_be24(src);
    if (pallen < 256)
        memset(dst, 0, (256 - pallen) * sizeof(*dst));
}

/**
 * Decode one complete PCX image from avpkt into an AVFrame.
 *
 * Supported layouts, keyed by (nplanes<<8 | bits_per_pixel): 0x0308 is
 * decoded to RGB24, the other cases in the switch below to PAL8.
 *
 * NOTE(review): the 128-byte header and the trailing 769-byte VGA palette
 * area are accessed without checking that buf_size is large enough; very
 * small packets cause out-of-bounds reads (and palstart can point before
 * the buffer). TODO: add explicit buf_size validation.
 *
 * @return number of input bytes consumed, or -1 on error
 */
static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    PCXContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    int compressed, xmin, ymin, xmax, ymax;
    unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x,
                 bytes_per_scanline;
    uint8_t *ptr;
    uint8_t const *bufstart = buf;

    /* Magic byte 0x0a plus a known version number (<= 5). */
    if (buf[0] != 0x0a || buf[1] > 5) {
        av_log(avctx, AV_LOG_ERROR, "this is not PCX encoded data\n");
        return -1;
    }

    compressed = buf[2];
    /* The image window stored in the header is inclusive on both ends. */
    xmin = AV_RL16(buf+ 4);
    ymin = AV_RL16(buf+ 6);
    xmax = AV_RL16(buf+ 8);
    ymax = AV_RL16(buf+10);

    if (xmax < xmin || ymax < ymin) {
        av_log(avctx, AV_LOG_ERROR, "invalid image dimensions\n");
        return -1;
    }

    w = xmax - xmin + 1;
    h = ymax - ymin + 1;

    bits_per_pixel     = buf[3];
    bytes_per_line     = AV_RL16(buf+66);
    nplanes            = buf[65];
    bytes_per_scanline = nplanes * bytes_per_line;

    /* A scanline must be large enough for all declared pixels. */
    if (bytes_per_scanline < w * bits_per_pixel * nplanes / 8) {
        av_log(avctx, AV_LOG_ERROR, "PCX data is corrupted\n");
        return -1;
    }

    switch ((nplanes<<8) + bits_per_pixel) {
        case 0x0308:
            avctx->pix_fmt = PIX_FMT_RGB24;
            break;
        case 0x0108:
        case 0x0104:
        case 0x0102:
        case 0x0101:
        case 0x0401:
        case 0x0301:
        case 0x0201:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid PCX file\n");
            return -1;
    }

    buf += 128;   /* skip the fixed-size header */

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    if (nplanes == 3 && bits_per_pixel == 8) {
        /* 24 bpp: three 8-bit planes (R, G, B) per scanline, interleaved
         * here into packed RGB24.
         * NOTE(review): the VLA below is sized directly from header
         * fields; a hostile file can request a huge stack allocation. */
        uint8_t scanline[bytes_per_scanline];

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                ptr[3*x  ] = scanline[x                    ];
                ptr[3*x+1] = scanline[x+ bytes_per_line    ];
                ptr[3*x+2] = scanline[x+(bytes_per_line<<1)];
            }

            ptr += stride;
        }

    } else if (nplanes == 1 && bits_per_pixel == 8) {
        /* 8 bpp paletted: pixel bytes are palette indices; the 256-entry
         * VGA palette occupies the last 769 bytes of the file, preceded
         * by a 0x0c marker byte.
         * NOTE(review): palstart underflows bufstart when buf_size < 769. */
        uint8_t scanline[bytes_per_scanline];
        const uint8_t *palstart = bufstart + buf_size - 769;

        for (y=0; y<h; y++, ptr+=stride) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);
            memcpy(ptr, scanline, w);
        }

        /* Resynchronize on the palette if the RLE stream ended early/late. */
        if (buf != palstart) {
            av_log(avctx, AV_LOG_WARNING, "image data possibly corrupted\n");
            buf = palstart;
        }
        if (*buf++ != 12) {
            av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
            return -1;
        }

    } else if (nplanes == 1) {   /* all packed formats, max. 16 colors */
        /* Single plane, 1/2/4 bits per pixel: unpack the bit-packed
         * scanline with a bit reader, one palette index per pixel. */
        uint8_t scanline[bytes_per_scanline];
        GetBitContext s;

        for (y=0; y<h; y++) {
            init_get_bits(&s, scanline, bytes_per_scanline<<3);

            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++)
                ptr[x] = get_bits(&s, bits_per_pixel);
            ptr += stride;
        }

    } else {    /* planar, 4, 8 or 16 colors */
        /* One bit per pixel per plane: rebuild each palette index from
         * one bit of every plane (higher-numbered planes contribute the
         * low bits first — presumably plane order matches bit weight;
         * verify against reference files). */
        uint8_t scanline[bytes_per_scanline];
        int i;

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                int m = 0x80 >> (x&7), v = 0;
                for (i=nplanes - 1; i>=0; i--) {
                    v <<= 1;
                    v += !!(scanline[i*bytes_per_line + (x>>3)] & m);
                }
                ptr[x] = v;
            }
            ptr += stride;
        }
    }

    /* Attach the palette: the trailing 256-entry VGA palette for 8 bpp
     * paletted images, otherwise the 16-entry palette stored inside the
     * header at offset 16. */
    if (nplanes == 1 && bits_per_pixel == 8) {
        pcx_palette(&buf, (uint32_t *) p->data[1], 256);
    } else if (bits_per_pixel < 8) {
        const uint8_t *palette = bufstart+16;
        pcx_palette(&palette, (uint32_t *) p->data[1], 16);
    }

    *picture = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}

/* Release the reusable frame when the codec is closed. */
static av_cold int pcx_end(AVCodecContext *avctx) {
    PCXContext *s = avctx->priv_data;

    if(s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec pcx_decoder = {
    "pcx",                  /* name */
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_PCX,
    sizeof(PCXContext),     /* priv_data_size */
    pcx_init,
    NULL,                   /* no encoder */
    pcx_end,
    pcx_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
};
123linslouis-android-video-cutter
jni/libavcodec/pcx.c
C
asf20
7,325
/*
 * Copyright (C) 2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Standard C DSP-oriented functions cribbed from the original VP3
 * source code.
 */

#include "avcodec.h"
#include "dsputil.h"

/* Rounding bias added (scaled by 2^16) before the final >>20 shift in the
 * DC-only shortcut paths below. */
#define IdctAdjustBeforeShift 8
/* Fixed-point transform constants. NOTE(review): by their naming (xCkSn)
 * and values these are presumably cos(k*pi/16) scaled by 2^16, e.g.
 * xC4S4 = 46341 ~= 0.70711 * 65536 — confirm against the VP3/Theora spec. */
#define xC1S7 64277
#define xC2S6 60547
#define xC3S5 54491
#define xC4S4 46341
#define xC5S3 36410
#define xC6S2 25080
#define xC7S1 12785

/* 16.16 fixed-point multiply: full product scaled back down by 2^16. */
#define M(a,b) (((a) * (b))>>16)

/**
 * Core 8x8 inverse DCT shared by the three public entry points below.
 * First pass transforms the rows of input in place, second pass the
 * columns, with output dispatched according to type.
 *
 * @param dst    destination pixels; not accessed when type == 0
 * @param stride destination line size; unused when type == 0
 * @param input  64 coefficients, used (and overwritten) as scratch
 * @param type   0: in-place transform, result left in input;
 *               1: "put"  — clamped result plus a +128 bias written to dst;
 *               2: "add"  — clamped result added to the existing dst pixels
 */
static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int type)
{
    int16_t *ip = input;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;   /* clamp-to-uint8 lookup */

    int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
    int Ed, Gd, Add, Bdd, Fd, Hd;

    int i;

    /* Inverse DCT on the rows now */
    for (i = 0; i < 8; i++) {
        /* Check for non-zero values */
        if ( ip[0] | ip[1] | ip[2] | ip[3] | ip[4] | ip[5] | ip[6] | ip[7] ) {
            A = M(xC1S7, ip[1]) + M(xC7S1, ip[7]);
            B = M(xC7S1, ip[1]) - M(xC1S7, ip[7]);
            C = M(xC3S5, ip[3]) + M(xC5S3, ip[5]);
            D = M(xC3S5, ip[5]) - M(xC5S3, ip[3]);

            Ad = M(xC4S4, (A - C));
            Bd = M(xC4S4, (B - D));

            Cd = A + C;
            Dd = B + D;

            E = M(xC4S4, (ip[0] + ip[4]));
            F = M(xC4S4, (ip[0] - ip[4]));

            G = M(xC2S6, ip[2]) + M(xC6S2, ip[6]);
            H = M(xC6S2, ip[2]) - M(xC2S6, ip[6]);

            Ed = E - G;
            Gd = E + G;

            Add = F + Ad;
            Bdd = Bd - H;

            Fd = F - Ad;
            Hd = Bd + H;

            /* Final sequence of operations over-write original inputs. */
            ip[0] = Gd + Cd ;
            ip[7] = Gd - Cd ;

            ip[1] = Add + Hd;
            ip[2] = Add - Hd;

            ip[3] = Ed + Dd ;
            ip[4] = Ed - Dd ;

            ip[5] = Fd + Bdd;
            ip[6] = Fd - Bdd;
        }

        ip += 8;            /* next row */
    }

    ip = input;

    /* Second pass: inverse DCT on the columns, writing out per type. */
    for ( i = 0; i < 8; i++) {
        /* Check for non-zero values (bitwise or faster than ||) */
        if ( ip[1 * 8] | ip[2 * 8] | ip[3 * 8] |
             ip[4 * 8] | ip[5 * 8] | ip[6 * 8] | ip[7 * 8] ) {

            A = M(xC1S7, ip[1*8]) + M(xC7S1, ip[7*8]);
            B = M(xC7S1, ip[1*8]) - M(xC1S7, ip[7*8]);
            C = M(xC3S5, ip[3*8]) + M(xC5S3, ip[5*8]);
            D = M(xC3S5, ip[5*8]) - M(xC5S3, ip[3*8]);

            Ad = M(xC4S4, (A - C));
            Bd = M(xC4S4, (B - D));

            Cd = A + C;
            Dd = B + D;

            /* +8 is the rounding bias for the final >>4. */
            E = M(xC4S4, (ip[0*8] + ip[4*8])) + 8;
            F = M(xC4S4, (ip[0*8] - ip[4*8])) + 8;

            /* For "put", fold the +128 pixel bias into the DC terms
             * before the shift (16*128 >> 4 == 128). */
            if(type==1){  //HACK
                E += 16*128;
                F += 16*128;
            }

            G = M(xC2S6, ip[2*8]) + M(xC6S2, ip[6*8]);
            H = M(xC6S2, ip[2*8]) - M(xC2S6, ip[6*8]);

            Ed = E - G;
            Gd = E + G;

            Add = F + Ad;
            Bdd = Bd - H;

            Fd = F - Ad;
            Hd = Bd + H;

            /* Final sequence of operations over-write original inputs. */
            if(type==0){
                /* In-place: store the descaled coefficients back. */
                ip[0*8] = (Gd + Cd )  >> 4;
                ip[7*8] = (Gd - Cd )  >> 4;

                ip[1*8] = (Add + Hd ) >> 4;
                ip[2*8] = (Add - Hd ) >> 4;

                ip[3*8] = (Ed + Dd )  >> 4;
                ip[4*8] = (Ed - Dd )  >> 4;

                ip[5*8] = (Fd + Bdd ) >> 4;
                ip[6*8] = (Fd - Bdd ) >> 4;
            }else if(type==1){
                /* Put: clamp to [0,255] and overwrite dst. */
                dst[0*stride] = cm[(Gd + Cd )  >> 4];
                dst[7*stride] = cm[(Gd - Cd )  >> 4];

                dst[1*stride] = cm[(Add + Hd ) >> 4];
                dst[2*stride] = cm[(Add - Hd ) >> 4];

                dst[3*stride] = cm[(Ed + Dd )  >> 4];
                dst[4*stride] = cm[(Ed - Dd )  >> 4];

                dst[5*stride] = cm[(Fd + Bdd ) >> 4];
                dst[6*stride] = cm[(Fd - Bdd ) >> 4];
            }else{
                /* Add: accumulate onto the prediction already in dst. */
                dst[0*stride] = cm[dst[0*stride] + ((Gd + Cd )  >> 4)];
                dst[7*stride] = cm[dst[7*stride] + ((Gd - Cd )  >> 4)];

                dst[1*stride] = cm[dst[1*stride] + ((Add + Hd ) >> 4)];
                dst[2*stride] = cm[dst[2*stride] + ((Add - Hd ) >> 4)];

                dst[3*stride] = cm[dst[3*stride] + ((Ed + Dd )  >> 4)];
                dst[4*stride] = cm[dst[4*stride] + ((Ed - Dd )  >> 4)];

                dst[5*stride] = cm[dst[5*stride] + ((Fd + Bdd ) >> 4)];
                dst[6*stride] = cm[dst[6*stride] + ((Fd - Bdd ) >> 4)];
            }

        } else {
            /* Column is DC-only: the whole output column is one scaled
             * value, so skip the full butterfly. */
            if(type==0){
                ip[0*8] =
                ip[1*8] =
                ip[2*8] =
                ip[3*8] =
                ip[4*8] =
                ip[5*8] =
                ip[6*8] =
                ip[7*8] = ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20);
            }else if(type==1){
                dst[0*stride]=
                dst[1*stride]=
                dst[2*stride]=
                dst[3*stride]=
                dst[4*stride]=
                dst[5*stride]=
                dst[6*stride]=
                dst[7*stride]= cm[128 + ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20)];
            }else{
                /* Adding zero is a no-op, so only touch dst when the DC
                 * coefficient is non-zero. */
                if(ip[0*8]){
                    int v= ((xC4S4 * ip[0*8] + (IdctAdjustBeforeShift<<16))>>20);
                    dst[0*stride] = cm[dst[0*stride] + v];
                    dst[1*stride] = cm[dst[1*stride] + v];
                    dst[2*stride] = cm[dst[2*stride] + v];
                    dst[3*stride] = cm[dst[3*stride] + v];
                    dst[4*stride] = cm[dst[4*stride] + v];
                    dst[5*stride] = cm[dst[5*stride] + v];
                    dst[6*stride] = cm[dst[6*stride] + v];
                    dst[7*stride] = cm[dst[7*stride] + v];
                }
            }
        }

        ip++;            /* next column */
        dst++;
    }
}

/* In-place inverse transform of a coefficient block. */
void ff_vp3_idct_c(DCTELEM *block/* align 16*/){
    idct(NULL, 0, block, 0);
}

/* Inverse transform and store (with +128 bias) into dest. */
void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/){
    idct(dest, line_size, block, 1);
}

/* Inverse transform and add the residual onto dest. */
void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/){
    idct(dest, line_size, block, 2);
}

/* Fast path for blocks with only a DC coefficient: scale it twice with
 * the same 46341 constant as xC4S4 (once per transform pass, with
 * rounding), then add the single value to all 64 destination pixels. */
void ff_vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/){
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int i, dc = block[0];

    dc = (46341*dc)>>16;
    dc = (46341*dc + (8<<16))>>20;

    for(i = 0; i < 8; i++){
        dest[0] = cm[dest[0]+dc];
        dest[1] = cm[dest[1]+dc];
        dest[2] = cm[dest[2]+dc];
        dest[3] = cm[dest[3]+dc];
        dest[4] = cm[dest[4]+dc];
        dest[5] = cm[dest[5]+dc];
        dest[6] = cm[dest[6]+dc];
        dest[7] = cm[dest[7]+dc];
        dest += line_size;
    }
}

/* Deblock a horizontal block edge: for each of the 8 columns, compute a
 * 4-tap difference across the edge, map it through the bounding_values
 * table, and nudge the two pixels adjacent to the edge (clamped). */
void ff_vp3_v_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values)
{
    unsigned char *end;
    int filter_value;
    const int nstride= -stride;

    for (end= first_pixel + 8; first_pixel < end; first_pixel++) {
        filter_value =
            (first_pixel[2 * nstride] - first_pixel[ stride])
         +3*(first_pixel[0          ] - first_pixel[nstride]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
        first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
        first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value);
    }
}

/* Deblock a vertical block edge: same filter as above, applied across
 * the 8 rows with pixel offsets -2..+1 around the edge. */
void ff_vp3_h_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values)
{
    unsigned char *end;
    int filter_value;

    for (end= first_pixel + 8*stride; first_pixel != end; first_pixel += stride) {
        filter_value =
            (first_pixel[-2] - first_pixel[ 1])
         +3*(first_pixel[ 0] - first_pixel[-1]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
        first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
        first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
    }
}
123linslouis-android-video-cutter
jni/libavcodec/vp3dsp.c
C
asf20
8,676