code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
/* * Generate a header file for hardcoded ff_cos_* tables * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <string.h> #include <math.h> #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define BITS 16 #define FLOATFMT "%.18e" int main(int argc, char *argv[]) { int i, j; int do_sin = argc == 2 && !strcmp(argv[1], "sin"); double (*func)(double) = do_sin ? sin : cos; printf("/* This file was generated by libavcodec/costablegen */\n"); printf("#include \"libavcodec/fft.h\"\n"); for (i = 4; i <= BITS; i++) { int m = 1 << i; double freq = 2*M_PI/m; printf("%s(%i) = {\n ", do_sin ? "SINTABLE" : "COSTABLE", m); for (j = 0; j < m/2 - 1; j++) { int idx = j > m/4 ? m/2 - j : j; if (do_sin && j >= m/4) idx = m/4 - j; printf(" "FLOATFMT",", func(idx*freq)); if ((j & 3) == 3) printf("\n "); } printf(" "FLOATFMT"\n};\n", func(do_sin ? -(m/4 - 1)*freq : freq)); } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/costablegen.c
C
asf20
1,860
/* * H.26L/H.264/AVC/JVT/14496-10/... loop filter * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 part10 loop filter. * @author Michael Niedermayer <michaelni@gmx.at> */ #include "libavutil/intreadwrite.h" #include "internal.h" #include "dsputil.h" #include "avcodec.h" #include "mpegvideo.h" #include "h264.h" #include "mathops.h" #include "rectangle.h" //#undef NDEBUG #include <assert.h> /* Deblocking filter (p153) */ static const uint8_t alpha_table[52*3] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 17, 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, 63, 71, 80, 90,101,113,127,144,162,182,203,226, 255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255, }; static const uint8_t beta_table[52*3] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, }; static const uint8_t tc0_table[52*3][4] = { {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 }, {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 }, {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 
6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 }, {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, }; static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) { const unsigned int index_a = qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta); } } static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { const unsigned int index_a = qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; 
tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta); } } static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) { int i; int index_a = qp + h->slice_alpha_c0_offset; int alpha = alpha_table[index_a]; int beta = beta_table[qp + h->slice_beta_offset]; for( i = 0; i < 8; i++, pix += stride) { const int bS_index = (i >> 1) * bsi; if( bS[bS_index] == 0 ) { continue; } if( bS[bS_index] < 4 ) { const int tc0 = tc0_table[index_a][bS[bS_index]]; const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int tc = tc0; int i_delta; if( FFABS( p2 - p0 ) < beta ) { if(tc0) pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } if( FFABS( q2 - q0 ) < beta ) { if(tc0) pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ if( FFABS( p2 - p0 ) < beta) { const int p3 = pix[-4]; /* 
p0', p1', p2' */ pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; } if( FFABS( q2 - q0 ) < beta) { const int q3 = pix[3]; /* q0', q1', q2' */ pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } }else{ /* p0', q0' */ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } } static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) { int i; int index_a = qp + h->slice_alpha_c0_offset; int alpha = alpha_table[index_a]; int beta = beta_table[qp + h->slice_beta_offset]; for( i = 0; i < 4; i++, pix += stride) { const int bS_index = i*bsi; if( bS[bS_index] == 0 ) { continue; } if( bS[bS_index] < 4 ) { const int tc = tc0_table[index_a][bS[bS_index]] + 1; const int p0 = pix[-1]; const int p1 = pix[-2]; const int q0 = pix[0]; const int q1 = pix[1]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ 
const int p0 = pix[-1]; const int p1 = pix[-2]; const int q0 = pix[0]; const int q1 = pix[1]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */ tprintf(h->s.avctx, "filter_mb_mbaff_edgecv i:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, pix[-3], p1, p0, q0, q1, pix[2], pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } } static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { const unsigned int index_a = qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta); } } static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) { const unsigned int index_a = qp + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta); } } void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { MpegEncContext * const s = &h->s; int 
mb_xy; int mb_type, left_type; int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh; mb_xy = h->mb_xy; if(!h->top_type || !h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) { ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize); return; } assert(!FRAME_MBAFF); left_type= h->left_type[0]; mb_type = s->current_picture.mb_type[mb_xy]; qp = s->current_picture.qscale_table[mb_xy]; qp0 = s->current_picture.qscale_table[mb_xy-1]; qp1 = s->current_picture.qscale_table[h->top_mb_xy]; qpc = get_chroma_qp( h, 0, qp ); qpc0 = get_chroma_qp( h, 0, qp0 ); qpc1 = get_chroma_qp( h, 0, qp1 ); qp0 = (qp + qp0 + 1) >> 1; qp1 = (qp + qp1 + 1) >> 1; qpc0 = (qpc + qpc0 + 1) >> 1; qpc1 = (qpc + qpc1 + 1) >> 1; qp_thresh = 15+52 - h->slice_alpha_c0_offset; if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh && qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh) return; if( IS_INTRA(mb_type) ) { int16_t bS4[4] = {4,4,4,4}; int16_t bS3[4] = {3,3,3,3}; int16_t *bSH = FIELD_PICTURE ? 
bS3 : bS4; if(left_type) filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h); if( IS_8x8DCT(mb_type) ) { filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h); filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h); filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h); } else { filter_mb_edgev( &img_y[4*1], linesize, bS3, qp, h); filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h); filter_mb_edgev( &img_y[4*3], linesize, bS3, qp, h); filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h); filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, h); filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h); filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, h); } if(left_type){ filter_mb_edgecv( &img_cb[2*0], uvlinesize, bS4, qpc0, h); filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h); } filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h); filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h); filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h); filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h); return; } else { LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]); int edges; if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) { edges = 4; AV_WN64A(bS[0][0], 0x0002000200020002ULL); AV_WN64A(bS[0][2], 0x0002000200020002ULL); AV_WN64A(bS[1][0], 0x0002000200020002ULL); AV_WN64A(bS[1][2], 0x0002000200020002ULL); } else { int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 
2 : 1; edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; h->h264dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache, h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE); } if( IS_INTRA(left_type) ) AV_WN64A(bS[0][0], 0x0004000400040004ULL); if( IS_INTRA(h->top_type) ) AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL); #define FILTER(hv,dir,edge)\ if(AV_RN64A(bS[dir][edge])) { \ filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\ if(!(edge&1)) {\ filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ }\ } if(left_type) FILTER(v,0,0); if( edges == 1 ) { FILTER(h,1,0); } else if( IS_8x8DCT(mb_type) ) { FILTER(v,0,2); FILTER(h,1,0); FILTER(h,1,2); } else { FILTER(v,0,1); FILTER(v,0,2); FILTER(v,0,3); FILTER(h,1,0); FILTER(h,1,1); FILTER(h,1,2); FILTER(h,1,3); } #undef FILTER } } static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){ int v; v= h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx]; if(!v && h->ref_cache[0][b_idx]!=-1) v= h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U | FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit; if(h->list_count==2){ if(!v) v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] | h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U | FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit; if(v){ if(h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] | h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx]) return 1; return h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U | FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit | h->mv_cache[1][b_idx][0] - 
h->mv_cache[0][bn_idx][0] + 3 >= 7U | FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit; } } return v; } static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int dir) { MpegEncContext * const s = &h->s; int edge; const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy; const int mbm_type = dir == 0 ? h->left_type[0] : h->top_type; // how often to recheck mv-based bS when iterating between edges static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1}, {0,3,1,1,3,3,3,3}}; const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7]; const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4; // how often to recheck mv-based bS when iterating along each edge const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)); if(mbm_type && !first_vertical_edge_done){ if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0) && IS_INTERLACED(mbm_type&~mb_type) ) { // This is a special case in the norm where the filtering must // be done twice (one each of the field) even if we are in a // frame macroblock. 
// unsigned int tmp_linesize = 2 * linesize; unsigned int tmp_uvlinesize = 2 * uvlinesize; int mbn_xy = mb_xy - 2 * s->mb_stride; int j; for(j=0; j<2; j++, mbn_xy += s->mb_stride){ DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) { AV_WN64A(bS, 0x0003000300030003ULL); } else { if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){ bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]); bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]); bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]); bS[3]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+3]); }else{ const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 4+3*8; int i; for( i = 0; i < 4; i++ ) { bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]); } } } // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h ); filter_mb_edgech( &img_cb[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); filter_mb_edgech( &img_cr[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1, h); } }else{ DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if( IS_INTRA(mb_type|mbm_type)) { AV_WN64A(bS, 0x0003000300030003ULL); if ( (!IS_INTERLACED(mb_type|mbm_type)) || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) ) AV_WN64A(bS, 0x0004000400040004ULL); } else { int i; int mv_done; if( 
dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { AV_WN64A(bS, 0x0001000100010001ULL); mv_done = 1; } else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { int b_idx= 8 + 4; int bn_idx= b_idx - (dir ? 8:1); bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit); mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? 0 : i; int y = dir == 0 ? i : 0; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] | h->non_zero_count_cache[bn_idx] ) { bS[i] = 2; } else if(!mv_done) { bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit); } } } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. if(bS[0]+bS[1]+bS[2]+bS[3]){ qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { filter_mb_edgev( &img_y[0], linesize, bS, qp, h ); { int qp= ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; filter_mb_edgecv( &img_cb[0], uvlinesize, bS, qp, h); if(h->pps.chroma_qp_diff) qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; filter_mb_edgecv( &img_cr[0], uvlinesize, bS, qp, h); } } else { filter_mb_edgeh( &img_y[0], linesize, bS, qp, h ); { int qp= ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; filter_mb_edgech( &img_cb[0], uvlinesize, bS, qp, h); if(h->pps.chroma_qp_diff) qp= ( h->chroma_qp[1] + get_chroma_qp( h, 1, 
s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1; filter_mb_edgech( &img_cr[0], uvlinesize, bS, qp, h); } } } } } /* Calculate bS */ for( edge = 1; edge < edges; edge++ ) { DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type) continue; if( IS_INTRA(mb_type)) { AV_WN64A(bS, 0x0003000300030003ULL); } else { int i; int mv_done; if( edge & mask_edge ) { AV_ZERO64(bS); mv_done = 1; } else if( mask_par0 ) { int b_idx= 8 + 4 + edge * (dir ? 8:1); int bn_idx= b_idx - (dir ? 8:1); bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit); mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? edge : i; int y = dir == 0 ? i : edge; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] | h->non_zero_count_cache[bn_idx] ) { bS[i] = 2; } else if(!mv_done) { bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit); } } if(bS[0]+bS[1]+bS[2]+bS[3] == 0) continue; } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
qp = s->current_picture.qscale_table[mb_xy]; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { filter_mb_edgev( &img_y[4*edge], linesize, bS, qp, h ); if( (edge&1) == 0 ) { filter_mb_edgecv( &img_cb[2*edge], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgecv( &img_cr[2*edge], uvlinesize, bS, h->chroma_qp[1], h); } } else { filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h ); if( (edge&1) == 0 ) { filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h); } } } } void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { MpegEncContext * const s = &h->s; const int mb_xy= mb_x + mb_y*s->mb_stride; const int mb_type = s->current_picture.mb_type[mb_xy]; const int mvy_limit = IS_INTERLACED(mb_type) ? 
2 : 4; int first_vertical_edge_done = 0; av_unused int dir; if (FRAME_MBAFF // and current and left pair do not have the same interlaced type && IS_INTERLACED(mb_type^h->left_type[0]) // and left mb is in available to us && h->left_type[0]) { /* First vertical edge is different in MBAFF frames * There are 8 different bS to compute and 2 different Qp */ DECLARE_ALIGNED(8, int16_t, bS)[8]; int qp[2]; int bqp[2]; int rqp[2]; int mb_qp, mbn0_qp, mbn1_qp; int i; first_vertical_edge_done = 1; if( IS_INTRA(mb_type) ) { AV_WN64A(&bS[0], 0x0004000400040004ULL); AV_WN64A(&bS[4], 0x0004000400040004ULL); } else { static const uint8_t offset[2][2][8]={ { {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1}, {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3}, },{ {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, {7+8*0, 7+8*1, 7+8*2, 7+8*3, 7+8*0, 7+8*1, 7+8*2, 7+8*3}, } }; const uint8_t *off= offset[MB_FIELD][mb_y&1]; for( i = 0; i < 8; i++ ) { int j= MB_FIELD ? i>>2 : i&1; int mbn_xy = h->left_mb_xy[j]; int mbn_type= h->left_type[j]; if( IS_INTRA( mbn_type ) ) bS[i] = 4; else{ bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] | ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ? (h->cbp_table[mbn_xy] & ((MB_FIELD ? (i&2) : (mb_y&1)) ? 
8 : 2)) : h->non_zero_count[mbn_xy][ off[i] ])); } } } mb_qp = s->current_picture.qscale_table[mb_xy]; mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]]; mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]]; qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1; bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1; rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1; qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1; bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1; rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1; /* Filter edge */ tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if(MB_FIELD){ filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0] ); filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1] ); filter_mb_mbaff_edgecv( h, img_cb, uvlinesize, bS , 1, bqp[0] ); filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1] ); filter_mb_mbaff_edgecv( h, img_cr, uvlinesize, bS , 1, rqp[0] ); filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1] ); }else{ filter_mb_mbaff_edgev ( h, img_y , 2* linesize, bS , 2, qp [0] ); filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1] ); filter_mb_mbaff_edgecv( h, img_cb, 2*uvlinesize, bS , 2, bqp[0] ); filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1] ); filter_mb_mbaff_edgecv( h, img_cr, 2*uvlinesize, bS , 2, rqp[0] ); filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1] ); } } #if CONFIG_SMALL for( dir = 0; dir < 2; dir++ ) filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 
0 : first_vertical_edge_done, dir); #else filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, 0); filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, 1); #endif }
123linslouis-android-video-cutter
jni/libavcodec/h264_loopfilter.c
C
asf20
35,677
/* * Header file for hardcoded AAC tables * * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AAC_TABLEGEN_H #define AAC_TABLEGEN_H #include "aac_tablegen_decl.h" #if CONFIG_HARDCODED_TABLES #include "libavcodec/aac_tables.h" #else #include "../libavutil/mathematics.h" float ff_aac_pow2sf_tab[428]; void ff_aac_tableinit(void) { int i; for (i = 0; i < 428; i++) ff_aac_pow2sf_tab[i] = pow(2, (i - 200) / 4.); } #endif /* CONFIG_HARDCODED_TABLES */ #endif /* AAC_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/aac_tablegen.h
C
asf20
1,283
/* * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_DCADSP_H #define AVCODEC_DCADSP_H typedef struct DCADSPContext { void (*lfe_fir)(float *out, const float *in, const float *coefs, int decifactor, float scale, float bias); } DCADSPContext; void ff_dcadsp_init(DCADSPContext *s); void ff_dcadsp_init_arm(DCADSPContext *s); #endif /* AVCODEC_DCADSP_H */
123linslouis-android-video-cutter
jni/libavcodec/dcadsp.h
C
asf20
1,108
/* * Copyright (C) 2007 FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "xiph.h" int ff_split_xiph_headers(uint8_t *extradata, int extradata_size, int first_header_size, uint8_t *header_start[3], int header_len[3]) { int i; if (extradata_size >= 6 && AV_RB16(extradata) == first_header_size) { int overall_len = 6; for (i=0; i<3; i++) { header_len[i] = AV_RB16(extradata); extradata += 2; header_start[i] = extradata; extradata += header_len[i]; if (overall_len > extradata_size - header_len[i]) return -1; overall_len += header_len[i]; } } else if (extradata_size >= 3 && extradata_size < INT_MAX - 0x1ff && extradata[0] == 2) { int overall_len = 3; extradata++; for (i=0; i<2; i++, extradata++) { header_len[i] = 0; for (; overall_len < extradata_size && *extradata==0xff; extradata++) { header_len[i] += 0xff; overall_len += 0xff + 1; } header_len[i] += *extradata; overall_len += *extradata; if (overall_len > extradata_size) return -1; } header_len[2] = extradata_size - overall_len; header_start[0] = extradata; header_start[1] = header_start[0] + header_len[0]; header_start[2] = header_start[1] + header_len[1]; } else { return -1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/xiph.c
C
asf20
2,318
/* * Various fixed-point math operations * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <inttypes.h> #include <limits.h> #include <assert.h> #include "avcodec.h" #include "celp_math.h" #ifdef G729_BITEXACT /** * Cosine table: base_cos[i] = (1<<15) * cos(i*PI/64) */ static const int16_t base_cos[64] = { 32767, 32729, 32610, 32413, 32138, 31786, 31357, 30853, 30274, 29622, 28899, 28106, 27246, 26320, 25330, 24279, 23170, 22006, 20788, 19520, 18205, 16846, 15447, 14010, 12540, 11039, 9512, 7962, 6393, 4808, 3212, 1608, 0, -1608, -3212, -4808, -6393, -7962, -9512, -11039, -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006, -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622, -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729 }; /** * Slope used to compute cos(x) * * cos(ind*64+offset) = base_cos[ind]+offset*slope_cos[ind] * values multiplied by 1<<19 */ static const int16_t slope_cos[64] = { -632, -1893, -3150, -4399, -5638, -6863, -8072, -9261, -10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744, -18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526, -24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726, -25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009, 
-23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637, -17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428, -9261, -8072, -6863, -5638, -4399, -3150, -1893, -632 }; /** * Table used to compute exp2(x) * * tab_exp2[i] = (1<<14) * exp2(i/32) = 2^(i/32) i=0..32 */ static const uint16_t tab_exp2[33] = { 16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911, 20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726, 25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706, 31379, 32066, 32767 }; int16_t ff_cos(uint16_t arg) { uint8_t offset= arg; uint8_t ind = arg >> 8; assert(arg < 0x4000); return FFMAX(base_cos[ind] + ((slope_cos[ind] * offset) >> 12), -0x8000); } int ff_exp2(uint16_t power) { uint16_t frac_x0; uint16_t frac_dx; int result; assert(power <= 0x7fff); frac_x0 = power >> 10; frac_dx = (power & 0x03ff) << 5; result = tab_exp2[frac_x0] << 15; result += frac_dx * (tab_exp2[frac_x0+1] - tab_exp2[frac_x0]); return result >> 10; } #else // G729_BITEXACT /** * Cosine table: base_cos[i] = (1<<15) * cos(i*PI/64) */ static const int16_t tab_cos[65] = { 32767, 32738, 32617, 32421, 32145, 31793, 31364, 30860, 30280, 29629, 28905, 28113, 27252, 26326, 25336, 24285, 23176, 22011, 20793, 19525, 18210, 16851, 15451, 14014, 12543, 11043, 9515, 7965, 6395, 4810, 3214, 1609, 1, -1607, -3211, -4808, -6393, -7962, -9513, -11040, -12541, -14012, -15449, -16848, -18207, -19523, -20791, -22009, -23174, -24283, -25334, -26324, -27250, -28111, -28904, -29627, -30279, -30858, -31363, -31792, -32144, -32419, -32616, -32736, -32768, }; static const uint16_t exp2a[]= { 0, 1435, 2901, 4400, 5931, 7496, 9096, 10730, 12400, 14106, 15850, 17632, 19454, 21315, 23216, 25160, 27146, 29175, 31249, 33368, 35534, 37747, 40009, 42320, 44682, 47095, 49562, 52082, 54657, 57289, 59979, 62727, }; static const uint16_t exp2b[]= { 3, 712, 1424, 2134, 2845, 3557, 4270, 4982, 5696, 6409, 7124, 7839, 8554, 9270, 9986, 10704, 11421, 12138, 12857, 
13576, 14295, 15014, 15734, 16455, 17176, 17898, 18620, 19343, 20066, 20790, 21514, 22238, }; int16_t ff_cos(uint16_t arg) { uint8_t offset= arg; uint8_t ind = arg >> 8; assert(arg <= 0x3fff); return tab_cos[ind] + (offset * (tab_cos[ind+1] - tab_cos[ind]) >> 8); } int ff_exp2(uint16_t power) { unsigned int result= exp2a[power>>10] + 0x10000; assert(power <= 0x7fff); result= (result<<3) + ((result*exp2b[(power>>5)&31])>>17); return result + ((result*(power&31)*89)>>22); } #endif // else G729_BITEXACT /** * Table used to compute log2(x) * * tab_log2[i] = (1<<15) * log2(1 + i/32), i=0..32 */ static const uint16_t tab_log2[33] = { #ifdef G729_BITEXACT 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13967, 15054, 16117, 17156, 18172, 19167, 20142, 21097, 22033, 22951, 23852, 24735, 25603, 26455, 27291, 28113, 28922, 29716, 30497, 31266, 32023, 32767, #else 4, 1459, 2870, 4240, 5572, 6867, 8127, 9355, 10552, 11719, 12858, 13971, 15057, 16120, 17158, 18175, 19170, 20145, 21100, 22036, 22954, 23854, 24738, 25605, 26457, 27294, 28116, 28924, 29719, 30500, 31269, 32025, 32769, #endif }; int ff_log2(uint32_t value) { uint8_t power_int; uint8_t frac_x0; uint16_t frac_dx; // Stripping zeros from beginning power_int = av_log2(value); value <<= (31 - power_int); // b31 is always non-zero now frac_x0 = (value & 0x7c000000) >> 26; // b26-b31 and [32..63] -> [0..31] frac_dx = (value & 0x03fff800) >> 11; value = tab_log2[frac_x0]; value += (frac_dx * (tab_log2[frac_x0+1] - tab_log2[frac_x0])) >> 15; return (power_int << 15) + value; } float ff_dot_productf(const float* a, const float* b, int length) { float sum = 0; int i; for(i=0; i<length; i++) sum += a[i] * b[i]; return sum; }
123linslouis-android-video-cutter
jni/libavcodec/celp_math.c
C
asf20
6,214
/* * Bethesda VID video decoder * Copyright (C) 2007 Nicholas Tung * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Bethesda Softworks VID Video Decoder * @author Nicholas Tung [ntung (at. ntung com] (2007-03) * @sa http://wiki.multimedia.cx/index.php?title=Bethsoft_VID * @sa http://www.svatopluk.com/andux/docs/dfvid.html */ #include "libavutil/common.h" #include "dsputil.h" #include "bethsoftvideo.h" #include "bytestream.h" typedef struct BethsoftvidContext { AVFrame frame; } BethsoftvidContext; static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx) { BethsoftvidContext *vid = avctx->priv_data; vid->frame.reference = 1; vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; avctx->pix_fmt = PIX_FMT_PAL8; return 0; } static void set_palette(AVFrame * frame, const uint8_t * palette_buffer) { uint32_t * palette = (uint32_t *)frame->data[1]; int a; for(a = 0; a < 256; a++){ palette[a] = AV_RB24(&palette_buffer[a * 3]) * 4; } frame->palette_has_changed = 1; } static int bethsoftvid_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; BethsoftvidContext * vid = avctx->priv_data; char block_type; uint8_t * dst; uint8_t 
* frame_end; int remaining = avctx->width; // number of bytes remaining on a line const int wrap_to_next_line = vid->frame.linesize[0] - avctx->width; int code; int yoffset; if (avctx->reget_buffer(avctx, &vid->frame)) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } dst = vid->frame.data[0]; frame_end = vid->frame.data[0] + vid->frame.linesize[0] * avctx->height; switch(block_type = *buf++){ case PALETTE_BLOCK: set_palette(&vid->frame, buf); return 0; case VIDEO_YOFF_P_FRAME: yoffset = bytestream_get_le16(&buf); if(yoffset >= avctx->height) return -1; dst += vid->frame.linesize[0] * yoffset; } // main code while((code = *buf++)){ int length = code & 0x7f; // copy any bytes starting at the current position, and ending at the frame width while(length > remaining){ if(code < 0x80) bytestream_get_buffer(&buf, dst, remaining); else if(block_type == VIDEO_I_FRAME) memset(dst, buf[0], remaining); length -= remaining; // decrement the number of bytes to be copied dst += remaining + wrap_to_next_line; // skip over extra bytes at end of frame remaining = avctx->width; if(dst == frame_end) goto end; } // copy any remaining bytes after / if line overflows if(code < 0x80) bytestream_get_buffer(&buf, dst, length); else if(block_type == VIDEO_I_FRAME) memset(dst, *buf++, length); remaining -= length; dst += length; } end: *data_size = sizeof(AVFrame); *(AVFrame*)data = vid->frame; return buf_size; } static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx) { BethsoftvidContext * vid = avctx->priv_data; if(vid->frame.data[0]) avctx->release_buffer(avctx, &vid->frame); return 0; } AVCodec bethsoftvid_decoder = { .name = "bethsoftvid", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_BETHSOFTVID, .priv_data_size = sizeof(BethsoftvidContext), .init = bethsoftvid_decode_init, .close = bethsoftvid_decode_end, .decode = bethsoftvid_decode_frame, .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Bethesda VID video"), };
123linslouis-android-video-cutter
jni/libavcodec/bethsoftvideo.c
C
asf20
4,620
/* * 4XM codec * Copyright (c) 2003 Michael Niedermayer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * 4XM codec. */ #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "bytestream.h" //#undef NDEBUG //#include <assert.h> #define BLOCK_TYPE_VLC_BITS 5 #define ACDC_VLC_BITS 9 #define CFRAME_BUFFER_COUNT 100 static const uint8_t block_type_tab[2][4][8][2]={ { { //{8,4,2}x{8,4,2} { 0,1}, { 2,2}, { 6,3}, {14,4}, {30,5}, {31,5}, { 0,0} },{ //{8,4}x1 { 0,1}, { 0,0}, { 2,2}, { 6,3}, {14,4}, {15,4}, { 0,0} },{ //1x{8,4} { 0,1}, { 2,2}, { 0,0}, { 6,3}, {14,4}, {15,4}, { 0,0} },{ //1x2, 2x1 { 0,1}, { 0,0}, { 0,0}, { 2,2}, { 6,3}, {14,4}, {15,4} } },{ { //{8,4,2}x{8,4,2} { 1,2}, { 4,3}, { 5,3}, {0,2}, {6,3}, {7,3}, {0,0} },{//{8,4}x1 { 1,2}, { 0,0}, { 2,2}, {0,2}, {6,3}, {7,3}, {0,0} },{//1x{8,4} { 1,2}, { 2,2}, { 0,0}, {0,2}, {6,3}, {7,3}, {0,0} },{//1x2, 2x1 { 1,2}, { 0,0}, { 0,0}, {0,2}, {2,2}, {6,3}, {7,3} } } }; static const uint8_t size2index[4][4]={ {-1, 3, 1, 1}, { 3, 0, 0, 0}, { 2, 0, 0, 0}, { 2, 0, 0, 0}, }; static const int8_t mv[256][2]={ { 0, 0},{ 0, -1},{ -1, 0},{ 1, 0},{ 0, 1},{ -1, -1},{ 1, -1},{ -1, 1}, { 1, 1},{ 0, -2},{ -2, 0},{ 2, 0},{ 0, 2},{ -1, -2},{ 1, -2},{ -2, -1}, { 2, -1},{ -2, 1},{ 2, 1},{ -1, 
2},{ 1, 2},{ -2, -2},{ 2, -2},{ -2, 2}, { 2, 2},{ 0, -3},{ -3, 0},{ 3, 0},{ 0, 3},{ -1, -3},{ 1, -3},{ -3, -1}, { 3, -1},{ -3, 1},{ 3, 1},{ -1, 3},{ 1, 3},{ -2, -3},{ 2, -3},{ -3, -2}, { 3, -2},{ -3, 2},{ 3, 2},{ -2, 3},{ 2, 3},{ 0, -4},{ -4, 0},{ 4, 0}, { 0, 4},{ -1, -4},{ 1, -4},{ -4, -1},{ 4, -1},{ 4, 1},{ -1, 4},{ 1, 4}, { -3, -3},{ -3, 3},{ 3, 3},{ -2, -4},{ -4, -2},{ 4, -2},{ -4, 2},{ -2, 4}, { 2, 4},{ -3, -4},{ 3, -4},{ 4, -3},{ -5, 0},{ -4, 3},{ -3, 4},{ 3, 4}, { -1, -5},{ -5, -1},{ -5, 1},{ -1, 5},{ -2, -5},{ 2, -5},{ 5, -2},{ 5, 2}, { -4, -4},{ -4, 4},{ -3, -5},{ -5, -3},{ -5, 3},{ 3, 5},{ -6, 0},{ 0, 6}, { -6, -1},{ -6, 1},{ 1, 6},{ 2, -6},{ -6, 2},{ 2, 6},{ -5, -4},{ 5, 4}, { 4, 5},{ -6, -3},{ 6, 3},{ -7, 0},{ -1, -7},{ 5, -5},{ -7, 1},{ -1, 7}, { 4, -6},{ 6, 4},{ -2, -7},{ -7, 2},{ -3, -7},{ 7, -3},{ 3, 7},{ 6, -5}, { 0, -8},{ -1, -8},{ -7, -4},{ -8, 1},{ 4, 7},{ 2, -8},{ -2, 8},{ 6, 6}, { -8, 3},{ 5, -7},{ -5, 7},{ 8, -4},{ 0, -9},{ -9, -1},{ 1, 9},{ 7, -6}, { -7, 6},{ -5, -8},{ -5, 8},{ -9, 3},{ 9, -4},{ 7, -7},{ 8, -6},{ 6, 8}, { 10, 1},{-10, 2},{ 9, -5},{ 10, -3},{ -8, -7},{-10, -4},{ 6, -9},{-11, 0}, { 11, 1},{-11, -2},{ -2, 11},{ 7, -9},{ -7, 9},{ 10, 6},{ -4, 11},{ 8, -9}, { 8, 9},{ 5, 11},{ 7,-10},{ 12, -3},{ 11, 6},{ -9, -9},{ 8, 10},{ 5, 12}, {-11, 7},{ 13, 2},{ 6,-12},{ 10, 9},{-11, 8},{ -7, 12},{ 0, 14},{ 14, -2}, { -9, 11},{ -6, 13},{-14, -4},{ -5,-14},{ 5, 14},{-15, -1},{-14, -6},{ 3,-15}, { 11,-11},{ -7, 14},{ -5, 15},{ 8,-14},{ 15, 6},{ 3, 16},{ 7,-15},{-16, 5}, { 0, 17},{-16, -6},{-10, 14},{-16, 7},{ 12, 13},{-16, 8},{-17, 6},{-18, 3}, { -7, 17},{ 15, 11},{ 16, 10},{ 2,-19},{ 3,-19},{-11,-16},{-18, 8},{-19, -6}, { 2,-20},{-17,-11},{-10,-18},{ 8, 19},{-21, -1},{-20, 7},{ -4, 21},{ 21, 5}, { 15, 16},{ 2,-22},{-10,-20},{-22, 5},{ 20,-11},{ -7,-22},{-12, 20},{ 23, -5}, { 13,-20},{ 24, -2},{-15, 19},{-11, 22},{ 16, 19},{ 23,-10},{-18,-18},{ -9,-24}, { 24,-10},{ -3, 26},{-23, 13},{-18,-20},{ 17, 21},{ -4, 27},{ 27, 6},{ 1,-28}, {-11, 
26},{-17,-23},{ 7, 28},{ 11,-27},{ 29, 5},{-23,-19},{-28,-11},{-21, 22}, {-30, 7},{-17, 26},{-27, 16},{ 13, 29},{ 19,-26},{ 10,-31},{-14,-30},{ 20,-27}, {-29, 18},{-16,-31},{-28,-22},{ 21,-30},{-25, 28},{ 26,-29},{ 25,-32},{-32,-32} }; // this is simply the scaled down elementwise product of the standard jpeg quantizer table and the AAN premul table static const uint8_t dequant_table[64]={ 16, 15, 13, 19, 24, 31, 28, 17, 17, 23, 25, 31, 36, 63, 45, 21, 18, 24, 27, 37, 52, 59, 49, 20, 16, 28, 34, 40, 60, 80, 51, 20, 18, 31, 48, 66, 68, 86, 56, 21, 19, 38, 56, 59, 64, 64, 48, 20, 27, 48, 55, 55, 56, 51, 35, 15, 20, 35, 34, 32, 31, 22, 15, 8, }; static VLC block_type_vlc[2][4]; typedef struct CFrameBuffer{ unsigned int allocated_size; unsigned int size; int id; uint8_t *data; }CFrameBuffer; typedef struct FourXContext{ AVCodecContext *avctx; DSPContext dsp; AVFrame current_picture, last_picture; GetBitContext pre_gb; ///< ac/dc prefix GetBitContext gb; const uint8_t *bytestream; const uint16_t *wordstream; int mv[256]; VLC pre_vlc; int last_dc; DECLARE_ALIGNED(16, DCTELEM, block)[6][64]; void *bitstream_buffer; unsigned int bitstream_buffer_size; int version; CFrameBuffer cfrm[CFRAME_BUFFER_COUNT]; } FourXContext; #define FIX_1_082392200 70936 #define FIX_1_414213562 92682 #define FIX_1_847759065 121095 #define FIX_2_613125930 171254 #define MULTIPLY(var,const) (((var)*(const)) >> 16) static void idct(DCTELEM block[64]){ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int tmp10, tmp11, tmp12, tmp13; int z5, z10, z11, z12, z13; int i; int temp[64]; for(i=0; i<8; i++){ tmp10 = block[8*0 + i] + block[8*4 + i]; tmp11 = block[8*0 + i] - block[8*4 + i]; tmp13 = block[8*2 + i] + block[8*6 + i]; tmp12 = MULTIPLY(block[8*2 + i] - block[8*6 + i], FIX_1_414213562) - tmp13; tmp0 = tmp10 + tmp13; tmp3 = tmp10 - tmp13; tmp1 = tmp11 + tmp12; tmp2 = tmp11 - tmp12; z13 = block[8*5 + i] + block[8*3 + i]; z10 = block[8*5 + i] - block[8*3 + i]; z11 = block[8*1 + i] + block[8*7 + i]; 
z12 = block[8*1 + i] - block[8*7 + i]; tmp7 = z11 + z13; tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); z5 = MULTIPLY(z10 + z12, FIX_1_847759065); tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; tmp6 = tmp12 - tmp7; tmp5 = tmp11 - tmp6; tmp4 = tmp10 + tmp5; temp[8*0 + i] = tmp0 + tmp7; temp[8*7 + i] = tmp0 - tmp7; temp[8*1 + i] = tmp1 + tmp6; temp[8*6 + i] = tmp1 - tmp6; temp[8*2 + i] = tmp2 + tmp5; temp[8*5 + i] = tmp2 - tmp5; temp[8*4 + i] = tmp3 + tmp4; temp[8*3 + i] = tmp3 - tmp4; } for(i=0; i<8*8; i+=8){ tmp10 = temp[0 + i] + temp[4 + i]; tmp11 = temp[0 + i] - temp[4 + i]; tmp13 = temp[2 + i] + temp[6 + i]; tmp12 = MULTIPLY(temp[2 + i] - temp[6 + i], FIX_1_414213562) - tmp13; tmp0 = tmp10 + tmp13; tmp3 = tmp10 - tmp13; tmp1 = tmp11 + tmp12; tmp2 = tmp11 - tmp12; z13 = temp[5 + i] + temp[3 + i]; z10 = temp[5 + i] - temp[3 + i]; z11 = temp[1 + i] + temp[7 + i]; z12 = temp[1 + i] - temp[7 + i]; tmp7 = z11 + z13; tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); z5 = MULTIPLY(z10 + z12, FIX_1_847759065); tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; tmp6 = tmp12 - tmp7; tmp5 = tmp11 - tmp6; tmp4 = tmp10 + tmp5; block[0 + i] = (tmp0 + tmp7)>>6; block[7 + i] = (tmp0 - tmp7)>>6; block[1 + i] = (tmp1 + tmp6)>>6; block[6 + i] = (tmp1 - tmp6)>>6; block[2 + i] = (tmp2 + tmp5)>>6; block[5 + i] = (tmp2 - tmp5)>>6; block[4 + i] = (tmp3 + tmp4)>>6; block[3 + i] = (tmp3 - tmp4)>>6; } } static av_cold void init_vlcs(FourXContext *f){ static VLC_TYPE table[8][32][2]; int i; for(i=0; i<8; i++){ block_type_vlc[0][i].table= table[i]; block_type_vlc[0][i].table_allocated= 32; init_vlc(&block_type_vlc[0][i], BLOCK_TYPE_VLC_BITS, 7, &block_type_tab[0][i][0][1], 2, 1, &block_type_tab[0][i][0][0], 2, 1, INIT_VLC_USE_NEW_STATIC); } } static void init_mv(FourXContext *f){ int i; for(i=0; i<256; i++){ if(f->version>1) f->mv[i] = mv[i][0] + mv[i][1] *f->current_picture.linesize[0]/2; else f->mv[i] = (i&15) 
- 8 + ((i>>4)-8)*f->current_picture.linesize[0]/2; } } static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, int dc){ int i; dc*= 0x10001; switch(log2w){ case 0: for(i=0; i<h; i++){ dst[0] = scale*src[0] + dc; if(scale) src += stride; dst += stride; } break; case 1: for(i=0; i<h; i++){ ((uint32_t*)dst)[0] = scale*((uint32_t*)src)[0] + dc; if(scale) src += stride; dst += stride; } break; case 2: for(i=0; i<h; i++){ ((uint32_t*)dst)[0] = scale*((uint32_t*)src)[0] + dc; ((uint32_t*)dst)[1] = scale*((uint32_t*)src)[1] + dc; if(scale) src += stride; dst += stride; } break; case 3: for(i=0; i<h; i++){ ((uint32_t*)dst)[0] = scale*((uint32_t*)src)[0] + dc; ((uint32_t*)dst)[1] = scale*((uint32_t*)src)[1] + dc; ((uint32_t*)dst)[2] = scale*((uint32_t*)src)[2] + dc; ((uint32_t*)dst)[3] = scale*((uint32_t*)src)[3] + dc; if(scale) src += stride; dst += stride; } break; default: assert(0); } } static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride){ const int index= size2index[log2h][log2w]; const int h= 1<<log2h; int code= get_vlc2(&f->gb, block_type_vlc[1-(f->version>1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start= (uint16_t*)f->last_picture.data[0]; uint16_t *end= start + stride*(f->avctx->height-h+1) - (1<<log2w); assert(code>=0 && code<=6); if(code == 0){ src += f->mv[ *f->bytestream++ ]; if(start > src || src > end){ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, 0); }else if(code == 1){ log2h--; decode_p_block(f, dst , src , log2w, log2h, stride); decode_p_block(f, dst + (stride<<log2h), src + (stride<<log2h), log2w, log2h, stride); }else if(code == 2){ log2w--; decode_p_block(f, dst , src , log2w, log2h, stride); decode_p_block(f, dst + (1<<log2w), src + (1<<log2w), log2w, log2h, stride); }else if(code == 3 && f->version<2){ mcdc(dst, src, log2w, h, stride, 1, 0); }else if(code == 4){ src += f->mv[ *f->bytestream++ ]; 
if(start > src || src > end){ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, le2me_16(*f->wordstream++)); }else if(code == 5){ mcdc(dst, src, log2w, h, stride, 0, le2me_16(*f->wordstream++)); }else if(code == 6){ if(log2w){ dst[0] = le2me_16(*f->wordstream++); dst[1] = le2me_16(*f->wordstream++); }else{ dst[0 ] = le2me_16(*f->wordstream++); dst[stride] = le2me_16(*f->wordstream++); } } } static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){ int x, y; const int width= f->avctx->width; const int height= f->avctx->height; uint16_t *src= (uint16_t*)f->last_picture.data[0]; uint16_t *dst= (uint16_t*)f->current_picture.data[0]; const int stride= f->current_picture.linesize[0]>>1; unsigned int bitstream_size, bytestream_size, wordstream_size, extra; if(f->version>1){ extra=20; bitstream_size= AV_RL32(buf+8); wordstream_size= AV_RL32(buf+12); bytestream_size= AV_RL32(buf+16); }else{ extra=0; bitstream_size = AV_RL16(buf-4); wordstream_size= AV_RL16(buf-2); bytestream_size= FFMAX(length - bitstream_size - wordstream_size, 0); } if(bitstream_size+ bytestream_size+ wordstream_size + extra != length || bitstream_size > (1<<26) || bytestream_size > (1<<26) || wordstream_size > (1<<26) ){ av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size, bitstream_size+ bytestream_size+ wordstream_size - length); return -1; } av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!f->bitstream_buffer) return AVERROR(ENOMEM); f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra), bitstream_size/4); init_get_bits(&f->gb, f->bitstream_buffer, 8*bitstream_size); f->wordstream= (const uint16_t*)(buf + extra + bitstream_size); f->bytestream= buf + extra + bitstream_size + wordstream_size; init_mv(f); for(y=0; y<height; y+=8){ for(x=0; x<width; x+=8){ decode_p_block(f, dst + x, src + x, 3, 3, stride); 
} src += 8*stride; dst += 8*stride; } if( bitstream_size != (get_bits_count(&f->gb)+31)/32*4 || (((const char*)f->wordstream - (const char*)buf + 2)&~2) != extra + bitstream_size + wordstream_size || (((const char*)f->bytestream - (const char*)buf + 3)&~3) != extra + bitstream_size + wordstream_size + bytestream_size) av_log(f->avctx, AV_LOG_ERROR, " %d %td %td bytes left\n", bitstream_size - (get_bits_count(&f->gb)+31)/32*4, -(((const char*)f->bytestream - (const char*)buf + 3)&~3) + (extra + bitstream_size + wordstream_size + bytestream_size), -(((const char*)f->wordstream - (const char*)buf + 2)&~2) + (extra + bitstream_size + wordstream_size) ); return 0; } /** * decode block and dequantize. * Note this is almost identical to MJPEG. */ static int decode_i_block(FourXContext *f, DCTELEM *block){ int code, i, j, level, val; /* DC coef */ val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); if (val>>4){ av_log(f->avctx, AV_LOG_ERROR, "error dc run != 0\n"); } if(val) val = get_xbits(&f->gb, val); val = val * dequant_table[0] + f->last_dc; f->last_dc = block[0] = val; /* AC coefs */ i = 1; for(;;) { code = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); /* EOB */ if (code == 0) break; if (code == 0xf0) { i += 16; } else { level = get_xbits(&f->gb, code & 0xf); i += code >> 4; if (i >= 64) { av_log(f->avctx, AV_LOG_ERROR, "run %d oveflow\n", i); return 0; } j= ff_zigzag_direct[i]; block[j] = level * dequant_table[j]; i++; if (i >= 64) break; } } return 0; } static inline void idct_put(FourXContext *f, int x, int y){ DCTELEM (*block)[64]= f->block; int stride= f->current_picture.linesize[0]>>1; int i; uint16_t *dst = ((uint16_t*)f->current_picture.data[0]) + y * stride + x; for(i=0; i<4; i++){ block[i][0] += 0x80*8*8; idct(block[i]); } if(!(f->avctx->flags&CODEC_FLAG_GRAY)){ for(i=4; i<6; i++) idct(block[i]); } /* Note transform is: y= ( 1b + 4g + 2r)/14 cb=( 3b - 2g - 1r)/14 cr=(-1b - 4g + 5r)/14 */ for(y=0; y<8; y++){ for(x=0; x<8; x++){ 
DCTELEM *temp= block[(x>>2) + 2*(y>>2)] + 2*(x&3) + 2*8*(y&3); //FIXME optimize int cb= block[4][x + 8*y]; int cr= block[5][x + 8*y]; int cg= (cb + cr)>>1; int y; cb+=cb; y = temp[0]; dst[0 ]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8); y = temp[1]; dst[1 ]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8); y = temp[8]; dst[ stride]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8); y = temp[9]; dst[1+stride]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8); dst += 2; } dst += 2*stride - 2*8; } } static int decode_i_mb(FourXContext *f){ int i; f->dsp.clear_blocks(f->block[0]); for(i=0; i<6; i++){ if(decode_i_block(f, f->block[i]) < 0) return -1; } return 0; } static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf){ int frequency[512]; uint8_t flag[512]; int up[512]; uint8_t len_tab[257]; int bits_tab[257]; int start, end; const uint8_t *ptr= buf; int j; memset(frequency, 0, sizeof(frequency)); memset(up, -1, sizeof(up)); start= *ptr++; end= *ptr++; for(;;){ int i; for(i=start; i<=end; i++){ frequency[i]= *ptr++; } start= *ptr++; if(start==0) break; end= *ptr++; } frequency[256]=1; while((ptr - buf)&3) ptr++; // 4byte align for(j=257; j<512; j++){ int min_freq[2]= {256*256, 256*256}; int smallest[2]= {0, 0}; int i; for(i=0; i<j; i++){ if(frequency[i] == 0) continue; if(frequency[i] < min_freq[1]){ if(frequency[i] < min_freq[0]){ min_freq[1]= min_freq[0]; smallest[1]= smallest[0]; min_freq[0]= frequency[i];smallest[0]= i; }else{ min_freq[1]= frequency[i];smallest[1]= i; } } } if(min_freq[1] == 256*256) break; frequency[j]= min_freq[0] + min_freq[1]; flag[ smallest[0] ]= 0; flag[ smallest[1] ]= 1; up[ smallest[0] ]= up[ smallest[1] ]= j; frequency[ smallest[0] ]= frequency[ smallest[1] ]= 0; } for(j=0; j<257; j++){ int node; int len=0; int bits=0; for(node= j; up[node] != -1; node= up[node]){ bits += flag[node]<<len; len++; if(len > 31) av_log(f->avctx, AV_LOG_ERROR, "vlc length overflow\n"); //can this happen 
at all ? } bits_tab[j]= bits; len_tab[j]= len; } init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257, len_tab , 1, 1, bits_tab, 4, 4, 0); return ptr; } static int mix(int c0, int c1){ int blue = 2*(c0&0x001F) + (c1&0x001F); int green= (2*(c0&0x03E0) + (c1&0x03E0))>>5; int red = 2*(c0>>10) + (c1>>10); return red/3*1024 + green/3*32 + blue/3; } static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){ int x, y, x2, y2; const int width= f->avctx->width; const int height= f->avctx->height; uint16_t *dst= (uint16_t*)f->current_picture.data[0]; const int stride= f->current_picture.linesize[0]>>1; for(y=0; y<height; y+=16){ for(x=0; x<width; x+=16){ unsigned int color[4], bits; memset(color, 0, sizeof(color)); //warning following is purely guessed ... color[0]= bytestream_get_le16(&buf); color[1]= bytestream_get_le16(&buf); if(color[0]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 1\n"); if(color[1]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 2\n"); color[2]= mix(color[0], color[1]); color[3]= mix(color[1], color[0]); bits= bytestream_get_le32(&buf); for(y2=0; y2<16; y2++){ for(x2=0; x2<16; x2++){ int index= 2*(x2>>2) + 8*(y2>>2); dst[y2*stride+x2]= color[(bits>>index)&3]; } } dst+=16; } dst += 16*stride - width; } return 0; } static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){ int x, y; const int width= f->avctx->width; const int height= f->avctx->height; uint16_t *dst= (uint16_t*)f->current_picture.data[0]; const int stride= f->current_picture.linesize[0]>>1; const unsigned int bitstream_size= AV_RL32(buf); const int token_count av_unused = AV_RL32(buf + bitstream_size + 8); unsigned int prestream_size= 4*AV_RL32(buf + bitstream_size + 4); const uint8_t *prestream= buf + bitstream_size + 12; if(prestream_size + bitstream_size + 12 != length || bitstream_size > (1<<26) || prestream_size > (1<<26)){ av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length); return -1; } prestream= 
read_huffman_tables(f, prestream); init_get_bits(&f->gb, buf + 4, 8*bitstream_size); prestream_size= length + buf - prestream; av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, prestream_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!f->bitstream_buffer) return AVERROR(ENOMEM); f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4); init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size); f->last_dc= 0*128*8*8; for(y=0; y<height; y+=16){ for(x=0; x<width; x+=16){ if(decode_i_mb(f) < 0) return -1; idct_put(f, x, y); } dst += 16*stride; } if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256) av_log(f->avctx, AV_LOG_ERROR, "end mismatch\n"); return 0; } static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FourXContext * const f = avctx->priv_data; AVFrame *picture = data; AVFrame *p, temp; int i, frame_4cc, frame_size; frame_4cc= AV_RL32(buf); if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){ av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4)); } if(frame_4cc == AV_RL32("cfrm")){ int free_index=-1; const int data_size= buf_size - 20; const int id= AV_RL32(buf+12); const int whole_size= AV_RL32(buf+16); CFrameBuffer *cfrm; for(i=0; i<CFRAME_BUFFER_COUNT; i++){ if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number) av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id); } for(i=0; i<CFRAME_BUFFER_COUNT; i++){ if(f->cfrm[i].id == id) break; if(f->cfrm[i].size == 0 ) free_index= i; } if(i>=CFRAME_BUFFER_COUNT){ i= free_index; f->cfrm[i].id= id; } cfrm= &f->cfrm[i]; cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE); if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL av_log(f->avctx, AV_LOG_ERROR, "realloc falure"); return -1; } memcpy(cfrm->data + cfrm->size, buf+20, 
data_size); cfrm->size += data_size; if(cfrm->size >= whole_size){ buf= cfrm->data; frame_size= cfrm->size; if(id != avctx->frame_number){ av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n", id, avctx->frame_number); } cfrm->size= cfrm->id= 0; frame_4cc= AV_RL32("pfrm"); }else return buf_size; }else{ buf= buf + 12; frame_size= buf_size - 12; } temp= f->current_picture; f->current_picture= f->last_picture; f->last_picture= temp; p= &f->current_picture; avctx->coded_frame= p; avctx->flags |= CODEC_FLAG_EMU_EDGE; // alternatively we would have to use our own buffer management if(p->data[0]) avctx->release_buffer(avctx, p); p->reference= 1; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } if(frame_4cc == AV_RL32("ifr2")){ p->pict_type= FF_I_TYPE; if(decode_i2_frame(f, buf-4, frame_size) < 0) return -1; }else if(frame_4cc == AV_RL32("ifrm")){ p->pict_type= FF_I_TYPE; if(decode_i_frame(f, buf, frame_size) < 0) return -1; }else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){ p->pict_type= FF_P_TYPE; if(decode_p_frame(f, buf, frame_size) < 0) return -1; }else if(frame_4cc == AV_RL32("snd_")){ av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size); }else{ av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size); } p->key_frame= p->pict_type == FF_I_TYPE; *picture= *p; *data_size = sizeof(AVPicture); emms_c(); return buf_size; } static av_cold void common_init(AVCodecContext *avctx){ FourXContext * const f = avctx->priv_data; dsputil_init(&f->dsp, avctx); f->avctx= avctx; } static av_cold int decode_init(AVCodecContext *avctx){ FourXContext * const f = avctx->priv_data; if(avctx->extradata_size != 4 || !avctx->extradata) { av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n"); return 1; } f->version= AV_RL32(avctx->extradata)>>16; common_init(avctx); init_vlcs(f); if(f->version>2) avctx->pix_fmt= PIX_FMT_RGB565; else avctx->pix_fmt= PIX_FMT_BGR555; 
return 0; } static av_cold int decode_end(AVCodecContext *avctx){ FourXContext * const f = avctx->priv_data; int i; av_freep(&f->bitstream_buffer); f->bitstream_buffer_size=0; for(i=0; i<CFRAME_BUFFER_COUNT; i++){ av_freep(&f->cfrm[i].data); f->cfrm[i].allocated_size= 0; } free_vlc(&f->pre_vlc); if(f->current_picture.data[0]) avctx->release_buffer(avctx, &f->current_picture); if(f->last_picture.data[0]) avctx->release_buffer(avctx, &f->last_picture); return 0; } AVCodec fourxm_decoder = { "4xm", AVMEDIA_TYPE_VIDEO, CODEC_ID_4XM, sizeof(FourXContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("4X Movie"), };
123linslouis-android-video-cutter
jni/libavcodec/4xm.c
C
asf20
27,255
/* * (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * FFT and MDCT tests. */ #include "libavutil/mathematics.h" #include "libavutil/lfg.h" #include "libavutil/log.h" #include "fft.h" #include <math.h> #include <unistd.h> #include <sys/time.h> #include <stdlib.h> #include <string.h> #undef exit /* reference fft */ #define MUL16(a,b) ((a) * (b)) #define CMAC(pre, pim, are, aim, bre, bim) \ {\ pre += (MUL16(are, bre) - MUL16(aim, bim));\ pim += (MUL16(are, bim) + MUL16(bre, aim));\ } FFTComplex *exptab; static void fft_ref_init(int nbits, int inverse) { int n, i; double c1, s1, alpha; n = 1 << nbits; exptab = av_malloc((n / 2) * sizeof(FFTComplex)); for (i = 0; i < (n/2); i++) { alpha = 2 * M_PI * (float)i / (float)n; c1 = cos(alpha); s1 = sin(alpha); if (!inverse) s1 = -s1; exptab[i].re = c1; exptab[i].im = s1; } } static void fft_ref(FFTComplex *tabr, FFTComplex *tab, int nbits) { int n, i, j, k, n2; double tmp_re, tmp_im, s, c; FFTComplex *q; n = 1 << nbits; n2 = n >> 1; for (i = 0; i < n; i++) { tmp_re = 0; tmp_im = 0; q = tab; for (j = 0; j < n; j++) { k = (i * j) & (n - 1); if (k >= n2) { c = -exptab[k - n2].re; s = -exptab[k - n2].im; } else { c = exptab[k].re; s = exptab[k].im; } CMAC(tmp_re, tmp_im, c, s, q->re, q->im); q++; } tabr[i].re 
= tmp_re; tabr[i].im = tmp_im; } } static void imdct_ref(float *out, float *in, int nbits) { int n = 1<<nbits; int k, i, a; double sum, f; for (i = 0; i < n; i++) { sum = 0; for (k = 0; k < n/2; k++) { a = (2 * i + 1 + (n / 2)) * (2 * k + 1); f = cos(M_PI * a / (double)(2 * n)); sum += f * in[k]; } out[i] = -sum; } } /* NOTE: no normalisation by 1 / N is done */ static void mdct_ref(float *output, float *input, int nbits) { int n = 1<<nbits; int k, i; double a, s; /* do it by hand */ for (k = 0; k < n/2; k++) { s = 0; for (i = 0; i < n; i++) { a = (2*M_PI*(2*i+1+n/2)*(2*k+1) / (4 * n)); s += input[i] * cos(a); } output[k] = s; } } static void idct_ref(float *output, float *input, int nbits) { int n = 1<<nbits; int k, i; double a, s; /* do it by hand */ for (i = 0; i < n; i++) { s = 0.5 * input[0]; for (k = 1; k < n; k++) { a = M_PI*k*(i+0.5) / n; s += input[k] * cos(a); } output[i] = 2 * s / n; } } static void dct_ref(float *output, float *input, int nbits) { int n = 1<<nbits; int k, i; double a, s; /* do it by hand */ for (k = 0; k < n; k++) { s = 0; for (i = 0; i < n; i++) { a = M_PI*k*(i+0.5) / n; s += input[i] * cos(a); } output[k] = s; } } static float frandom(AVLFG *prng) { return (int16_t)av_lfg_get(prng) / 32768.0; } static int64_t gettime(void) { struct timeval tv; gettimeofday(&tv,NULL); return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; } static void check_diff(float *tab1, float *tab2, int n, double scale) { int i; double max= 0; double error= 0; for (i = 0; i < n; i++) { double e= fabsf(tab1[i] - (tab2[i] / scale)); if (e >= 1e-3) { av_log(NULL, AV_LOG_ERROR, "ERROR %d: %f %f\n", i, tab1[i], tab2[i]); } error+= e*e; if(e>max) max= e; } av_log(NULL, AV_LOG_INFO, "max:%f e:%g\n", max, sqrt(error)/n); } static void help(void) { av_log(NULL, AV_LOG_INFO,"usage: fft-test [-h] [-s] [-i] [-n b]\n" "-h print this help\n" "-s speed test\n" "-m (I)MDCT test\n" "-d (I)DCT test\n" "-r (I)RDFT test\n" "-i inverse transform test\n" "-n b set the transform size to 
2^b\n" "-f x set scale factor for output data of (I)MDCT to x\n" ); exit(1); } enum tf_transform { TRANSFORM_FFT, TRANSFORM_MDCT, TRANSFORM_RDFT, TRANSFORM_DCT, }; int main(int argc, char **argv) { FFTComplex *tab, *tab1, *tab_ref; FFTSample *tab2; int it, i, c; int do_speed = 0; enum tf_transform transform = TRANSFORM_FFT; int do_inverse = 0; FFTContext s1, *s = &s1; FFTContext m1, *m = &m1; RDFTContext r1, *r = &r1; DCTContext d1, *d = &d1; int fft_nbits, fft_size, fft_size_2; double scale = 1.0; AVLFG prng; av_lfg_init(&prng, 1); fft_nbits = 9; for(;;) { c = getopt(argc, argv, "hsimrdn:f:"); if (c == -1) break; switch(c) { case 'h': help(); break; case 's': do_speed = 1; break; case 'i': do_inverse = 1; break; case 'm': transform = TRANSFORM_MDCT; break; case 'r': transform = TRANSFORM_RDFT; break; case 'd': transform = TRANSFORM_DCT; break; case 'n': fft_nbits = atoi(optarg); break; case 'f': scale = atof(optarg); break; } } fft_size = 1 << fft_nbits; fft_size_2 = fft_size >> 1; tab = av_malloc(fft_size * sizeof(FFTComplex)); tab1 = av_malloc(fft_size * sizeof(FFTComplex)); tab_ref = av_malloc(fft_size * sizeof(FFTComplex)); tab2 = av_malloc(fft_size * sizeof(FFTSample)); switch (transform) { case TRANSFORM_MDCT: av_log(NULL, AV_LOG_INFO,"Scale factor is set to %f\n", scale); if (do_inverse) av_log(NULL, AV_LOG_INFO,"IMDCT"); else av_log(NULL, AV_LOG_INFO,"MDCT"); ff_mdct_init(m, fft_nbits, do_inverse, scale); break; case TRANSFORM_FFT: if (do_inverse) av_log(NULL, AV_LOG_INFO,"IFFT"); else av_log(NULL, AV_LOG_INFO,"FFT"); ff_fft_init(s, fft_nbits, do_inverse); fft_ref_init(fft_nbits, do_inverse); break; case TRANSFORM_RDFT: if (do_inverse) av_log(NULL, AV_LOG_INFO,"IDFT_C2R"); else av_log(NULL, AV_LOG_INFO,"DFT_R2C"); ff_rdft_init(r, fft_nbits, do_inverse ? 
IDFT_C2R : DFT_R2C); fft_ref_init(fft_nbits, do_inverse); break; case TRANSFORM_DCT: if (do_inverse) av_log(NULL, AV_LOG_INFO,"DCT_III"); else av_log(NULL, AV_LOG_INFO,"DCT_II"); ff_dct_init(d, fft_nbits, do_inverse ? DCT_III : DCT_II); break; } av_log(NULL, AV_LOG_INFO," %d test\n", fft_size); /* generate random data */ for (i = 0; i < fft_size; i++) { tab1[i].re = frandom(&prng); tab1[i].im = frandom(&prng); } /* checking result */ av_log(NULL, AV_LOG_INFO,"Checking...\n"); switch (transform) { case TRANSFORM_MDCT: if (do_inverse) { imdct_ref((float *)tab_ref, (float *)tab1, fft_nbits); ff_imdct_calc(m, tab2, (float *)tab1); check_diff((float *)tab_ref, tab2, fft_size, scale); } else { mdct_ref((float *)tab_ref, (float *)tab1, fft_nbits); ff_mdct_calc(m, tab2, (float *)tab1); check_diff((float *)tab_ref, tab2, fft_size / 2, scale); } break; case TRANSFORM_FFT: memcpy(tab, tab1, fft_size * sizeof(FFTComplex)); ff_fft_permute(s, tab); ff_fft_calc(s, tab); fft_ref(tab_ref, tab1, fft_nbits); check_diff((float *)tab_ref, (float *)tab, fft_size * 2, 1.0); break; case TRANSFORM_RDFT: if (do_inverse) { tab1[ 0].im = 0; tab1[fft_size_2].im = 0; for (i = 1; i < fft_size_2; i++) { tab1[fft_size_2+i].re = tab1[fft_size_2-i].re; tab1[fft_size_2+i].im = -tab1[fft_size_2-i].im; } memcpy(tab2, tab1, fft_size * sizeof(FFTSample)); tab2[1] = tab1[fft_size_2].re; ff_rdft_calc(r, tab2); fft_ref(tab_ref, tab1, fft_nbits); for (i = 0; i < fft_size; i++) { tab[i].re = tab2[i]; tab[i].im = 0; } check_diff((float *)tab_ref, (float *)tab, fft_size * 2, 0.5); } else { for (i = 0; i < fft_size; i++) { tab2[i] = tab1[i].re; tab1[i].im = 0; } ff_rdft_calc(r, tab2); fft_ref(tab_ref, tab1, fft_nbits); tab_ref[0].im = tab_ref[fft_size_2].re; check_diff((float *)tab_ref, (float *)tab2, fft_size, 1.0); } break; case TRANSFORM_DCT: memcpy(tab, tab1, fft_size * sizeof(FFTComplex)); ff_dct_calc(d, tab); if (do_inverse) { idct_ref(tab_ref, tab1, fft_nbits); } else { dct_ref(tab_ref, tab1, fft_nbits); 
} check_diff((float *)tab_ref, (float *)tab, fft_size, 1.0); break; } /* do a speed test */ if (do_speed) { int64_t time_start, duration; int nb_its; av_log(NULL, AV_LOG_INFO,"Speed test...\n"); /* we measure during about 1 seconds */ nb_its = 1; for(;;) { time_start = gettime(); for (it = 0; it < nb_its; it++) { switch (transform) { case TRANSFORM_MDCT: if (do_inverse) { ff_imdct_calc(m, (float *)tab, (float *)tab1); } else { ff_mdct_calc(m, (float *)tab, (float *)tab1); } break; case TRANSFORM_FFT: memcpy(tab, tab1, fft_size * sizeof(FFTComplex)); ff_fft_calc(s, tab); break; case TRANSFORM_RDFT: memcpy(tab2, tab1, fft_size * sizeof(FFTSample)); ff_rdft_calc(r, tab2); break; case TRANSFORM_DCT: memcpy(tab2, tab1, fft_size * sizeof(FFTSample)); ff_dct_calc(d, tab2); break; } } duration = gettime() - time_start; if (duration >= 1000000) break; nb_its *= 2; } av_log(NULL, AV_LOG_INFO,"time: %0.1f us/transform [total time=%0.2f s its=%d]\n", (double)duration / nb_its, (double)duration / 1000000.0, nb_its); } switch (transform) { case TRANSFORM_MDCT: ff_mdct_end(m); break; case TRANSFORM_FFT: ff_fft_end(s); break; case TRANSFORM_RDFT: ff_rdft_end(r); break; case TRANSFORM_DCT: ff_dct_end(d); break; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/fft-test.c
C
asf20
11,990
/* * VC-1 and WMV3 decoder - DSP functions * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * VC-1 and WMV3 decoder * */ #include "dsputil.h" /** Apply overlap transform to horizontal edge */ static void vc1_v_overlap_c(uint8_t* src, int stride) { int i; int a, b, c, d; int d1, d2; int rnd = 1; for(i = 0; i < 8; i++) { a = src[-2*stride]; b = src[-stride]; c = src[0]; d = src[stride]; d1 = (a - d + 3 + rnd) >> 3; d2 = (a - d + b - c + 4 - rnd) >> 3; src[-2*stride] = a - d1; src[-stride] = av_clip_uint8(b - d2); src[0] = av_clip_uint8(c + d2); src[stride] = d + d1; src++; rnd = !rnd; } } /** Apply overlap transform to vertical edge */ static void vc1_h_overlap_c(uint8_t* src, int stride) { int i; int a, b, c, d; int d1, d2; int rnd = 1; for(i = 0; i < 8; i++) { a = src[-2]; b = src[-1]; c = src[0]; d = src[1]; d1 = (a - d + 3 + rnd) >> 3; d2 = (a - d + b - c + 4 - rnd) >> 3; src[-2] = a - d1; src[-1] = av_clip_uint8(b - d2); src[0] = av_clip_uint8(c + d2); src[1] = d + d1; src += stride; rnd = !rnd; } } /** * VC-1 in-loop deblocking filter for one line * @param src source block type * @param stride block stride * @param pq block quantizer * @return whether other 3 pairs should be filtered or not * @see 8.6 */ static 
av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3; int a0_sign = a0 >> 31; /* Store sign */ a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */ if(a0 < pq){ int a1 = FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3); int a2 = FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3); if(a1 < a0 || a2 < a0){ int clip = src[-1*stride] - src[ 0*stride]; int clip_sign = clip >> 31; clip = ((clip ^ clip_sign) - clip_sign)>>1; if(clip){ int a3 = FFMIN(a1, a2); int d = 5 * (a3 - a0); int d_sign = (d >> 31); d = ((d ^ d_sign) - d_sign) >> 3; d_sign ^= a0_sign; if( d_sign ^ clip_sign ) d = 0; else{ d = FFMIN(d, clip); d = (d ^ d_sign) - d_sign; /* Restore sign */ src[-1*stride] = cm[src[-1*stride] - d]; src[ 0*stride] = cm[src[ 0*stride] + d]; } return 1; } } } return 0; } /** * VC-1 in-loop deblocking filter * @param src source block type * @param step distance between horizontally adjacent elements * @param stride distance between vertically adjacent elements * @param len edge length to filter (4 or 8 pixels) * @param pq block quantizer * @see 8.6 */ static inline void vc1_loop_filter(uint8_t* src, int step, int stride, int len, int pq) { int i; int filt3; for(i = 0; i < len; i += 4){ filt3 = vc1_filter_line(src + 2*step, stride, pq); if(filt3){ vc1_filter_line(src + 0*step, stride, pq); vc1_filter_line(src + 1*step, stride, pq); vc1_filter_line(src + 3*step, stride, pq); } src += step * 4; } } static void vc1_v_loop_filter4_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, 1, stride, 4, pq); } static void vc1_h_loop_filter4_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, stride, 1, 4, pq); } static void vc1_v_loop_filter8_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, 1, stride, 8, pq); } static void 
vc1_h_loop_filter8_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, stride, 1, 8, pq); } static void vc1_v_loop_filter16_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, 1, stride, 16, pq); } static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq) { vc1_loop_filter(src, stride, 1, 16, pq); } /** Do inverse transform on 8x8 block */ static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; dc = (3 * dc + 1) >> 1; dc = (3 * dc + 16) >> 5; for(i = 0; i < 8; i++){ dest[0] = cm[dest[0]+dc]; dest[1] = cm[dest[1]+dc]; dest[2] = cm[dest[2]+dc]; dest[3] = cm[dest[3]+dc]; dest[4] = cm[dest[4]+dc]; dest[5] = cm[dest[5]+dc]; dest[6] = cm[dest[6]+dc]; dest[7] = cm[dest[7]+dc]; dest += linesize; } } static void vc1_inv_trans_8x8_c(DCTELEM block[64]) { int i; register int t1,t2,t3,t4,t5,t6,t7,t8; DCTELEM *src, *dst; src = block; dst = block; for(i = 0; i < 8; i++){ t1 = 12 * (src[0] + src[4]) + 4; t2 = 12 * (src[0] - src[4]) + 4; t3 = 16 * src[2] + 6 * src[6]; t4 = 6 * src[2] - 16 * src[6]; t5 = t1 + t3; t6 = t2 + t4; t7 = t2 - t4; t8 = t1 - t3; t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7]; t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7]; t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7]; t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7]; dst[0] = (t5 + t1) >> 3; dst[1] = (t6 + t2) >> 3; dst[2] = (t7 + t3) >> 3; dst[3] = (t8 + t4) >> 3; dst[4] = (t8 - t4) >> 3; dst[5] = (t7 - t3) >> 3; dst[6] = (t6 - t2) >> 3; dst[7] = (t5 - t1) >> 3; src += 8; dst += 8; } src = block; dst = block; for(i = 0; i < 8; i++){ t1 = 12 * (src[ 0] + src[32]) + 64; t2 = 12 * (src[ 0] - src[32]) + 64; t3 = 16 * src[16] + 6 * src[48]; t4 = 6 * src[16] - 16 * src[48]; t5 = t1 + t3; t6 = t2 + t4; t7 = t2 - t4; t8 = t1 - t3; t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56]; t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56]; 
t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56]; t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56]; dst[ 0] = (t5 + t1) >> 7; dst[ 8] = (t6 + t2) >> 7; dst[16] = (t7 + t3) >> 7; dst[24] = (t8 + t4) >> 7; dst[32] = (t8 - t4 + 1) >> 7; dst[40] = (t7 - t3 + 1) >> 7; dst[48] = (t6 - t2 + 1) >> 7; dst[56] = (t5 - t1 + 1) >> 7; src++; dst++; } } /** Do inverse transform on 8x4 part of block */ static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; dc = ( 3 * dc + 1) >> 1; dc = (17 * dc + 64) >> 7; for(i = 0; i < 4; i++){ dest[0] = cm[dest[0]+dc]; dest[1] = cm[dest[1]+dc]; dest[2] = cm[dest[2]+dc]; dest[3] = cm[dest[3]+dc]; dest[4] = cm[dest[4]+dc]; dest[5] = cm[dest[5]+dc]; dest[6] = cm[dest[6]+dc]; dest[7] = cm[dest[7]+dc]; dest += linesize; } } static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; register int t1,t2,t3,t4,t5,t6,t7,t8; DCTELEM *src, *dst; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; src = block; dst = block; for(i = 0; i < 4; i++){ t1 = 12 * (src[0] + src[4]) + 4; t2 = 12 * (src[0] - src[4]) + 4; t3 = 16 * src[2] + 6 * src[6]; t4 = 6 * src[2] - 16 * src[6]; t5 = t1 + t3; t6 = t2 + t4; t7 = t2 - t4; t8 = t1 - t3; t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7]; t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7]; t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7]; t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7]; dst[0] = (t5 + t1) >> 3; dst[1] = (t6 + t2) >> 3; dst[2] = (t7 + t3) >> 3; dst[3] = (t8 + t4) >> 3; dst[4] = (t8 - t4) >> 3; dst[5] = (t7 - t3) >> 3; dst[6] = (t6 - t2) >> 3; dst[7] = (t5 - t1) >> 3; src += 8; dst += 8; } src = block; for(i = 0; i < 8; i++){ t1 = 17 * (src[ 0] + src[16]) + 64; t2 = 17 * (src[ 0] - src[16]) + 64; t3 = 22 * src[ 8] + 10 * src[24]; t4 = 22 * src[24] - 10 * src[ 8]; dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)]; 
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)]; dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)]; dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)]; src ++; dest++; } } /** Do inverse transform on 4x8 parts of block */ static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; dc = (17 * dc + 4) >> 3; dc = (12 * dc + 64) >> 7; for(i = 0; i < 8; i++){ dest[0] = cm[dest[0]+dc]; dest[1] = cm[dest[1]+dc]; dest[2] = cm[dest[2]+dc]; dest[3] = cm[dest[3]+dc]; dest += linesize; } } static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; register int t1,t2,t3,t4,t5,t6,t7,t8; DCTELEM *src, *dst; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; src = block; dst = block; for(i = 0; i < 8; i++){ t1 = 17 * (src[0] + src[2]) + 4; t2 = 17 * (src[0] - src[2]) + 4; t3 = 22 * src[1] + 10 * src[3]; t4 = 22 * src[3] - 10 * src[1]; dst[0] = (t1 + t3) >> 3; dst[1] = (t2 - t4) >> 3; dst[2] = (t2 + t4) >> 3; dst[3] = (t1 - t3) >> 3; src += 8; dst += 8; } src = block; for(i = 0; i < 4; i++){ t1 = 12 * (src[ 0] + src[32]) + 64; t2 = 12 * (src[ 0] - src[32]) + 64; t3 = 16 * src[16] + 6 * src[48]; t4 = 6 * src[16] - 16 * src[48]; t5 = t1 + t3; t6 = t2 + t4; t7 = t2 - t4; t8 = t1 - t3; t1 = 16 * src[ 8] + 15 * src[24] + 9 * src[40] + 4 * src[56]; t2 = 15 * src[ 8] - 4 * src[24] - 16 * src[40] - 9 * src[56]; t3 = 9 * src[ 8] - 16 * src[24] + 4 * src[40] + 15 * src[56]; t4 = 4 * src[ 8] - 9 * src[24] + 15 * src[40] - 16 * src[56]; dest[0*linesize] = cm[dest[0*linesize] + ((t5 + t1) >> 7)]; dest[1*linesize] = cm[dest[1*linesize] + ((t6 + t2) >> 7)]; dest[2*linesize] = cm[dest[2*linesize] + ((t7 + t3) >> 7)]; dest[3*linesize] = cm[dest[3*linesize] + ((t8 + t4) >> 7)]; dest[4*linesize] = cm[dest[4*linesize] + ((t8 - t4 + 1) >> 7)]; dest[5*linesize] = cm[dest[5*linesize] + ((t7 - t3 + 1) >> 7)]; dest[6*linesize] = cm[dest[6*linesize] + ((t6 - t2 + 
1) >> 7)]; dest[7*linesize] = cm[dest[7*linesize] + ((t5 - t1 + 1) >> 7)]; src ++; dest++; } } /** Do inverse transform on 4x4 part of block */ static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; dc = (17 * dc + 4) >> 3; dc = (17 * dc + 64) >> 7; for(i = 0; i < 4; i++){ dest[0] = cm[dest[0]+dc]; dest[1] = cm[dest[1]+dc]; dest[2] = cm[dest[2]+dc]; dest[3] = cm[dest[3]+dc]; dest += linesize; } } static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; register int t1,t2,t3,t4; DCTELEM *src, *dst; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; src = block; dst = block; for(i = 0; i < 4; i++){ t1 = 17 * (src[0] + src[2]) + 4; t2 = 17 * (src[0] - src[2]) + 4; t3 = 22 * src[1] + 10 * src[3]; t4 = 22 * src[3] - 10 * src[1]; dst[0] = (t1 + t3) >> 3; dst[1] = (t2 - t4) >> 3; dst[2] = (t2 + t4) >> 3; dst[3] = (t1 - t3) >> 3; src += 8; dst += 8; } src = block; for(i = 0; i < 4; i++){ t1 = 17 * (src[ 0] + src[16]) + 64; t2 = 17 * (src[ 0] - src[16]) + 64; t3 = 22 * src[ 8] + 10 * src[24]; t4 = 22 * src[24] - 10 * src[ 8]; dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)]; dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)]; dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)]; dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)]; src ++; dest++; } } /* motion compensation functions */ /** Filter in case of 2 filters */ #define VC1_MSPEL_FILTER_16B(DIR, TYPE) \ static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, int stride, int mode) \ { \ switch(mode){ \ case 0: /* no shift - should not occur */ \ return 0; \ case 1: /* 1/4 shift */ \ return -4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2]; \ case 2: /* 1/2 shift */ \ return -src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2]; \ case 3: /* 3/4 shift */ \ return -3*src[-stride] + 18*src[0] + 53*src[stride] - 
4*src[stride*2]; \ } \ return 0; /* should not occur */ \ } VC1_MSPEL_FILTER_16B(ver, uint8_t); VC1_MSPEL_FILTER_16B(hor, int16_t); /** Filter used to interpolate fractional pel values */ static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride, int mode, int r) { switch(mode){ case 0: //no shift return src[0]; case 1: // 1/4 shift return (-4*src[-stride] + 53*src[0] + 18*src[stride] - 3*src[stride*2] + 32 - r) >> 6; case 2: // 1/2 shift return (-src[-stride] + 9*src[0] + 9*src[stride] - src[stride*2] + 8 - r) >> 4; case 3: // 3/4 shift return (-3*src[-stride] + 18*src[0] + 53*src[stride] - 4*src[stride*2] + 32 - r) >> 6; } return 0; //should not occur } /** Function used to do motion compensation with bicubic interpolation */ #define VC1_MSPEL_MC(OP, OPNAME)\ static void OPNAME ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride, int hmode, int vmode, int rnd)\ {\ int i, j;\ \ if (vmode) { /* Horizontal filter to apply */\ int r;\ \ if (hmode) { /* Vertical filter to apply, output to tmp */\ static const int shift_value[] = { 0, 5, 1, 5 };\ int shift = (shift_value[hmode]+shift_value[vmode])>>1;\ int16_t tmp[11*8], *tptr = tmp;\ \ r = (1<<(shift-1)) + rnd-1;\ \ src -= 1;\ for(j = 0; j < 8; j++) {\ for(i = 0; i < 11; i++)\ tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode)+r)>>shift;\ src += stride;\ tptr += 11;\ }\ \ r = 64-rnd;\ tptr = tmp+1;\ for(j = 0; j < 8; j++) {\ for(i = 0; i < 8; i++)\ OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode)+r)>>7);\ dst += stride;\ tptr += 11;\ }\ \ return;\ }\ else { /* No horizontal filter, output 8 lines to dst */\ r = 1-rnd;\ \ for(j = 0; j < 8; j++) {\ for(i = 0; i < 8; i++)\ OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r));\ src += stride;\ dst += stride;\ }\ return;\ }\ }\ \ /* Horizontal mode with no vertical mode */\ for(j = 0; j < 8; j++) {\ for(i = 0; i < 8; i++)\ OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));\ dst += stride;\ src += stride;\ }\ } #define 
op_put(a, b) a = av_clip_uint8(b) #define op_avg(a, b) a = (a + av_clip_uint8(b) + 1) >> 1 VC1_MSPEL_MC(op_put, put_) VC1_MSPEL_MC(op_avg, avg_) /* pixel functions - really are entry points to vc1_mspel_mc */ #define PUT_VC1_MSPEL(a, b)\ static void put_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \ put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \ }\ static void avg_vc1_mspel_mc ## a ## b ##_c(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \ avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \ } PUT_VC1_MSPEL(1, 0) PUT_VC1_MSPEL(2, 0) PUT_VC1_MSPEL(3, 0) PUT_VC1_MSPEL(0, 1) PUT_VC1_MSPEL(1, 1) PUT_VC1_MSPEL(2, 1) PUT_VC1_MSPEL(3, 1) PUT_VC1_MSPEL(0, 2) PUT_VC1_MSPEL(1, 2) PUT_VC1_MSPEL(2, 2) PUT_VC1_MSPEL(3, 2) PUT_VC1_MSPEL(0, 3) PUT_VC1_MSPEL(1, 3) PUT_VC1_MSPEL(2, 3) PUT_VC1_MSPEL(3, 3) av_cold void ff_vc1dsp_init(DSPContext* dsp, AVCodecContext *avctx) { dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c; dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c; dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c; dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c; dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c; dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c; dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c; dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c; dsp->vc1_h_overlap = vc1_h_overlap_c; dsp->vc1_v_overlap = vc1_v_overlap_c; dsp->vc1_v_loop_filter4 = vc1_v_loop_filter4_c; dsp->vc1_h_loop_filter4 = vc1_h_loop_filter4_c; dsp->vc1_v_loop_filter8 = vc1_v_loop_filter8_c; dsp->vc1_h_loop_filter8 = vc1_h_loop_filter8_c; dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c; dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c; dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_c; dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_c; dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_c; dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_c; dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_c; 
dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_c; dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_c; dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_c; dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_c; dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_c; dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c; dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c; dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c; dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c; dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c; dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c; dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_c; dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_c; dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_c; dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_c; dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_c; dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_c; dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_c; dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_c; dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_c; dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_c; dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c; dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c; dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c; dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c; dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c; dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c; }
123linslouis-android-video-cutter
jni/libavcodec/vc1dsp.c
C
asf20
21,270
/* * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_UNARY_H #define AVCODEC_UNARY_H #include "get_bits.h" /** * Get unary code of limited length * @param gb GetBitContext * @param[in] stop The bitstop value (unary code of 1's or 0's) * @param[in] len Maximum length * @return Unary length/index */ static inline int get_unary(GetBitContext *gb, int stop, int len) { int i; for(i = 0; i < len && get_bits1(gb) != stop; i++); return i; } /** * Get unary code terminated by a 0 with a maximum length of 33 * @param gb GetBitContext * @return Unary length/index */ static inline int get_unary_0_33(GetBitContext *gb) { return get_unary(gb, 0, 33); } static inline int get_unary_0_9(GetBitContext *gb) { return get_unary(gb, 0, 9); } #endif /* AVCODEC_UNARY_H */
123linslouis-android-video-cutter
jni/libavcodec/unary.h
C
asf20
1,586
/* * Musepack SV8 decoder * Copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MPEG Audio Layer 1/2 -like codec with frames of 1152 samples * divided into 32 subbands. */ #include "libavutil/lfg.h" #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "mpegaudio.h" #include "mpc.h" #include "mpcdata.h" #include "mpc8data.h" #include "mpc8huff.h" static VLC band_vlc, scfi_vlc[2], dscf_vlc[2], res_vlc[2]; static VLC q1_vlc, q2_vlc[2], q3_vlc[2], quant_vlc[4][2], q9up_vlc; static const int q3_offsets[2] = { MPC8_Q3_OFFSET, MPC8_Q4_OFFSET }; static const int quant_offsets[6] = { MPC8_Q5_OFFSET, MPC8_Q6_OFFSET, MPC8_Q7_OFFSET, MPC8_Q8_OFFSET }; static inline int mpc8_dec_base(GetBitContext *gb, int k, int n) { int len = mpc8_cnk_len[k-1][n-1] - 1; int code = len ? 
get_bits_long(gb, len) : 0; if (code >= mpc8_cnk_lost[k-1][n-1]) code = ((code << 1) | get_bits1(gb)) - mpc8_cnk_lost[k-1][n-1]; return code; } static inline int mpc8_dec_enum(GetBitContext *gb, int k, int n) { int bits = 0; const uint32_t * C = mpc8_cnk[k-1]; int code = mpc8_dec_base(gb, k, n); do { n--; if (code >= C[n]) { bits |= 1 << n; code -= C[n]; C -= 32; k--; } } while(k > 0); return bits; } static inline int mpc8_get_mod_golomb(GetBitContext *gb, int m) { if(mpc8_cnk_len[0][m] < 1) return 0; return mpc8_dec_base(gb, 1, m+1); } static int mpc8_get_mask(GetBitContext *gb, int size, int t) { int mask = 0; if(t && t != size) mask = mpc8_dec_enum(gb, FFMIN(t, size - t), size); if((t << 1) > size) mask = ~mask; return mask; } static const uint16_t vlc_offsets[13] = { 0, 640, 1184, 1748, 2298, 2426, 2554, 3066, 3578, 4106, 4618, 5196, 5708 }; static av_cold int mpc8_decode_init(AVCodecContext * avctx) { int i; MPCContext *c = avctx->priv_data; GetBitContext gb; static int vlc_initialized = 0; static VLC_TYPE band_table[542][2]; static VLC_TYPE q1_table[520][2]; static VLC_TYPE q9up_table[524][2]; static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2]; static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2]; static VLC_TYPE dscf0_table[560][2]; static VLC_TYPE dscf1_table[598][2]; static VLC_TYPE q3_0_table[512][2]; static VLC_TYPE q3_1_table[516][2]; static VLC_TYPE codes_table[5708][2]; if(avctx->extradata_size < 2){ av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n", avctx->extradata_size); return -1; } memset(c->oldDSCF, 0, sizeof(c->oldDSCF)); av_lfg_init(&c->rnd, 0xDEADBEEF); dsputil_init(&c->dsp, avctx); ff_mpc_init(); init_get_bits(&gb, avctx->extradata, 16); skip_bits(&gb, 3);//sample rate c->maxbands = get_bits(&gb, 5) + 1; skip_bits(&gb, 4);//channels c->MSS = get_bits1(&gb); c->frames = 1 << (get_bits(&gb, 3) * 2); avctx->sample_fmt = SAMPLE_FMT_S16; avctx->channel_layout = (avctx->channels==2) ? 
CH_LAYOUT_STEREO : CH_LAYOUT_MONO; if(vlc_initialized) return 0; av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n"); band_vlc.table = band_table; band_vlc.table_allocated = 542; init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE, mpc8_bands_bits, 1, 1, mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q1_vlc.table = q1_table; q1_vlc.table_allocated = 520; init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE, mpc8_q1_bits, 1, 1, mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q9up_vlc.table = q9up_table; q9up_vlc.table_allocated = 524; init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE, mpc8_q9up_bits, 1, 1, mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[0].table = scfi0_table; scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS; init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE, mpc8_scfi0_bits, 1, 1, mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[1].table = scfi1_table; scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS; init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE, mpc8_scfi1_bits, 1, 1, mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[0].table = dscf0_table; dscf_vlc[0].table_allocated = 560; init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE, mpc8_dscf0_bits, 1, 1, mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[1].table = dscf1_table; dscf_vlc[1].table_allocated = 598; init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE, mpc8_dscf1_bits, 1, 1, mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[0].table = q3_0_table; q3_vlc[0].table_allocated = 512; init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE, mpc8_q3_bits, 1, 1, mpc8_q3_codes, 1, 1, mpc8_q3_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[1].table = q3_1_table; q3_vlc[1].table_allocated = 516; init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE, mpc8_q4_bits, 1, 1, mpc8_q4_codes, 1, 1, mpc8_q4_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); for(i = 0; i < 2; i++){ res_vlc[i].table = &codes_table[vlc_offsets[0+i]]; res_vlc[i].table_allocated = 
vlc_offsets[1+i] - vlc_offsets[0+i]; init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE, &mpc8_res_bits[i], 1, 1, &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); q2_vlc[i].table = &codes_table[vlc_offsets[2+i]]; q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i]; init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE, &mpc8_q2_bits[i], 1, 1, &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]]; quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i]; init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE, &mpc8_q5_bits[i], 1, 1, &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]]; quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i]; init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE, &mpc8_q6_bits[i], 1, 1, &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]]; quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i]; init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE, &mpc8_q7_bits[i], 1, 1, &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]]; quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i]; init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE, &mpc8_q8_bits[i], 1, 1, &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlc_initialized = 1; return 0; } static int mpc8_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPCContext *c = avctx->priv_data; GetBitContext gb2, *gb = &gb2; int i, j, k, ch, cnt, res, t; Band *bands = c->bands; int off; int maxband, keyframe; int last[2]; keyframe = c->cur_frame == 0; if(keyframe){ memset(c->Q, 0, sizeof(c->Q)); c->last_bits_used = 0; } init_get_bits(gb, buf, buf_size * 8); skip_bits(gb, c->last_bits_used & 7); if(keyframe) maxband = 
mpc8_get_mod_golomb(gb, c->maxbands + 1); else{ maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2); if(maxband > 32) maxband -= 33; } c->last_max_band = maxband; /* read subband indexes */ if(maxband){ last[0] = last[1] = 0; for(i = maxband - 1; i >= 0; i--){ for(ch = 0; ch < 2; ch++){ last[ch] = get_vlc2(gb, res_vlc[last[ch] > 2].table, MPC8_RES_BITS, 2) + last[ch]; if(last[ch] > 15) last[ch] -= 17; bands[i].res[ch] = last[ch]; } } if(c->MSS){ int mask; cnt = 0; for(i = 0; i < maxband; i++) if(bands[i].res[0] || bands[i].res[1]) cnt++; t = mpc8_get_mod_golomb(gb, cnt); mask = mpc8_get_mask(gb, cnt, t); for(i = maxband - 1; i >= 0; i--) if(bands[i].res[0] || bands[i].res[1]){ bands[i].msf = mask & 1; mask >>= 1; } } } for(i = maxband; i < c->maxbands; i++) bands[i].res[0] = bands[i].res[1] = 0; if(keyframe){ for(i = 0; i < 32; i++) c->oldDSCF[0][i] = c->oldDSCF[1][i] = 1; } for(i = 0; i < maxband; i++){ if(bands[i].res[0] || bands[i].res[1]){ cnt = !!bands[i].res[0] + !!bands[i].res[1] - 1; if(cnt >= 0){ t = get_vlc2(gb, scfi_vlc[cnt].table, scfi_vlc[cnt].bits, 1); if(bands[i].res[0]) bands[i].scfi[0] = t >> (2 * cnt); if(bands[i].res[1]) bands[i].scfi[1] = t & 3; } } } for(i = 0; i < maxband; i++){ for(ch = 0; ch < 2; ch++){ if(!bands[i].res[ch]) continue; if(c->oldDSCF[ch][i]){ bands[i].scf_idx[ch][0] = get_bits(gb, 7) - 6; c->oldDSCF[ch][i] = 0; }else{ t = get_vlc2(gb, dscf_vlc[1].table, MPC8_DSCF1_BITS, 2); if(t == 64) t += get_bits(gb, 6); bands[i].scf_idx[ch][0] = ((bands[i].scf_idx[ch][2] + t - 25) & 0x7F) - 6; } for(j = 0; j < 2; j++){ if((bands[i].scfi[ch] << j) & 2) bands[i].scf_idx[ch][j + 1] = bands[i].scf_idx[ch][j]; else{ t = get_vlc2(gb, dscf_vlc[0].table, MPC8_DSCF0_BITS, 2); if(t == 31) t = 64 + get_bits(gb, 6); bands[i].scf_idx[ch][j + 1] = ((bands[i].scf_idx[ch][j] + t - 25) & 0x7F) - 6; } } } } for(i = 0, off = 0; i < maxband; i++, off += SAMPLES_PER_BAND){ for(ch = 0; ch < 2; ch++){ res = bands[i].res[ch]; 
switch(res){ case -1: for(j = 0; j < SAMPLES_PER_BAND; j++) c->Q[ch][off + j] = (av_lfg_get(&c->rnd) & 0x3FC) - 510; break; case 0: break; case 1: for(j = 0; j < SAMPLES_PER_BAND; j += SAMPLES_PER_BAND / 2){ cnt = get_vlc2(gb, q1_vlc.table, MPC8_Q1_BITS, 2); t = mpc8_get_mask(gb, 18, cnt); for(k = 0; k < SAMPLES_PER_BAND / 2; k++, t <<= 1) c->Q[ch][off + j + k] = (t & 0x20000) ? (get_bits1(gb) << 1) - 1 : 0; } break; case 2: cnt = 6;//2*mpc8_thres[res] for(j = 0; j < SAMPLES_PER_BAND; j += 3){ t = get_vlc2(gb, q2_vlc[cnt > 3].table, MPC8_Q2_BITS, 2); c->Q[ch][off + j + 0] = mpc8_idx50[t]; c->Q[ch][off + j + 1] = mpc8_idx51[t]; c->Q[ch][off + j + 2] = mpc8_idx52[t]; cnt = (cnt >> 1) + mpc8_huffq2[t]; } break; case 3: case 4: for(j = 0; j < SAMPLES_PER_BAND; j += 2){ t = get_vlc2(gb, q3_vlc[res - 3].table, MPC8_Q3_BITS, 2) + q3_offsets[res - 3]; c->Q[ch][off + j + 1] = t >> 4; c->Q[ch][off + j + 0] = (t & 8) ? (t & 0xF) - 16 : (t & 0xF); } break; case 5: case 6: case 7: case 8: cnt = 2 * mpc8_thres[res]; for(j = 0; j < SAMPLES_PER_BAND; j++){ t = get_vlc2(gb, quant_vlc[res - 5][cnt > mpc8_thres[res]].table, quant_vlc[res - 5][cnt > mpc8_thres[res]].bits, 2) + quant_offsets[res - 5]; c->Q[ch][off + j] = t; cnt = (cnt >> 1) + FFABS(c->Q[ch][off + j]); } break; default: for(j = 0; j < SAMPLES_PER_BAND; j++){ c->Q[ch][off + j] = get_vlc2(gb, q9up_vlc.table, MPC8_Q9UP_BITS, 2); if(res != 9){ c->Q[ch][off + j] <<= res - 9; c->Q[ch][off + j] |= get_bits(gb, res - 9); } c->Q[ch][off + j] -= (1 << (res - 2)) - 1; } } } } ff_mpc_dequantize_and_synth(c, maxband, data); c->cur_frame++; c->last_bits_used = get_bits_count(gb); if(c->cur_frame >= c->frames) c->cur_frame = 0; *data_size = MPC_FRAME_SIZE * 4; return c->cur_frame ? c->last_bits_used >> 3 : buf_size; } AVCodec mpc8_decoder = { "mpc8", AVMEDIA_TYPE_AUDIO, CODEC_ID_MUSEPACK8, sizeof(MPCContext), mpc8_decode_init, NULL, NULL, mpc8_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), };
123linslouis-android-video-cutter
jni/libavcodec/mpc8.c
C
asf20
14,538
/* * AMR Audio decoder stub * Copyright (c) 2003 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" static void amr_decode_fix_avctx(AVCodecContext *avctx) { const int is_amr_wb = 1 + (avctx->codec_id == CODEC_ID_AMR_WB); if (!avctx->sample_rate) avctx->sample_rate = 8000 * is_amr_wb; if (!avctx->channels) avctx->channels = 1; avctx->frame_size = 160 * is_amr_wb; avctx->sample_fmt = SAMPLE_FMT_S16; } #if CONFIG_LIBOPENCORE_AMRNB #include <opencore-amrnb/interf_dec.h> #include <opencore-amrnb/interf_enc.h> static const char nb_bitrate_unsupported[] = "bitrate not supported: use one of 4.75k, 5.15k, 5.9k, 6.7k, 7.4k, 7.95k, 10.2k or 12.2k\n"; /* Common code for fixed and float version*/ typedef struct AMR_bitrates { int rate; enum Mode mode; } AMR_bitrates; /* Match desired bitrate */ static int getBitrateMode(int bitrate) { /* make the correspondance between bitrate and mode */ AMR_bitrates rates[] = { { 4750, MR475}, { 5150, MR515}, { 5900, MR59}, { 6700, MR67}, { 7400, MR74}, { 7950, MR795}, {10200, MR102}, {12200, MR122}, }; int i; for (i = 0; i < 8; i++) if (rates[i].rate == bitrate) return rates[i].mode; /* no bitrate matching, return an error */ return -1; } typedef struct AMRContext { int frameCount; void *decState; int *enstate; int 
enc_bitrate; } AMRContext; static av_cold int amr_nb_decode_init(AVCodecContext *avctx) { AMRContext *s = avctx->priv_data; s->frameCount = 0; s->decState = Decoder_Interface_init(); if (!s->decState) { av_log(avctx, AV_LOG_ERROR, "Decoder_Interface_init error\r\n"); return -1; } amr_decode_fix_avctx(avctx); if (avctx->channels > 1) { av_log(avctx, AV_LOG_ERROR, "amr_nb: multichannel decoding not supported\n"); return -1; } return 0; } static av_cold int amr_nb_decode_close(AVCodecContext *avctx) { AMRContext *s = avctx->priv_data; Decoder_Interface_exit(s->decState); return 0; } static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AMRContext *s = avctx->priv_data; const uint8_t *amrData = buf; static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 }; enum Mode dec_mode; int packet_size; /* av_log(NULL, AV_LOG_DEBUG, "amr_decode_frame buf=%p buf_size=%d frameCount=%d!!\n", buf, buf_size, s->frameCount); */ dec_mode = (buf[0] >> 3) & 0x000F; packet_size = block_size[dec_mode] + 1; if (packet_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "amr frame too short (%u, should be %u)\n", buf_size, packet_size); return -1; } s->frameCount++; /* av_log(NULL, AV_LOG_DEBUG, "packet_size=%d amrData= 0x%X %X %X %X\n", packet_size, amrData[0], amrData[1], amrData[2], amrData[3]); */ /* call decoder */ Decoder_Interface_Decode(s->decState, amrData, data, 0); *data_size = 160 * 2; return packet_size; } AVCodec libopencore_amrnb_decoder = { "libopencore_amrnb", AVMEDIA_TYPE_AUDIO, CODEC_ID_AMR_NB, sizeof(AMRContext), amr_nb_decode_init, NULL, amr_nb_decode_close, amr_nb_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), }; static av_cold int amr_nb_encode_init(AVCodecContext *avctx) { AMRContext *s = avctx->priv_data; s->frameCount = 0; if (avctx->sample_rate != 8000) { av_log(avctx, 
AV_LOG_ERROR, "Only 8000Hz sample rate supported\n"); return -1; } if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "Only mono supported\n"); return -1; } avctx->frame_size = 160; avctx->coded_frame = avcodec_alloc_frame(); s->enstate=Encoder_Interface_init(0); if (!s->enstate) { av_log(avctx, AV_LOG_ERROR, "Encoder_Interface_init error\n"); return -1; } if ((s->enc_bitrate = getBitrateMode(avctx->bit_rate)) < 0) { av_log(avctx, AV_LOG_ERROR, nb_bitrate_unsupported); return -1; } return 0; } static av_cold int amr_nb_encode_close(AVCodecContext *avctx) { AMRContext *s = avctx->priv_data; Encoder_Interface_exit(s->enstate); av_freep(&avctx->coded_frame); return 0; } static int amr_nb_encode_frame(AVCodecContext *avctx, unsigned char *frame/*out*/, int buf_size, void *data/*in*/) { AMRContext *s = avctx->priv_data; int written; if ((s->enc_bitrate = getBitrateMode(avctx->bit_rate)) < 0) { av_log(avctx, AV_LOG_ERROR, nb_bitrate_unsupported); return -1; } written = Encoder_Interface_Encode(s->enstate, s->enc_bitrate, data, frame, 0); /* av_log(NULL, AV_LOG_DEBUG, "amr_nb_encode_frame encoded %u bytes, bitrate %u, first byte was %#02x\n", written, s->enc_bitrate, frame[0] ); */ return written; } AVCodec libopencore_amrnb_encoder = { "libopencore_amrnb", AVMEDIA_TYPE_AUDIO, CODEC_ID_AMR_NB, sizeof(AMRContext), amr_nb_encode_init, amr_nb_encode_frame, amr_nb_encode_close, NULL, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), }; #endif /* -----------AMR wideband ------------*/ #if CONFIG_LIBOPENCORE_AMRWB #ifdef _TYPEDEF_H //To avoid duplicate typedefs from typedef in amr-nb #define typedef_h #endif #include <opencore-amrwb/dec_if.h> #include <opencore-amrwb/if_rom.h> static const char wb_bitrate_unsupported[] = "bitrate not supported: use one of 6.6k, 8.85k, 12.65k, 14.25k, 15.85k, 18.25k, 19.85k, 23.05k, or 23.85k\n"; /* Common code for fixed and float 
version*/ typedef struct AMRWB_bitrates { int rate; int mode; } AMRWB_bitrates; typedef struct AMRWBContext { int frameCount; void *state; int mode; Word16 allow_dtx; } AMRWBContext; static av_cold int amr_wb_decode_init(AVCodecContext *avctx) { AMRWBContext *s = avctx->priv_data; s->frameCount = 0; s->state = D_IF_init(); amr_decode_fix_avctx(avctx); if (avctx->channels > 1) { av_log(avctx, AV_LOG_ERROR, "amr_wb: multichannel decoding not supported\n"); return -1; } return 0; } static int amr_wb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AMRWBContext *s = avctx->priv_data; const uint8_t *amrData = buf; int mode; int packet_size; static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1}; if (!buf_size) /* nothing to do */ return 0; mode = (amrData[0] >> 3) & 0x000F; packet_size = block_size[mode]; if (packet_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "amr frame too short (%u, should be %u)\n", buf_size, packet_size + 1); return -1; } s->frameCount++; D_IF_decode(s->state, amrData, data, _good_frame); *data_size = 320 * 2; return packet_size; } static int amr_wb_decode_close(AVCodecContext *avctx) { AMRWBContext *s = avctx->priv_data; D_IF_exit(s->state); return 0; } AVCodec libopencore_amrwb_decoder = { "libopencore_amrwb", AVMEDIA_TYPE_AUDIO, CODEC_ID_AMR_WB, sizeof(AMRWBContext), amr_wb_decode_init, NULL, amr_wb_decode_close, amr_wb_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"), }; #endif /* CONFIG_LIBOPENCORE_AMRWB */
123linslouis-android-video-cutter
jni/libavcodec/libopencore-amr.c
C
asf20
8,933
/* * MPEG-2 HW decode acceleration through VA API * * Copyright (C) 2008-2009 Splitted-Desktop Systems * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "vaapi_internal.h" #include "dsputil.h" /** Reconstruct bitstream f_code */ static inline int mpeg2_get_f_code(MpegEncContext *s) { return ((s->mpeg_f_code[0][0] << 12) | (s->mpeg_f_code[0][1] << 8) | (s->mpeg_f_code[1][0] << 4) | s->mpeg_f_code[1][1]); } /** Determine frame start: first field for field picture or frame picture */ static inline int mpeg2_get_is_frame_start(MpegEncContext *s) { return s->first_field || s->picture_structure == PICT_FRAME; } static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size) { struct MpegEncContext * const s = avctx->priv_data; struct vaapi_context * const vactx = avctx->hwaccel_context; VAPictureParameterBufferMPEG2 *pic_param; VAIQMatrixBufferMPEG2 *iq_matrix; int i; dprintf(avctx, "vaapi_mpeg2_start_frame()\n"); vactx->slice_param_size = sizeof(VASliceParameterBufferMPEG2); /* Fill in VAPictureParameterBufferMPEG2 */ pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferMPEG2)); if (!pic_param) return -1; pic_param->horizontal_size = s->width; pic_param->vertical_size = s->height; 
pic_param->forward_reference_picture = VA_INVALID_ID; pic_param->backward_reference_picture = VA_INVALID_ID; pic_param->picture_coding_type = s->pict_type; pic_param->f_code = mpeg2_get_f_code(s); pic_param->picture_coding_extension.value = 0; /* reset all bits */ pic_param->picture_coding_extension.bits.intra_dc_precision = s->intra_dc_precision; pic_param->picture_coding_extension.bits.picture_structure = s->picture_structure; pic_param->picture_coding_extension.bits.top_field_first = s->top_field_first; pic_param->picture_coding_extension.bits.frame_pred_frame_dct = s->frame_pred_frame_dct; pic_param->picture_coding_extension.bits.concealment_motion_vectors = s->concealment_motion_vectors; pic_param->picture_coding_extension.bits.q_scale_type = s->q_scale_type; pic_param->picture_coding_extension.bits.intra_vlc_format = s->intra_vlc_format; pic_param->picture_coding_extension.bits.alternate_scan = s->alternate_scan; pic_param->picture_coding_extension.bits.repeat_first_field = s->repeat_first_field; pic_param->picture_coding_extension.bits.progressive_frame = s->progressive_frame; pic_param->picture_coding_extension.bits.is_first_field = mpeg2_get_is_frame_start(s); switch (s->pict_type) { case FF_B_TYPE: pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture); // fall-through case FF_P_TYPE: pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture); break; } /* Fill in VAIQMatrixBufferMPEG2 */ iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferMPEG2)); if (!iq_matrix) return -1; iq_matrix->load_intra_quantiser_matrix = 1; iq_matrix->load_non_intra_quantiser_matrix = 1; iq_matrix->load_chroma_intra_quantiser_matrix = 1; iq_matrix->load_chroma_non_intra_quantiser_matrix = 1; for (i = 0; i < 64; i++) { int n = s->dsp.idct_permutation[ff_zigzag_direct[i]]; iq_matrix->intra_quantiser_matrix[i] = s->intra_matrix[n]; iq_matrix->non_intra_quantiser_matrix[i] = s->inter_matrix[n]; 
iq_matrix->chroma_intra_quantiser_matrix[i] = s->chroma_intra_matrix[n]; iq_matrix->chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n]; } return 0; } static int vaapi_mpeg2_end_frame(AVCodecContext *avctx) { return ff_vaapi_common_end_frame(avctx->priv_data); } static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { MpegEncContext * const s = avctx->priv_data; VASliceParameterBufferMPEG2 *slice_param; GetBitContext gb; uint32_t start_code, quantiser_scale_code, intra_slice_flag, macroblock_offset; dprintf(avctx, "vaapi_mpeg2_decode_slice(): buffer %p, size %d\n", buffer, size); /* Determine macroblock_offset */ init_get_bits(&gb, buffer, 8 * size); start_code = get_bits(&gb, 32); assert((start_code & 0xffffff00) == 0x00000100); quantiser_scale_code = get_bits(&gb, 5); intra_slice_flag = get_bits1(&gb); if (intra_slice_flag) { skip_bits(&gb, 8); while (get_bits1(&gb) != 0) skip_bits(&gb, 8); } macroblock_offset = get_bits_count(&gb); /* Fill in VASliceParameterBufferMPEG2 */ slice_param = (VASliceParameterBufferMPEG2 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size); if (!slice_param) return -1; slice_param->macroblock_offset = macroblock_offset; slice_param->slice_horizontal_position = s->mb_x; slice_param->slice_vertical_position = s->mb_y; slice_param->quantiser_scale_code = quantiser_scale_code; slice_param->intra_slice_flag = intra_slice_flag; return 0; } AVHWAccel mpeg2_vaapi_hwaccel = { .name = "mpeg2_vaapi", .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_MPEG2VIDEO, .pix_fmt = PIX_FMT_VAAPI_VLD, .capabilities = 0, .start_frame = vaapi_mpeg2_start_frame, .end_frame = vaapi_mpeg2_end_frame, .decode_slice = vaapi_mpeg2_decode_slice, .priv_data_size = 0, };
123linslouis-android-video-cutter
jni/libavcodec/vaapi_mpeg2.c
C
asf20
6,558
/* * AAC encoder psychoacoustic model * Copyright (C) 2008 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAC encoder psychoacoustic model */ #include "avcodec.h" #include "aactab.h" #include "psymodel.h" /*********************************** * TODOs: * thresholds linearization after their modifications for attaining given bitrate * try other bitrate controlling mechanism (maybe use ratecontrol.c?) 
* control quality for quality-based output **********************************/ /** * constants for 3GPP AAC psychoacoustic model * @{ */ #define PSY_3GPP_SPREAD_LOW 1.5f // spreading factor for ascending threshold spreading (15 dB/Bark) #define PSY_3GPP_SPREAD_HI 3.0f // spreading factor for descending threshold spreading (30 dB/Bark) #define PSY_3GPP_RPEMIN 0.01f #define PSY_3GPP_RPELEV 2.0f /** * @} */ /** * information for single band used by 3GPP TS26.403-inspired psychoacoustic model */ typedef struct Psy3gppBand{ float energy; ///< band energy float ffac; ///< form factor float thr; ///< energy threshold float min_snr; ///< minimal SNR float thr_quiet; ///< threshold in quiet }Psy3gppBand; /** * single/pair channel context for psychoacoustic model */ typedef struct Psy3gppChannel{ Psy3gppBand band[128]; ///< bands information Psy3gppBand prev_band[128]; ///< bands information from the previous frame float win_energy; ///< sliding average of channel energy float iir_state[2]; ///< hi-pass IIR filter state uint8_t next_grouping; ///< stored grouping scheme for the next frame (in case of 8 short window sequence) enum WindowSequence next_window_seq; ///< window sequence to be used in the next frame }Psy3gppChannel; /** * psychoacoustic model frame type-dependent coefficients */ typedef struct Psy3gppCoeffs{ float ath [64]; ///< absolute threshold of hearing per bands float barks [64]; ///< Bark value for each spectral band in long frame float spread_low[64]; ///< spreading factor for low-to-high threshold spreading in long frame float spread_hi [64]; ///< spreading factor for high-to-low threshold spreading in long frame }Psy3gppCoeffs; /** * 3GPP TS26.403-inspired psychoacoustic model specific data */ typedef struct Psy3gppContext{ Psy3gppCoeffs psy_coef[2]; Psy3gppChannel *ch; }Psy3gppContext; /** * Calculate Bark value for given line. 
*/ static av_cold float calc_bark(float f) { return 13.3f * atanf(0.00076f * f) + 3.5f * atanf((f / 7500.0f) * (f / 7500.0f)); } #define ATH_ADD 4 /** * Calculate ATH value for given frequency. * Borrowed from Lame. */ static av_cold float ath(float f, float add) { f /= 1000.0f; return 3.64 * pow(f, -0.8) - 6.8 * exp(-0.6 * (f - 3.4) * (f - 3.4)) + 6.0 * exp(-0.15 * (f - 8.7) * (f - 8.7)) + (0.6 + 0.04 * add) * 0.001 * f * f * f * f; } static av_cold int psy_3gpp_init(FFPsyContext *ctx) { Psy3gppContext *pctx; float barks[1024]; int i, j, g, start; float prev, minscale, minath; ctx->model_priv_data = av_mallocz(sizeof(Psy3gppContext)); pctx = (Psy3gppContext*) ctx->model_priv_data; for (i = 0; i < 1024; i++) barks[i] = calc_bark(i * ctx->avctx->sample_rate / 2048.0); minath = ath(3410, ATH_ADD); for (j = 0; j < 2; j++) { Psy3gppCoeffs *coeffs = &pctx->psy_coef[j]; i = 0; prev = 0.0; for (g = 0; g < ctx->num_bands[j]; g++) { i += ctx->bands[j][g]; coeffs->barks[g] = (barks[i - 1] + prev) / 2.0; prev = barks[i - 1]; } for (g = 0; g < ctx->num_bands[j] - 1; g++) { coeffs->spread_low[g] = pow(10.0, -(coeffs->barks[g+1] - coeffs->barks[g]) * PSY_3GPP_SPREAD_LOW); coeffs->spread_hi [g] = pow(10.0, -(coeffs->barks[g+1] - coeffs->barks[g]) * PSY_3GPP_SPREAD_HI); } start = 0; for (g = 0; g < ctx->num_bands[j]; g++) { minscale = ath(ctx->avctx->sample_rate * start / 1024.0, ATH_ADD); for (i = 1; i < ctx->bands[j][g]; i++) minscale = FFMIN(minscale, ath(ctx->avctx->sample_rate * (start + i) / 1024.0 / 2.0, ATH_ADD)); coeffs->ath[g] = minscale - minath; start += ctx->bands[j][g]; } } pctx->ch = av_mallocz(sizeof(Psy3gppChannel) * ctx->avctx->channels); return 0; } /** * IIR filter used in block switching decision */ static float iir_filter(int in, float state[2]) { float ret; ret = 0.7548f * (in - state[0]) + 0.5095f * state[1]; state[0] = in; state[1] = ret; return ret; } /** * window grouping information stored as bits (0 - new group, 1 - group continues) */ static const 
uint8_t window_grouping[9] = { 0xB6, 0x6C, 0xD8, 0xB2, 0x66, 0xC6, 0x96, 0x36, 0x36 }; /** * Tell encoder which window types to use. * @see 3GPP TS26.403 5.4.1 "Blockswitching" */ static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type) { int i, j; int br = ctx->avctx->bit_rate / ctx->avctx->channels; int attack_ratio = br <= 16000 ? 18 : 10; Psy3gppContext *pctx = (Psy3gppContext*) ctx->model_priv_data; Psy3gppChannel *pch = &pctx->ch[channel]; uint8_t grouping = 0; FFPsyWindowInfo wi; memset(&wi, 0, sizeof(wi)); if (la) { float s[8], v; int switch_to_eight = 0; float sum = 0.0, sum2 = 0.0; int attack_n = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 128; j++) { v = iir_filter(audio[(i*128+j)*ctx->avctx->channels], pch->iir_state); sum += v*v; } s[i] = sum; sum2 += sum; } for (i = 0; i < 8; i++) { if (s[i] > pch->win_energy * attack_ratio) { attack_n = i + 1; switch_to_eight = 1; break; } } pch->win_energy = pch->win_energy*7/8 + sum2/64; wi.window_type[1] = prev_type; switch (prev_type) { case ONLY_LONG_SEQUENCE: wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE; break; case LONG_START_SEQUENCE: wi.window_type[0] = EIGHT_SHORT_SEQUENCE; grouping = pch->next_grouping; break; case LONG_STOP_SEQUENCE: wi.window_type[0] = ONLY_LONG_SEQUENCE; break; case EIGHT_SHORT_SEQUENCE: wi.window_type[0] = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE; grouping = switch_to_eight ? pch->next_grouping : 0; break; } pch->next_grouping = window_grouping[attack_n]; } else { for (i = 0; i < 3; i++) wi.window_type[i] = prev_type; grouping = (prev_type == EIGHT_SHORT_SEQUENCE) ? 
window_grouping[0] : 0; } wi.window_shape = 1; if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) { wi.num_windows = 1; wi.grouping[0] = 1; } else { int lastgrp = 0; wi.num_windows = 8; for (i = 0; i < 8; i++) { if (!((grouping >> i) & 1)) lastgrp = i; wi.grouping[lastgrp]++; } } return wi; } /** * Calculate band thresholds as suggested in 3GPP TS26.403 */ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel, const float *coefs, FFPsyWindowInfo *wi) { Psy3gppContext *pctx = (Psy3gppContext*) ctx->model_priv_data; Psy3gppChannel *pch = &pctx->ch[channel]; int start = 0; int i, w, g; const int num_bands = ctx->num_bands[wi->num_windows == 8]; const uint8_t* band_sizes = ctx->bands[wi->num_windows == 8]; Psy3gppCoeffs *coeffs = &pctx->psy_coef[wi->num_windows == 8]; //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation" for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { Psy3gppBand *band = &pch->band[w+g]; band->energy = 0.0f; for (i = 0; i < band_sizes[g]; i++) band->energy += coefs[start+i] * coefs[start+i]; band->energy *= 1.0f / (512*512); band->thr = band->energy * 0.001258925f; start += band_sizes[g]; ctx->psy_bands[channel*PSY_MAX_BANDS+w+g].energy = band->energy; } } //modify thresholds - spread, threshold in quiet - 5.4.3 "Spreaded Energy Calculation" for (w = 0; w < wi->num_windows*16; w += 16) { Psy3gppBand *band = &pch->band[w]; for (g = 1; g < num_bands; g++) band[g].thr = FFMAX(band[g].thr, band[g-1].thr * coeffs->spread_low[g-1]); for (g = num_bands - 2; g >= 0; g--) band[g].thr = FFMAX(band[g].thr, band[g+1].thr * coeffs->spread_hi [g]); for (g = 0; g < num_bands; g++) { band[g].thr_quiet = FFMAX(band[g].thr, coeffs->ath[g]); if (wi->num_windows != 8 && wi->window_type[1] != EIGHT_SHORT_SEQUENCE) band[g].thr_quiet = FFMAX(PSY_3GPP_RPEMIN*band[g].thr_quiet, FFMIN(band[g].thr_quiet, PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet)); band[g].thr = FFMAX(band[g].thr, band[g].thr_quiet * 
0.25); ctx->psy_bands[channel*PSY_MAX_BANDS+w+g].threshold = band[g].thr; } } memcpy(pch->prev_band, pch->band, sizeof(pch->band)); } static av_cold void psy_3gpp_end(FFPsyContext *apc) { Psy3gppContext *pctx = (Psy3gppContext*) apc->model_priv_data; av_freep(&pctx->ch); av_freep(&apc->model_priv_data); } const FFPsyModel ff_aac_psy_model = { .name = "3GPP TS 26.403-inspired model", .init = psy_3gpp_init, .window = psy_3gpp_window, .analyze = psy_3gpp_analyze, .end = psy_3gpp_end, };
123linslouis-android-video-cutter
jni/libavcodec/aacpsy.c
C
asf20
10,978
/** * @file * VP6 compatible video decoder * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * The VP6F decoder accepts an optional 1 byte extradata. It is composed of: * - upper 4bits: difference between encoded width and visible width * - lower 4bits: difference between encoded height and visible height * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" #include "huffman.h" #include "vp56.h" #include "vp56data.h" #include "vp6data.h" static void vp6_parse_coeff(VP56Context *s); static void vp6_parse_coeff_huffman(VP56Context *s); static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size, int *golden_frame) { VP56RangeCoder *c = &s->c; int parse_filter_info = 0; int coeff_offset = 0; int vrt_shift = 0; int sub_version; int rows, cols; int res = 1; int separated_coeff = buf[0] & 1; s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80); vp56_init_dequant(s, (buf[0] >> 1) & 0x3F); if (s->framep[VP56_FRAME_CURRENT]->key_frame) { sub_version = buf[1] >> 3; if (sub_version > 8) return 0; s->filter_header = buf[1] & 0x06; if (buf[1] & 1) { av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n"); return 0; } if (separated_coeff || !s->filter_header) { coeff_offset = 
AV_RB16(buf+2) - 2; buf += 2; buf_size -= 2; } rows = buf[2]; /* number of stored macroblock rows */ cols = buf[3]; /* number of stored macroblock cols */ /* buf[4] is number of displayed macroblock rows */ /* buf[5] is number of displayed macroblock cols */ if (!s->macroblocks || /* first frame */ 16*cols != s->avctx->coded_width || 16*rows != s->avctx->coded_height) { avcodec_set_dimensions(s->avctx, 16*cols, 16*rows); if (s->avctx->extradata_size == 1) { s->avctx->width -= s->avctx->extradata[0] >> 4; s->avctx->height -= s->avctx->extradata[0] & 0x0F; } res = 2; } vp56_init_range_decoder(c, buf+6, buf_size-6); vp56_rac_gets(c, 2); parse_filter_info = s->filter_header; if (sub_version < 8) vrt_shift = 5; s->sub_version = sub_version; } else { if (!s->sub_version) return 0; if (separated_coeff || !s->filter_header) { coeff_offset = AV_RB16(buf+1) - 2; buf += 2; buf_size -= 2; } vp56_init_range_decoder(c, buf+1, buf_size-1); *golden_frame = vp56_rac_get(c); if (s->filter_header) { s->deblock_filtering = vp56_rac_get(c); if (s->deblock_filtering) vp56_rac_get(c); if (s->sub_version > 7) parse_filter_info = vp56_rac_get(c); } } if (parse_filter_info) { if (vp56_rac_get(c)) { s->filter_mode = 2; s->sample_variance_threshold = vp56_rac_gets(c, 5) << vrt_shift; s->max_vector_length = 2 << vp56_rac_gets(c, 3); } else if (vp56_rac_get(c)) { s->filter_mode = 1; } else { s->filter_mode = 0; } if (s->sub_version > 7) s->filter_selection = vp56_rac_gets(c, 4); else s->filter_selection = 16; } s->use_huffman = vp56_rac_get(c); s->parse_coeff = vp6_parse_coeff; if (coeff_offset) { buf += coeff_offset; buf_size -= coeff_offset; if (buf_size < 0) return 0; if (s->use_huffman) { s->parse_coeff = vp6_parse_coeff_huffman; init_get_bits(&s->gb, buf, buf_size<<3); } else { vp56_init_range_decoder(&s->cc, buf, buf_size); s->ccp = &s->cc; } } else { s->ccp = &s->c; } return res; } static void vp6_coeff_order_table_init(VP56Context *s) { int i, pos, idx = 1; 
s->modelp->coeff_index_to_pos[0] = 0; for (i=0; i<16; i++) for (pos=1; pos<64; pos++) if (s->modelp->coeff_reorder[pos] == i) s->modelp->coeff_index_to_pos[idx++] = pos; } static void vp6_default_models_init(VP56Context *s) { VP56Model *model = s->modelp; model->vector_dct[0] = 0xA2; model->vector_dct[1] = 0xA4; model->vector_sig[0] = 0x80; model->vector_sig[1] = 0x80; memcpy(model->mb_types_stats, vp56_def_mb_types_stats, sizeof(model->mb_types_stats)); memcpy(model->vector_fdv, vp6_def_fdv_vector_model, sizeof(model->vector_fdv)); memcpy(model->vector_pdv, vp6_def_pdv_vector_model, sizeof(model->vector_pdv)); memcpy(model->coeff_runv, vp6_def_runv_coeff_model, sizeof(model->coeff_runv)); memcpy(model->coeff_reorder, vp6_def_coeff_reorder, sizeof(model->coeff_reorder)); vp6_coeff_order_table_init(s); } static void vp6_parse_vector_models(VP56Context *s) { VP56RangeCoder *c = &s->c; VP56Model *model = s->modelp; int comp, node; for (comp=0; comp<2; comp++) { if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][0])) model->vector_dct[comp] = vp56_rac_gets_nn(c, 7); if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][1])) model->vector_sig[comp] = vp56_rac_gets_nn(c, 7); } for (comp=0; comp<2; comp++) for (node=0; node<7; node++) if (vp56_rac_get_prob(c, vp6_pdv_pct[comp][node])) model->vector_pdv[comp][node] = vp56_rac_gets_nn(c, 7); for (comp=0; comp<2; comp++) for (node=0; node<8; node++) if (vp56_rac_get_prob(c, vp6_fdv_pct[comp][node])) model->vector_fdv[comp][node] = vp56_rac_gets_nn(c, 7); } /* nodes must ascend by count, but with descending symbol order */ static int vp6_huff_cmp(const void *va, const void *vb) { const Node *a = va, *b = vb; return (a->count - b->count)*16 + (b->sym - a->sym); } static void vp6_build_huff_tree(VP56Context *s, uint8_t coeff_model[], const uint8_t *map, unsigned size, VLC *vlc) { Node nodes[2*size], *tmp = &nodes[size]; int a, b, i; /* first compute probabilities from model */ tmp[0].count = 256; for (i=0; i<size-1; i++) { a = 
tmp[i].count * coeff_model[i] >> 8; b = tmp[i].count * (255 - coeff_model[i]) >> 8; nodes[map[2*i ]].count = a + !a; nodes[map[2*i+1]].count = b + !b; } free_vlc(vlc); /* then build the huffman tree accodring to probabilities */ ff_huff_build_tree(s->avctx, vlc, size, nodes, vp6_huff_cmp, FF_HUFFMAN_FLAG_HNODE_FIRST); } static void vp6_parse_coeff_models(VP56Context *s) { VP56RangeCoder *c = &s->c; VP56Model *model = s->modelp; int def_prob[11]; int node, cg, ctx, pos; int ct; /* code type */ int pt; /* plane type (0 for Y, 1 for U or V) */ memset(def_prob, 0x80, sizeof(def_prob)); for (pt=0; pt<2; pt++) for (node=0; node<11; node++) if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) { def_prob[node] = vp56_rac_gets_nn(c, 7); model->coeff_dccv[pt][node] = def_prob[node]; } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) { model->coeff_dccv[pt][node] = def_prob[node]; } if (vp56_rac_get(c)) { for (pos=1; pos<64; pos++) if (vp56_rac_get_prob(c, vp6_coeff_reorder_pct[pos])) model->coeff_reorder[pos] = vp56_rac_gets(c, 4); vp6_coeff_order_table_init(s); } for (cg=0; cg<2; cg++) for (node=0; node<14; node++) if (vp56_rac_get_prob(c, vp6_runv_pct[cg][node])) model->coeff_runv[cg][node] = vp56_rac_gets_nn(c, 7); for (ct=0; ct<3; ct++) for (pt=0; pt<2; pt++) for (cg=0; cg<6; cg++) for (node=0; node<11; node++) if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) { def_prob[node] = vp56_rac_gets_nn(c, 7); model->coeff_ract[pt][ct][cg][node] = def_prob[node]; } else if (s->framep[VP56_FRAME_CURRENT]->key_frame) { model->coeff_ract[pt][ct][cg][node] = def_prob[node]; } if (s->use_huffman) { for (pt=0; pt<2; pt++) { vp6_build_huff_tree(s, model->coeff_dccv[pt], vp6_huff_coeff_map, 12, &s->dccv_vlc[pt]); vp6_build_huff_tree(s, model->coeff_runv[pt], vp6_huff_run_map, 9, &s->runv_vlc[pt]); for (ct=0; ct<3; ct++) for (cg = 0; cg < 6; cg++) vp6_build_huff_tree(s, model->coeff_ract[pt][ct][cg], vp6_huff_coeff_map, 12, &s->ract_vlc[pt][ct][cg]); } memset(s->nb_null, 0, 
sizeof(s->nb_null)); } else { /* coeff_dcct is a linear combination of coeff_dccv */ for (pt=0; pt<2; pt++) for (ctx=0; ctx<3; ctx++) for (node=0; node<5; node++) model->coeff_dcct[pt][ctx][node] = av_clip(((model->coeff_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255); } } static void vp6_parse_vector_adjustment(VP56Context *s, VP56mv *vect) { VP56RangeCoder *c = &s->c; VP56Model *model = s->modelp; int comp; *vect = (VP56mv) {0,0}; if (s->vector_candidate_pos < 2) *vect = s->vector_candidate[0]; for (comp=0; comp<2; comp++) { int i, delta = 0; if (vp56_rac_get_prob(c, model->vector_dct[comp])) { static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4}; for (i=0; i<sizeof(prob_order); i++) { int j = prob_order[i]; delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][j])<<j; } if (delta & 0xF0) delta |= vp56_rac_get_prob(c, model->vector_fdv[comp][3])<<3; else delta |= 8; } else { delta = vp56_rac_get_tree(c, vp56_pva_tree, model->vector_pdv[comp]); } if (delta && vp56_rac_get_prob(c, model->vector_sig[comp])) delta = -delta; if (!comp) vect->x += delta; else vect->y += delta; } } /** * Read number of consecutive blocks with null DC or AC. * This value is < 74. 
*/ static unsigned vp6_get_nb_null(VP56Context *s) { unsigned val = get_bits(&s->gb, 2); if (val == 2) val += get_bits(&s->gb, 2); else if (val == 3) { val = get_bits1(&s->gb) << 2; val = 6+val + get_bits(&s->gb, 2+val); } return val; } static void vp6_parse_coeff_huffman(VP56Context *s) { VP56Model *model = s->modelp; uint8_t *permute = s->scantable.permutated; VLC *vlc_coeff; int coeff, sign, coeff_idx; int b, cg, idx; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ for (b=0; b<6; b++) { int ct = 0; /* code type */ if (b > 3) pt = 1; vlc_coeff = &s->dccv_vlc[pt]; for (coeff_idx=0; coeff_idx<64; ) { int run = 1; if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) { s->nb_null[coeff_idx][pt]--; if (coeff_idx) break; } else { if (get_bits_count(&s->gb) >= s->gb.size_in_bits) return; coeff = get_vlc2(&s->gb, vlc_coeff->table, 9, 3); if (coeff == 0) { if (coeff_idx) { int pt = (coeff_idx >= 6); run += get_vlc2(&s->gb, s->runv_vlc[pt].table, 9, 3); if (run >= 9) run += get_bits(&s->gb, 6); } else s->nb_null[0][pt] = vp6_get_nb_null(s); ct = 0; } else if (coeff == 11) { /* end of block */ if (coeff_idx == 1) /* first AC coeff ? */ s->nb_null[1][pt] = vp6_get_nb_null(s); break; } else { int coeff2 = vp56_coeff_bias[coeff]; if (coeff > 4) coeff2 += get_bits(&s->gb, coeff <= 9 ? 
coeff - 4 : 11); ct = 1 + (coeff2 > 1); sign = get_bits1(&s->gb); coeff2 = (coeff2 ^ -sign) + sign; if (coeff_idx) coeff2 *= s->dequant_ac; idx = model->coeff_index_to_pos[coeff_idx]; s->block_coeff[b][permute[idx]] = coeff2; } } coeff_idx+=run; cg = FFMIN(vp6_coeff_groups[coeff_idx], 3); vlc_coeff = &s->ract_vlc[pt][ct][cg]; } } } static void vp6_parse_coeff(VP56Context *s) { VP56RangeCoder *c = s->ccp; VP56Model *model = s->modelp; uint8_t *permute = s->scantable.permutated; uint8_t *model1, *model2, *model3; int coeff, sign, coeff_idx; int b, i, cg, idx, ctx; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ for (b=0; b<6; b++) { int ct = 1; /* code type */ int run = 1; if (b > 3) pt = 1; ctx = s->left_block[vp56_b6to4[b]].not_null_dc + s->above_blocks[s->above_block_idx[b]].not_null_dc; model1 = model->coeff_dccv[pt]; model2 = model->coeff_dcct[pt][ctx]; for (coeff_idx=0; coeff_idx<64; ) { if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob(c, model2[0])) { /* parse a coeff */ if (vp56_rac_get_prob(c, model2[2])) { if (vp56_rac_get_prob(c, model2[3])) { idx = vp56_rac_get_tree(c, vp56_pc_tree, model1); coeff = vp56_coeff_bias[idx+5]; for (i=vp56_coeff_bit_length[idx]; i>=0; i--) coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i; } else { if (vp56_rac_get_prob(c, model2[4])) coeff = 3 + vp56_rac_get_prob(c, model1[5]); else coeff = 2; } ct = 2; } else { ct = 1; coeff = 1; } sign = vp56_rac_get(c); coeff = (coeff ^ -sign) + sign; if (coeff_idx) coeff *= s->dequant_ac; idx = model->coeff_index_to_pos[coeff_idx]; s->block_coeff[b][permute[idx]] = coeff; run = 1; } else { /* parse a run */ ct = 0; if (coeff_idx > 0) { if (!vp56_rac_get_prob(c, model2[1])) break; model3 = model->coeff_runv[coeff_idx >= 6]; run = vp56_rac_get_tree(c, vp6_pcr_tree, model3); if (!run) for (run=9, i=0; i<6; i++) run += vp56_rac_get_prob(c, model3[i+8]) << i; } } cg = vp6_coeff_groups[coeff_idx+=run]; model1 = model2 = model->coeff_ract[pt][ct][cg]; } 
s->left_block[vp56_b6to4[b]].not_null_dc = s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0]; } } static int vp6_block_variance(uint8_t *src, int stride) { int sum = 0, square_sum = 0; int y, x; for (y=0; y<8; y+=2) { for (x=0; x<8; x+=2) { sum += src[x]; square_sum += src[x]*src[x]; } src += 2*stride; } return (16*square_sum - sum*sum) >> 8; } static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, int stride, int delta, const int16_t *weights) { int x, y; for (y=0; y<8; y++) { for (x=0; x<8; x++) { dst[x] = av_clip_uint8(( src[x-delta ] * weights[0] + src[x ] * weights[1] + src[x+delta ] * weights[2] + src[x+2*delta] * weights[3] + 64) >> 7); } src += stride; dst += stride; } } static void vp6_filter_diag2(VP56Context *s, uint8_t *dst, uint8_t *src, int stride, int h_weight, int v_weight) { uint8_t *tmp = s->edge_emu_buffer+16; s->dsp.put_h264_chroma_pixels_tab[0](tmp, src, stride, 9, h_weight, 0); s->dsp.put_h264_chroma_pixels_tab[0](dst, tmp, stride, 8, 0, v_weight); } static void vp6_filter(VP56Context *s, uint8_t *dst, uint8_t *src, int offset1, int offset2, int stride, VP56mv mv, int mask, int select, int luma) { int filter4 = 0; int x8 = mv.x & mask; int y8 = mv.y & mask; if (luma) { x8 *= 2; y8 *= 2; filter4 = s->filter_mode; if (filter4 == 2) { if (s->max_vector_length && (FFABS(mv.x) > s->max_vector_length || FFABS(mv.y) > s->max_vector_length)) { filter4 = 0; } else if (s->sample_variance_threshold && (vp6_block_variance(src+offset1, stride) < s->sample_variance_threshold)) { filter4 = 0; } } } if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) { offset1 = offset2; } if (filter4) { if (!y8) { /* left or right combine */ vp6_filter_hv4(dst, src+offset1, stride, 1, vp6_block_copy_filter[select][x8]); } else if (!x8) { /* above or below combine */ vp6_filter_hv4(dst, src+offset1, stride, stride, vp6_block_copy_filter[select][y8]); } else { s->dsp.vp6_filter_diag4(dst, src+offset1+((mv.x^mv.y)>>31), stride, 
vp6_block_copy_filter[select][x8], vp6_block_copy_filter[select][y8]); } } else { if (!x8 || !y8) { s->dsp.put_h264_chroma_pixels_tab[0](dst, src+offset1, stride, 8, x8, y8); } else { vp6_filter_diag2(s, dst, src+offset1 + ((mv.x^mv.y)>>31), stride, x8, y8); } } } static av_cold int vp6_decode_init(AVCodecContext *avctx) { VP56Context *s = avctx->priv_data; vp56_init(avctx, avctx->codec->id == CODEC_ID_VP6, avctx->codec->id == CODEC_ID_VP6A); s->vp56_coord_div = vp6_coord_div; s->parse_vector_adjustment = vp6_parse_vector_adjustment; s->filter = vp6_filter; s->default_models_init = vp6_default_models_init; s->parse_vector_models = vp6_parse_vector_models; s->parse_coeff_models = vp6_parse_coeff_models; s->parse_header = vp6_parse_header; return 0; } static av_cold int vp6_decode_free(AVCodecContext *avctx) { VP56Context *s = avctx->priv_data; int pt, ct, cg; vp56_free(avctx); for (pt=0; pt<2; pt++) { free_vlc(&s->dccv_vlc[pt]); free_vlc(&s->runv_vlc[pt]); for (ct=0; ct<3; ct++) for (cg=0; cg<6; cg++) free_vlc(&s->ract_vlc[pt][ct][cg]); } return 0; } AVCodec vp6_decoder = { "vp6", AVMEDIA_TYPE_VIDEO, CODEC_ID_VP6, sizeof(VP56Context), vp6_decode_init, NULL, vp6_decode_free, vp56_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6"), }; /* flash version, not flipped upside-down */ AVCodec vp6f_decoder = { "vp6f", AVMEDIA_TYPE_VIDEO, CODEC_ID_VP6F, sizeof(VP56Context), vp6_decode_init, NULL, vp6_decode_free, vp56_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version)"), }; /* flash version, not flipped upside-down, with alpha channel */ AVCodec vp6a_decoder = { "vp6a", AVMEDIA_TYPE_VIDEO, CODEC_ID_VP6A, sizeof(VP56Context), vp6_decode_init, NULL, vp6_decode_free, vp56_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("On2 VP6 (Flash version, with alpha channel)"), };
123linslouis-android-video-cutter
jni/libavcodec/vp6.c
C
asf20
21,501
/* * MACE decoder * Copyright (c) 2002 Laszlo Torok <torokl@alpha.dfmk.hu> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MACE decoder. */ #include "avcodec.h" /* * Adapted to ffmpeg by Francois Revol <revol@free.fr> * (removed 68k REG stuff, changed types, added some statics and consts, * libavcodec api, context stuff, interlaced stereo out). 
*/ static const int16_t MACEtab1[] = {-13, 8, 76, 222, 222, 76, 8, -13}; static const int16_t MACEtab3[] = {-18, 140, 140, -18}; static const int16_t MACEtab2[][4] = { { 37, 116, 206, 330}, { 39, 121, 216, 346}, { 41, 127, 225, 361}, { 42, 132, 235, 377}, { 44, 137, 245, 392}, { 46, 144, 256, 410}, { 48, 150, 267, 428}, { 51, 157, 280, 449}, { 53, 165, 293, 470}, { 55, 172, 306, 490}, { 58, 179, 319, 511}, { 60, 187, 333, 534}, { 63, 195, 348, 557}, { 66, 205, 364, 583}, { 69, 214, 380, 609}, { 72, 223, 396, 635}, { 75, 233, 414, 663}, { 79, 244, 433, 694}, { 82, 254, 453, 725}, { 86, 265, 472, 756}, { 90, 278, 495, 792}, { 94, 290, 516, 826}, { 98, 303, 538, 862}, { 102, 316, 562, 901}, { 107, 331, 588, 942}, { 112, 345, 614, 983}, { 117, 361, 641, 1027}, { 122, 377, 670, 1074}, { 127, 394, 701, 1123}, { 133, 411, 732, 1172}, { 139, 430, 764, 1224}, { 145, 449, 799, 1280}, { 152, 469, 835, 1337}, { 159, 490, 872, 1397}, { 166, 512, 911, 1459}, { 173, 535, 951, 1523}, { 181, 558, 993, 1590}, { 189, 584, 1038, 1663}, { 197, 610, 1085, 1738}, { 206, 637, 1133, 1815}, { 215, 665, 1183, 1895}, { 225, 695, 1237, 1980}, { 235, 726, 1291, 2068}, { 246, 759, 1349, 2161}, { 257, 792, 1409, 2257}, { 268, 828, 1472, 2357}, { 280, 865, 1538, 2463}, { 293, 903, 1606, 2572}, { 306, 944, 1678, 2688}, { 319, 986, 1753, 2807}, { 334, 1030, 1832, 2933}, { 349, 1076, 1914, 3065}, { 364, 1124, 1999, 3202}, { 380, 1174, 2088, 3344}, { 398, 1227, 2182, 3494}, { 415, 1281, 2278, 3649}, { 434, 1339, 2380, 3811}, { 453, 1398, 2486, 3982}, { 473, 1461, 2598, 4160}, { 495, 1526, 2714, 4346}, { 517, 1594, 2835, 4540}, { 540, 1665, 2961, 4741}, { 564, 1740, 3093, 4953}, { 589, 1818, 3232, 5175}, { 615, 1898, 3375, 5405}, { 643, 1984, 3527, 5647}, { 671, 2072, 3683, 5898}, { 701, 2164, 3848, 6161}, { 733, 2261, 4020, 6438}, { 766, 2362, 4199, 6724}, { 800, 2467, 4386, 7024}, { 836, 2578, 4583, 7339}, { 873, 2692, 4786, 7664}, { 912, 2813, 5001, 8008}, { 952, 2938, 5223, 8364}, { 995, 3070, 
5457, 8739}, { 1039, 3207, 5701, 9129}, { 1086, 3350, 5956, 9537}, { 1134, 3499, 6220, 9960}, { 1185, 3655, 6497, 10404}, { 1238, 3818, 6788, 10869}, { 1293, 3989, 7091, 11355}, { 1351, 4166, 7407, 11861}, { 1411, 4352, 7738, 12390}, { 1474, 4547, 8084, 12946}, { 1540, 4750, 8444, 13522}, { 1609, 4962, 8821, 14126}, { 1680, 5183, 9215, 14756}, { 1756, 5415, 9626, 15415}, { 1834, 5657, 10057, 16104}, { 1916, 5909, 10505, 16822}, { 2001, 6173, 10975, 17574}, { 2091, 6448, 11463, 18356}, { 2184, 6736, 11974, 19175}, { 2282, 7037, 12510, 20032}, { 2383, 7351, 13068, 20926}, { 2490, 7679, 13652, 21861}, { 2601, 8021, 14260, 22834}, { 2717, 8380, 14897, 23854}, { 2838, 8753, 15561, 24918}, { 2965, 9144, 16256, 26031}, { 3097, 9553, 16982, 27193}, { 3236, 9979, 17740, 28407}, { 3380, 10424, 18532, 29675}, { 3531, 10890, 19359, 31000}, { 3688, 11375, 20222, 32382}, { 3853, 11883, 21125, 32767}, { 4025, 12414, 22069, 32767}, { 4205, 12967, 23053, 32767}, { 4392, 13546, 24082, 32767}, { 4589, 14151, 25157, 32767}, { 4793, 14783, 26280, 32767}, { 5007, 15442, 27452, 32767}, { 5231, 16132, 28678, 32767}, { 5464, 16851, 29957, 32767}, { 5708, 17603, 31294, 32767}, { 5963, 18389, 32691, 32767}, { 6229, 19210, 32767, 32767}, { 6507, 20067, 32767, 32767}, { 6797, 20963, 32767, 32767}, { 7101, 21899, 32767, 32767}, { 7418, 22876, 32767, 32767}, { 7749, 23897, 32767, 32767}, { 8095, 24964, 32767, 32767}, { 8456, 26078, 32767, 32767}, { 8833, 27242, 32767, 32767}, { 9228, 28457, 32767, 32767}, { 9639, 29727, 32767, 32767} }; static const int16_t MACEtab4[][2] = { { 64, 216}, { 67, 226}, { 70, 236}, { 74, 246}, { 77, 257}, { 80, 268}, { 84, 280}, { 88, 294}, { 92, 307}, { 96, 321}, { 100, 334}, { 104, 350}, { 109, 365}, { 114, 382}, { 119, 399}, { 124, 416}, { 130, 434}, { 136, 454}, { 142, 475}, { 148, 495}, { 155, 519}, { 162, 541}, { 169, 564}, { 176, 590}, { 185, 617}, { 193, 644}, { 201, 673}, { 210, 703}, { 220, 735}, { 230, 767}, { 240, 801}, { 251, 838}, { 262, 876}, { 274, 
914}, { 286, 955}, { 299, 997}, { 312, 1041}, { 326, 1089}, { 341, 1138}, { 356, 1188}, { 372, 1241}, { 388, 1297}, { 406, 1354}, { 424, 1415}, { 443, 1478}, { 462, 1544}, { 483, 1613}, { 505, 1684}, { 527, 1760}, { 551, 1838}, { 576, 1921}, { 601, 2007}, { 628, 2097}, { 656, 2190}, { 686, 2288}, { 716, 2389}, { 748, 2496}, { 781, 2607}, { 816, 2724}, { 853, 2846}, { 891, 2973}, { 930, 3104}, { 972, 3243}, { 1016, 3389}, { 1061, 3539}, { 1108, 3698}, { 1158, 3862}, { 1209, 4035}, { 1264, 4216}, { 1320, 4403}, { 1379, 4599}, { 1441, 4806}, { 1505, 5019}, { 1572, 5244}, { 1642, 5477}, { 1715, 5722}, { 1792, 5978}, { 1872, 6245}, { 1955, 6522}, { 2043, 6813}, { 2134, 7118}, { 2229, 7436}, { 2329, 7767}, { 2432, 8114}, { 2541, 8477}, { 2655, 8854}, { 2773, 9250}, { 2897, 9663}, { 3026, 10094}, { 3162, 10546}, { 3303, 11016}, { 3450, 11508}, { 3604, 12020}, { 3765, 12556}, { 3933, 13118}, { 4108, 13703}, { 4292, 14315}, { 4483, 14953}, { 4683, 15621}, { 4892, 16318}, { 5111, 17046}, { 5339, 17807}, { 5577, 18602}, { 5826, 19433}, { 6086, 20300}, { 6358, 21205}, { 6642, 22152}, { 6938, 23141}, { 7248, 24173}, { 7571, 25252}, { 7909, 26380}, { 8262, 27557}, { 8631, 28786}, { 9016, 30072}, { 9419, 31413}, { 9839, 32767}, { 10278, 32767}, { 10737, 32767}, { 11216, 32767}, { 11717, 32767}, { 12240, 32767}, { 12786, 32767}, { 13356, 32767}, { 13953, 32767}, { 14576, 32767}, { 15226, 32767}, { 15906, 32767}, { 16615, 32767} }; static const struct { const int16_t *tab1; const int16_t *tab2; int stride; } tabs[] = { {MACEtab1, &MACEtab2[0][0], 4}, {MACEtab3, &MACEtab4[0][0], 2}, {MACEtab1, &MACEtab2[0][0], 4} }; #define QT_8S_2_16S(x) (((x) & 0xFF00) | (((x) >> 8) & 0xFF)) typedef struct ChannelData { int16_t index, factor, prev2, previous, level; } ChannelData; typedef struct MACEContext { ChannelData chd[2]; } MACEContext; /** * MACE version of av_clip_int16(). We have to do this to keep binary * identical output to the binary decoder. 
*/ static inline int16_t mace_broken_clip_int16(int n) { if (n > 32767) return 32767; else if (n < -32768) return -32767; else return n; } static int16_t read_table(ChannelData *chd, uint8_t val, int tab_idx) { int16_t current; if (val < tabs[tab_idx].stride) current = tabs[tab_idx].tab2[((chd->index & 0x7f0) >> 4) * tabs[tab_idx].stride + val]; else current = - 1 - tabs[tab_idx].tab2[((chd->index & 0x7f0) >> 4)*tabs[tab_idx].stride + 2*tabs[tab_idx].stride-val-1]; if (( chd->index += tabs[tab_idx].tab1[val]-(chd->index >> 5) ) < 0) chd->index = 0; return current; } static void chomp3(ChannelData *chd, int16_t *output, uint8_t val, int tab_idx, uint32_t numChannels) { int16_t current = read_table(chd, val, tab_idx); current = mace_broken_clip_int16(current + chd->level); chd->level = current - (current >> 3); *output = QT_8S_2_16S(current); } static void chomp6(ChannelData *chd, int16_t *output, uint8_t val, int tab_idx, uint32_t numChannels) { int16_t current = read_table(chd, val, tab_idx); if ((chd->previous ^ current) >= 0) { chd->factor = FFMIN(chd->factor + 506, 32767); } else { if (chd->factor - 314 < -32768) chd->factor = -32767; else chd->factor -= 314; } current = mace_broken_clip_int16(current + chd->level); chd->level = (current*chd->factor) >> 15; current >>= 1; output[0] = QT_8S_2_16S(chd->previous + chd->prev2 - ((chd->prev2-current) >> 2)); output[numChannels] = QT_8S_2_16S(chd->previous + current + ((chd->prev2-current) >> 2)); chd->prev2 = chd->previous; chd->previous = current; } static av_cold int mace_decode_init(AVCodecContext * avctx) { if (avctx->channels > 2) return -1; avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } static int mace_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int16_t *samples = data; MACEContext *ctx = avctx->priv_data; int i, j, k, l; int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3); if (*data_size < (3 * buf_size << 
(2-is_mace3))) { av_log(avctx, AV_LOG_ERROR, "Output buffer too small!\n"); return -1; } for(i = 0; i < avctx->channels; i++) { int16_t *output = samples + i; for (j=0; j < buf_size / (avctx->channels << is_mace3); j++) for (k=0; k < (1 << is_mace3); k++) { uint8_t pkt = buf[(i << is_mace3) + (j*avctx->channels << is_mace3) + k]; uint8_t val[2][3] = {{pkt >> 5, (pkt >> 3) & 3, pkt & 7 }, {pkt & 7 , (pkt >> 3) & 3, pkt >> 5}}; for (l=0; l < 3; l++) { if (is_mace3) chomp3(&ctx->chd[i], output, val[1][l], l, avctx->channels); else chomp6(&ctx->chd[i], output, val[0][l], l, avctx->channels); output += avctx->channels << (1-is_mace3); } } } *data_size = 3 * buf_size << (2-is_mace3); return buf_size; } AVCodec mace3_decoder = { "mace3", AVMEDIA_TYPE_AUDIO, CODEC_ID_MACE3, sizeof(MACEContext), mace_decode_init, NULL, NULL, mace_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"), }; AVCodec mace6_decoder = { "mace6", AVMEDIA_TYPE_AUDIO, CODEC_ID_MACE6, sizeof(MACEContext), mace_decode_init, NULL, NULL, mace_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"), };
123linslouis-android-video-cutter
jni/libavcodec/mace.c
C
asf20
12,972
/* * DNxHD/VC-3 parser * Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@free.fr> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * DNxHD/VC-3 parser */ #include "parser.h" #define DNXHD_HEADER_PREFIX 0x0000028001 static int dnxhd_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) { uint64_t state = pc->state64; int pic_found = pc->frame_start_found; int i = 0; if (!pic_found) { for (i = 0; i < buf_size; i++) { state = (state<<8) | buf[i]; if ((state & 0xffffffffffLL) == DNXHD_HEADER_PREFIX) { i++; pic_found = 1; break; } } } if (pic_found) { if (!buf_size) /* EOF considered as end of frame */ return 0; for (; i < buf_size; i++) { state = (state<<8) | buf[i]; if ((state & 0xffffffffffLL) == DNXHD_HEADER_PREFIX) { pc->frame_start_found = 0; pc->state64 = -1; return i-4; } } } pc->frame_start_found = pic_found; pc->state64 = state; return END_NOT_FOUND; } static int dnxhd_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { ParseContext *pc = s->priv_data; int next; if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { next = buf_size; } else { next = dnxhd_find_frame_end(pc, buf, buf_size); if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { *poutbuf = NULL; 
*poutbuf_size = 0; return buf_size; } } *poutbuf = buf; *poutbuf_size = buf_size; return next; } AVCodecParser dnxhd_parser = { { CODEC_ID_DNXHD }, sizeof(ParseContext), NULL, dnxhd_parse, ff_parse_close, };
123linslouis-android-video-cutter
jni/libavcodec/dnxhd_parser.c
C
asf20
2,708
/*
 * Generic DCT based hybrid video encoder
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * mpegvideo header.
 */

#ifndef AVCODEC_MPEGVIDEO_H
#define AVCODEC_MPEGVIDEO_H

#include "dsputil.h"
#include "get_bits.h"
#include "put_bits.h"
#include "ratecontrol.h"
#include "parser.h"
#include "mpeg12data.h"
#include "rl.h"

#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded

enum OutputFormat {
    FMT_MPEG1,
    FMT_H261,
    FMT_H263,
    FMT_MJPEG,
    FMT_H264,
};

#define MPEG_BUF_SIZE (16 * 1024)

#define QMAT_SHIFT_MMX 16
#define QMAT_SHIFT 22

#define MAX_FCODE 7
#define MAX_MV 2048

#define MAX_THREADS 16

#define MAX_PICTURE_COUNT 32

#define ME_MAP_SIZE 64
#define ME_MAP_SHIFT 3
#define ME_MAP_MV_BITS 11

#define MAX_MB_BYTES (30*16*16*3/8 + 120)

#define INPLACE_OFFSET 16

/* Start codes. */
#define SEQ_END_CODE 0x000001b7
#define SEQ_START_CODE 0x000001b3
#define GOP_START_CODE 0x000001b8
#define PICTURE_START_CODE 0x00000100
#define SLICE_MIN_START_CODE 0x00000101
#define SLICE_MAX_START_CODE 0x000001af
#define EXT_START_CODE 0x000001b5
#define USER_START_CODE 0x000001b2

/**
 * Picture.
 */
typedef struct Picture{
    FF_COMMON_FRAME

    /**
     * halfpel luma planes.
     */
    uint8_t *interpolated[3];
    int16_t (*motion_val_base[2])[2];
    uint32_t *mb_type_base;
#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
#define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
#define IS_INTRA(a) ((a)&7)
#define IS_INTER(a) ((a)&(MB_TYPE_16x16|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8))
#define IS_SKIP(a) ((a)&MB_TYPE_SKIP)
#define IS_INTRA_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
#define IS_INTERLACED(a) ((a)&MB_TYPE_INTERLACED)
#define IS_DIRECT(a) ((a)&MB_TYPE_DIRECT2)
#define IS_GMC(a) ((a)&MB_TYPE_GMC)
#define IS_16X16(a) ((a)&MB_TYPE_16x16)
#define IS_16X8(a) ((a)&MB_TYPE_16x8)
#define IS_8X16(a) ((a)&MB_TYPE_8x16)
#define IS_8X8(a) ((a)&MB_TYPE_8x8)
#define IS_SUB_8X8(a) ((a)&MB_TYPE_16x16) //note reused
#define IS_SUB_8X4(a) ((a)&MB_TYPE_16x8) //note reused
#define IS_SUB_4X8(a) ((a)&MB_TYPE_8x16) //note reused
#define IS_SUB_4X4(a) ((a)&MB_TYPE_8x8) //note reused
#define IS_ACPRED(a) ((a)&MB_TYPE_ACPRED)
#define IS_QUANT(a) ((a)&MB_TYPE_QUANT)
#define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list))))
#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note does not work if subMBs
#define HAS_CBP(a) ((a)&MB_TYPE_CBP)

    int field_poc[2]; ///< h264 top/bottom POC
    int poc; ///< h264 frame POC
    int frame_num; ///< h264 frame_num (raw frame_num from slice header)
    int mmco_reset; ///< h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET.
    int pic_id; /**< h264 pic_num (short -> no wrap version of pic_num, pic_num & max_pic_num; long -> long_pic_num) */
    int long_ref; ///< 1->long term reference 0->short term reference
    int ref_poc[2][2][16]; ///< h264 POCs of the frames used as reference (FIXME need per slice)
    int ref_count[2][2]; ///< number of entries in ref_poc (FIXME need per slice)
    int mbaff; ///< h264 1 -> MBAFF frame 0-> not MBAFF

    int mb_var_sum; ///< sum of MB variance for current frame
    int mc_mb_var_sum; ///< motion compensated MB variance for current frame
    uint16_t *mb_var; ///< Table for MB variances
    uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
    uint8_t *mb_mean; ///< Table for MB luminance
    int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decision FIXME remove
    int b_frame_score; /* */
} Picture;

struct MpegEncContext;

/**
 * Motion estimation context.
 */
typedef struct MotionEstContext{
    AVCodecContext *avctx;
    int skip; ///< set if ME is skipped for the current MB
    int co_located_mv[4][2]; ///< mv from last P-frame for direct mode ME
    int direct_basis_mv[4][2];
    uint8_t *scratchpad; ///< data area for the ME algo, so that the ME does not need to malloc/free
    uint8_t *best_mb;
    uint8_t *temp_mb[2];
    uint8_t *temp;
    int best_bits;
    uint32_t *map; ///< map to avoid duplicate evaluations
    uint32_t *score_map; ///< map to store the scores
    int map_generation;
    int pre_penalty_factor;
    int penalty_factor; /*!< an estimate of the bits required to code a given mv value, e.g. (1,0) takes more bits than (0,0). We have to estimate whether any reduction in residual is worth the extra bits. */
    int sub_penalty_factor;
    int mb_penalty_factor;
    int flags;
    int sub_flags;
    int mb_flags;
    int pre_pass; ///< = 1 for the pre pass
    int dia_size;
    int xmin;
    int xmax;
    int ymin;
    int ymax;
    int pred_x;
    int pred_y;
    uint8_t *src[4][4];
    uint8_t *ref[4][4];
    int stride;
    int uvstride;
    /* temp variables for picture complexity calculation */
    int mc_mb_var_sum_temp;
    int mb_var_sum_temp;
    int scene_change_score;
/*    cmp, chroma_cmp;*/
    op_pixels_func (*hpel_put)[4];
    op_pixels_func (*hpel_avg)[4];
    qpel_mc_func (*qpel_put)[16];
    qpel_mc_func (*qpel_avg)[16];
    uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV
    uint8_t *current_mv_penalty;
    int (*sub_motion_search)(struct MpegEncContext * s,
                             int *mx_ptr, int *my_ptr, int dmin,
                             int src_index, int ref_index,
                             int size, int h);
}MotionEstContext;

/**
 * MpegEncContext.
 */
typedef struct MpegEncContext {
    struct AVCodecContext *avctx;
    /* the following parameters must be initialized before encoding */
    int width, height;///< picture size. must be a multiple of 16
    int gop_size;
    int intra_only; ///< if true, only intra pictures are generated
    int bit_rate; ///< wanted bit rate
    enum OutputFormat out_format; ///< output format
    int h263_pred; ///< use mpeg4/h263 ac/dc predictions
    int pb_frame; ///< PB frame mode (0 = none, 1 = base, 2 = improved)

/* the following codec id fields are deprecated in favor of codec_id */
    int h263_plus; ///< h263 plus headers
    int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead)
    int h263_flv; ///< use flv h263 header

    enum CodecID codec_id; /* see CODEC_ID_xxx */
    int fixed_qscale; ///< fixed qscale if non zero
    int encoding; ///< true if we are encoding (vs decoding)
    int flags; ///< AVCodecContext.flags (HQ, MV4, ...)
    int flags2; ///< AVCodecContext.flags2
    int max_b_frames; ///< max number of b-frames for encoding
    int luma_elim_threshold;
    int chroma_elim_threshold;
    int strict_std_compliance; ///< strictly follow the std (MPEG4, ...)
    int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
    int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
    int stream_codec_tag; ///< internal stream_codec_tag upper case converted from avctx stream_codec_tag
    /* the following fields are managed internally by the encoder */

    /** bit output */
    PutBitContext pb;

    /* sequence parameters */
    int context_initialized;
    int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else
    int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
    int picture_number; //FIXME remove, unclear definition
    int picture_in_gop_number; ///< 0-> first pic in gop, ...
    int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input
    int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video()
    int mb_width, mb_height; ///< number of MBs horizontally & vertically
    int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
    int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
    int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
    int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
    int mb_num; ///< number of MBs of a picture
    int linesize; ///< line size, in bytes, may be different from width
    int uvlinesize; ///< line size, for chroma in bytes, may be different from width
    Picture *picture; ///< main picture buffer
    Picture **input_picture; ///< next pictures on display order for encoding
    Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding

    int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
    int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
    struct MpegEncContext *thread_context[MAX_THREADS];

    /**
     * copy of the previous picture structure.
     * note, linesize & data, might not match the previous picture (for field pictures)
     */
    Picture last_picture;

    /**
     * copy of the next picture structure.
     * note, linesize & data, might not match the next picture (for field pictures)
     */
    Picture next_picture;

    /**
     * copy of the source picture structure for encoding.
     * note, linesize & data, might not match the source picture (for field pictures)
     */
    Picture new_picture;

    /**
     * copy of the current picture structure.
     * note, linesize & data, might not match the current picture (for field pictures)
     */
    Picture current_picture; ///< buffer to store the decompressed current picture

    Picture *last_picture_ptr; ///< pointer to the previous picture.
    Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
    Picture *current_picture_ptr; ///< pointer to the current picture
    uint8_t *visualization_buffer[3]; ///< temporary buffer for MV visualization
    int last_dc[3]; ///< last DC values for MPEG1
    int16_t *dc_val_base;
    int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
    int16_t dc_cache[4*5];
    int y_dc_scale, c_dc_scale;
    const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table
    const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table
    const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263)
    uint8_t *coded_block_base;
    uint8_t *coded_block; ///< used for coded block pattern prediction (msmpeg4v3, wmv1)
    int16_t (*ac_val_base)[16];
    int16_t (*ac_val[3])[16]; ///< used for mpeg4 AC prediction, all 3 arrays must be continuous
    int ac_pred;
    uint8_t *prev_pict_types; ///< previous picture types in bitstream order, used for mb skip
#define PREV_PICT_TYPES_BUFFER_SIZE 256
    int mb_skipped; ///< MUST BE SET only during DECODING
    uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encoding & decoding (contains skip table of next P Frame) */
    uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
    uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding
    uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding

    uint8_t *allocated_edge_emu_buffer;
    uint8_t *edge_emu_buffer; ///< points into the middle of allocated_edge_emu_buffer
    uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision
    uint8_t *obmc_scratchpad;
    uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers

    int qscale; ///< QP
    int chroma_qscale; ///< chroma QP
    unsigned int lambda; ///< Lagrange multiplier used in rate distortion
    unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
    int *lambda_table;
    int adaptive_quant; ///< use adaptive quantization
    int dquant; ///< qscale difference to prev qscale
    int closed_gop; ///< MPEG1/2 GOP is closed
    int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ...
    int last_pict_type; //FIXME remove
    int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
    int dropable;
    int frame_rate_index;
    int last_lambda_for[5]; ///< last lambda for a specific pict type
    int skipdct; ///< skip dct and code zero residual

    /* motion compensation */
    int unrestricted_mv; ///< mv can point outside of the coded picture
    int h263_long_vectors; ///< use horrible h263v1 long vector mode
    int decode; ///< if 0 then decoding will be skipped (for encoding b frames for example)

    DSPContext dsp; ///< pointers for accelerated dsp functions
    int f_code; ///< forward MV resolution
    int b_code; ///< backward MV resolution for B Frames (mpeg4)
    int16_t (*p_mv_table_base)[2];
    int16_t (*b_forw_mv_table_base)[2];
    int16_t (*b_back_mv_table_base)[2];
    int16_t (*b_bidir_forw_mv_table_base)[2];
    int16_t (*b_bidir_back_mv_table_base)[2];
    int16_t (*b_direct_mv_table_base)[2];
    int16_t (*p_field_mv_table_base[2][2])[2];
    int16_t (*b_field_mv_table_base[2][2][2])[2];
    int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding
    int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding
    int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding
    int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
    int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
    int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding
    int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding
    int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding
    uint8_t (*p_field_select_table[2]);
    uint8_t (*b_field_select_table[2][2]);
    int me_method; ///< ME algorithm

    int mv_dir;
#define MV_DIR_FORWARD 1
#define MV_DIR_BACKWARD 2
#define MV_DIRECT 4 ///< bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4)

    int mv_type;
#define MV_TYPE_16X16 0 ///< 1 vector for the whole mb
#define MV_TYPE_8X8 1 ///< 4 vectors (h263, mpeg4 4MV)
#define MV_TYPE_16X8 2 ///< 2 vectors, one per 16x8 block
#define MV_TYPE_FIELD 3 ///< 2 vectors, one per field
#define MV_TYPE_DMV 4 ///< 2 vectors, special mpeg2 Dual Prime Vectors
    /**motion vectors for a macroblock
       first coordinate : 0 = forward 1 = backward
       second "         : depend on type
       third  "         : 0 = x, 1 = y
    */
    int mv[2][4][2];
    int field_select[2][2];
    int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG1 & B-frame MPEG4
    uint8_t *fcode_tab; ///< smallest fcode needed for each MV
    int16_t direct_scale_mv[2][64]; ///< precomputed to avoid divisions in ff_mpeg4_set_direct_mv

    MotionEstContext me;

    int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0 */

    int hurry_up; /**< when set to 1 during decoding, b frames will be skipped when set to 2 idct/dequant will be skipped too */

    /* macroblock layer */
    int mb_x, mb_y;
    int mb_skip_run;
    int mb_intra;
    uint16_t *mb_type; ///< Table for candidate MB types for encoding
#define CANDIDATE_MB_TYPE_INTRA 0x01
#define CANDIDATE_MB_TYPE_INTER 0x02
#define CANDIDATE_MB_TYPE_INTER4V 0x04
#define CANDIDATE_MB_TYPE_SKIPPED 0x08
//#define MB_TYPE_GMC 0x10
#define CANDIDATE_MB_TYPE_DIRECT 0x10
#define CANDIDATE_MB_TYPE_FORWARD 0x20
#define CANDIDATE_MB_TYPE_BACKWARD 0x40
#define CANDIDATE_MB_TYPE_BIDIR 0x80
#define CANDIDATE_MB_TYPE_INTER_I 0x100
#define CANDIDATE_MB_TYPE_FORWARD_I 0x200
#define CANDIDATE_MB_TYPE_BACKWARD_I 0x400
#define CANDIDATE_MB_TYPE_BIDIR_I 0x800
#define CANDIDATE_MB_TYPE_DIRECT0 0x1000
    int block_index[6]; ///< index to current MB in block based arrays with edges
    int block_wrap[6];
    uint8_t *dest[3];
    int *mb_index2xy; ///< mb_index -> mb_x + mb_y*mb_stride

    /** matrix transmitted in the bitstream */
    uint16_t intra_matrix[64];
    uint16_t chroma_intra_matrix[64];
    uint16_t inter_matrix[64];
    uint16_t chroma_inter_matrix[64];
#define QUANT_BIAS_SHIFT 8
    int intra_quant_bias; ///< bias for the quantizer
    int inter_quant_bias; ///< bias for the quantizer
    int min_qcoeff; ///< minimum encodable coefficient
    int max_qcoeff; ///< maximum encodable coefficient
    int ac_esc_length; ///< num of bits needed to encode the longest esc
    uint8_t *intra_ac_vlc_length;
    uint8_t *intra_ac_vlc_last_length;
    uint8_t *inter_ac_vlc_length;
    uint8_t *inter_ac_vlc_last_length;
    uint8_t *luma_dc_vlc_length;
    uint8_t *chroma_dc_vlc_length;
#define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level))
    int coded_score[8];

    /** precomputed matrix (combine qscale and DCT renorm) */
    int (*q_intra_matrix)[64];
    int (*q_inter_matrix)[64];
    /** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/
    uint16_t (*q_intra_matrix16)[2][64];
    uint16_t (*q_inter_matrix16)[2][64];
    int block_last_index[12]; ///< last non zero coefficient in block

    /* scantables */
    ScanTable intra_scantable;
    ScanTable intra_h_scantable;
    ScanTable intra_v_scantable;
    ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce the cache usage

    /* noise reduction */
    int (*dct_error_sum)[64];
    int dct_count[2];
    uint16_t (*dct_offset)[64];

    void *opaque; ///< private data for the user

    /* bit rate control */
    int64_t wanted_bits;
    int64_t total_bits;
    int frame_bits; ///< bits used for the current frame
    int next_lambda; ///< next lambda used for retrying to encode a frame
    RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c

    /* statistics, used for 2-pass encoding */
    int mv_bits;
    int header_bits;
    int i_tex_bits;
    int p_tex_bits;
    int i_count;
    int f_count;
    int b_count;
    int skip_count;
    int misc_bits; ///< cbp, mb_type
    int last_bits; ///< temp var used for calculating the above vars

    /* error concealment / resync */
    int error_count;
    uint8_t *error_status_table; ///< table of the error status of each MB
#define VP_START 1 ///< current MB is the first after a resync marker
#define AC_ERROR 2
#define DC_ERROR 4
#define MV_ERROR 8
#define AC_END 16
#define DC_END 32
#define MV_END 64
//FIXME some prefix?

    int resync_mb_x; ///< x position of last resync marker
    int resync_mb_y; ///< y position of last resync marker
    GetBitContext last_resync_gb; ///< used to search for the next resync marker
    int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
    int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed b frames
    int error_recognition;

    ParseContext parse_context;

    /* H.263 specific */
    int gob_index;
    int obmc; ///< overlapped block motion compensation
    int showed_packed_warning; ///< flag for having shown the warning about divxs invalid b frames

    /* H.263+ specific */
    int umvplus; ///< == H263+ && unrestricted_mv
    int h263_aic; ///< Advanced INTRA Coding (AIC)
    int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top
    int h263_slice_structured;
    int alt_inter_vlc; ///< alternative inter vlc
    int modified_quant;
    int loop_filter;
    int custom_pcf;

    /* mpeg4 specific */
    int time_increment_bits; ///< number of bits to represent the fractional part of time
    int last_time_base;
    int time_base; ///< time in seconds of last I,P,S Frame
    int64_t time; ///< time of current frame
    int64_t last_non_b_time;
    uint16_t pp_time; ///< time distance between the last 2 p,s,i frames
    uint16_t pb_time; ///< time distance between the last b and p,s,i frame
    uint16_t pp_field_time;
    uint16_t pb_field_time; ///< like above, just for interlaced
    int shape;
    int vol_sprite_usage;
    int sprite_width;
    int sprite_height;
    int sprite_left;
    int sprite_top;
    int sprite_brightness_change;
    int num_sprite_warping_points;
    int real_sprite_warping_points;
    uint16_t sprite_traj[4][2]; ///< sprite trajectory points
    int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY]
    int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY]
    int sprite_shift[2]; ///< sprite shift [isChroma]
    int mcsel;
    int quant_precision;
    int quarter_sample; ///< 1->qpel, 0->half pel ME/MC
    int scalability;
    int hierachy_type;
    int enhancement_type;
    int new_pred;
    int reduced_res_vop;
    int aspect_ratio_info; //FIXME remove
    int sprite_warping_accuracy;
    int low_latency_sprite;
    int data_partitioning; ///< data partitioning flag from header
    int partitioned_frame; ///< is current frame partitioned
    int rvlc; ///< reversible vlc
    int resync_marker; ///< could this stream contain resync markers
    int low_delay; ///< no reordering needed / has no b-frames
    int vo_type;
    int vol_control_parameters; ///< does the stream contain the low_delay flag, used to workaround buggy encoders
    int intra_dc_threshold; ///< QP above which the ac VLC should be used for intra dc
    int use_intra_dc_vlc;
    PutBitContext tex_pb; ///< used for data partitioned VOPs
    PutBitContext pb2; ///< used for data partitioned VOPs
    int mpeg_quant;
    int t_frame; ///< time distance of first I -> B, used for interlaced b frames
    int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4
    int cplx_estimation_trash_i;
    int cplx_estimation_trash_p;
    int cplx_estimation_trash_b;

    /* divx specific, used to workaround (many) bugs in divx5 */
    int divx_version;
    int divx_build;
    int divx_packed;
    uint8_t *bitstream_buffer; //Divx 5.01 puts several frames in a single one, this is used to reorder them
    int bitstream_buffer_size;
    unsigned int allocated_bitstream_buffer_size;

    int xvid_build;

    /* lavc specific stuff, used to workaround bugs in libavcodec */
    int lavc_build;

    /* RV10 specific */
    int rv10_version; ///< RV10 version: 0 or 3
    int rv10_first_dc_coded[3];
    int orig_width, orig_height;

    /* MJPEG specific */
    struct MJpegContext *mjpeg_ctx;
    int mjpeg_vsample[3]; ///< vertical sampling factors, default = {2, 1, 1}
    int mjpeg_hsample[3]; ///< horizontal sampling factors, default = {2, 1, 1}

    /* MSMPEG4 specific */
    int mv_table_index;
    int rl_table_index;
    int rl_chroma_table_index;
    int dc_table_index;
    int use_skip_mb_code;
    int slice_height; ///< in macroblocks
    int first_slice_line; ///< used in mpeg4 too to handle resync markers
    int flipflop_rounding;
    int msmpeg4_version; ///< 0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
    int per_mb_rl_table;
    int esc3_level_length;
    int esc3_run_length;
    /** [mb_intra][isChroma][level][run][last] */
    int (*ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2];
    int inter_intra_pred;
    int mspel;

    /* decompression specific */
    GetBitContext gb;

    /* Mpeg1 specific */
    int gop_picture_number; ///< index of the first picture of a GOP based on fake_pic_num & mpeg1 specific
    int last_mv_dir; ///< last mv_dir, used for b frame encoding
    int broken_link; ///< no_output_of_prior_pics_flag
    uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream

    /* MPEG-2-specific - I wished not to have to support this mess. */
    int progressive_sequence;
    int mpeg_f_code[2][2];
    int picture_structure;
/* picture type */
#define PICT_TOP_FIELD 1
#define PICT_BOTTOM_FIELD 2
#define PICT_FRAME 3

    int intra_dc_precision;
    int frame_pred_frame_dct;
    int top_field_first;
    int concealment_motion_vectors;
    int q_scale_type;
    int intra_vlc_format;
    int alternate_scan;
    int repeat_first_field;
    int chroma_420_type;
    int chroma_format;
#define CHROMA_420 1
#define CHROMA_422 2
#define CHROMA_444 3
    int chroma_x_shift;//depend on pix_format, that depend on chroma_format
    int chroma_y_shift;

    int progressive_frame;
    int full_pel[2];
    int interlaced_dct;
    int first_slice;
    int first_field; ///< is 1 for the first field of a field picture 0 otherwise

    /* RTP specific */
    int rtp_mode;

    uint8_t *ptr_lastgob;
    int swap_uv; //vcr2 codec is an MPEG-2 variant with U and V swapped

    DCTELEM (*pblocks[12])[64];

    DCTELEM (*block)[64]; ///< points to one of the following blocks
    DCTELEM (*blocks)[8][64]; // for HQ mode we need to keep the best block
    int (*decode_mb)(struct MpegEncContext *s, DCTELEM block[6][64]); // used by some codecs to avoid a switch()
#define SLICE_OK 0
#define SLICE_ERROR -1
#define SLICE_END -2 ///<end marker found
#define SLICE_NOEND -3 ///<no end marker or error found but mb count exceeded

    void (*dct_unquantize_mpeg1_intra)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_mpeg1_inter)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_mpeg2_intra)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_mpeg2_inter)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_h263_intra)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_h263_inter)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_h261_intra)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_h261_inter)(struct MpegEncContext *s,
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_intra)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both)
                           DCTELEM *block/*align 16*/, int n, int qscale);
    void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both)
                           DCTELEM *block/*align 16*/, int n, int qscale);
    int (*dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
    int (*fast_dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
    void (*denoise_dct)(struct MpegEncContext *s, DCTELEM *block);
} MpegEncContext;

void MPV_decode_defaults(MpegEncContext *s);
int MPV_common_init(MpegEncContext *s);
void MPV_common_end(MpegEncContext *s);
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
void MPV_frame_end(MpegEncContext *s);
int MPV_encode_init(AVCodecContext *avctx);
int MPV_encode_end(AVCodecContext *avctx);
int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
void MPV_common_init_mmx(MpegEncContext *s);
void MPV_common_init_axp(MpegEncContext *s);
void MPV_common_init_mlib(MpegEncContext *s);
void MPV_common_init_mmi(MpegEncContext *s);
void MPV_common_init_arm(MpegEncContext *s);
void MPV_common_init_altivec(MpegEncContext *s);
void MPV_common_init_bfin(MpegEncContext *s);
void ff_clean_intra_table_entries(MpegEncContext *s);
void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_mpeg_flush(AVCodecContext *avctx);
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict);
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
int ff_find_unused_picture(MpegEncContext *s, int shared);
void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
const uint8_t *ff_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
void ff_set_qscale(MpegEncContext * s, int qscale);

void ff_er_frame_start(MpegEncContext *s);
void ff_er_frame_end(MpegEncContext *s);
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status);

int ff_dct_common_init(MpegEncContext *s);
void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
void ff_init_block_index(MpegEncContext *s);

void ff_copy_picture(Picture *dst, Picture *src);

/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared);

extern const enum PixelFormat ff_pixfmt_list_420[];
extern const enum PixelFormat ff_hwaccel_pixfmt_list_420[];

static inline void ff_update_block_index(MpegEncContext *s){
    const int block_size= 8>>s->avctx->lowres;

    s->block_index[0]+=2;
    s->block_index[1]+=2;
    s->block_index[2]+=2;
    s->block_index[3]+=2;
    s->block_index[4]++;
    s->block_index[5]++;
    s->dest[0]+= 2*block_size;
    s->dest[1]+= block_size;
    s->dest[2]+= block_size;
}

static inline int get_bits_diff(MpegEncContext *s){
    const int bits= put_bits_count(&s->pb);
    const int last= s->last_bits;

    s->last_bits = bits;

    return bits - last;
}

static inline int ff_h263_round_chroma(int x){
    static const uint8_t h263_chroma_roundtab[16] = {
    //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
        0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1,
    };
    return h263_chroma_roundtab[x & 0xf] + (x >> 3);
}

/* motion_est.c */
void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type);
void ff_fix_long_p_mvs(MpegEncContext * s);
void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select,
                     int16_t (*mv_table)[2], int f_code, int type, int truncate);
int ff_init_me(MpegEncContext *s);
int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
int ff_epzs_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int P[10][2],
                          int src_index, int ref_index, int16_t (*last_mv)[2],
                          int ref_mv_scale, int size, int h);
int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index, int ref_index,
                    int size, int h, int add_rate);

/* mpeg12.c */
extern const uint8_t ff_mpeg1_dc_scale_table[128];
extern const uint8_t * const ff_mpeg2_dc_scale_table[4];

void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y);
void ff_mpeg1_encode_init(MpegEncContext *s);
void ff_mpeg1_encode_slice_header(MpegEncContext *s);
void ff_mpeg1_clean_buffers(MpegEncContext *s);
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s);

extern const uint8_t ff_aic_dc_scale_table[32];
extern const uint8_t ff_h263_chroma_qscale_table[32];
extern const uint8_t ff_h263_loop_filter_strength[32];

/* h261.c */
void ff_h261_loop_filter(MpegEncContext *s);
void ff_h261_reorder_mb_index(MpegEncContext* s);
void ff_h261_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y);
void ff_h261_encode_picture_header(MpegEncContext * s, int picture_number);
void ff_h261_encode_init(MpegEncContext *s);
int ff_h261_get_picture_format(int width, int height);

/* rv10.c */
void rv10_encode_picture_header(MpegEncContext *s, int picture_number);
int rv_decode_dc(MpegEncContext *s, int n);
void rv20_encode_picture_header(MpegEncContext *s, int picture_number);

/* msmpeg4.c */
void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number);
void msmpeg4_encode_ext_header(MpegEncContext * s);
void msmpeg4_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y);
int msmpeg4_decode_picture_header(MpegEncContext * s);
int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size);
int ff_msmpeg4_decode_init(AVCodecContext *avctx);
void ff_msmpeg4_encode_init(MpegEncContext *s);

/* wmv2.c */
int ff_wmv2_decode_picture_header(MpegEncContext * s);
int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s);
void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block[6][64],
                    uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr);
void ff_mspel_motion(MpegEncContext *s,
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                     uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h);
int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number);
void ff_wmv2_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y);

#endif /* AVCODEC_MPEGVIDEO_H */
/*
 * NOTE(review): the following tokens are dataset/export metadata (repo name,
 * file path, language, license tag, byte size) that leaked into the file
 * during extraction; they are not C source. Preserved here, neutralized as a
 * comment, pending removal:
 *   123linslouis-android-video-cutter
 *   jni/libavcodec/mpegvideo.h
 *   C
 *   asf20
 *   36,639
 */
/* * Constants for DV codec * Copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Constants for DV codec. */ #ifndef AVCODEC_DVDATA_H #define AVCODEC_DVDATA_H #include "libavutil/rational.h" #include "avcodec.h" typedef struct DVwork_chunk { uint16_t buf_offset; uint16_t mb_coordinates[5]; } DVwork_chunk; /* * DVprofile is used to express the differences between various * DV flavors. For now it's primarily used for differentiating * 525/60 and 625/50, but the plans are to use it for various * DV specs as well (e.g. SMPTE314M vs. IEC 61834). 
 */

/*
 * DVprofile: static description of one DV system (DV25/DV50/DV100 variants).
 * Instances are looked up via ff_dv_frame_profile()/ff_dv_codec_profile().
 */
typedef struct DVprofile {
    int              dsf;                   /* value of the dsf in the DV header */
    int              video_stype;           /* stype for VAUX source pack */
    int              frame_size;            /* total size of one frame in bytes */
    int              difseg_size;           /* number of DIF segments per DIF channel */
    int              n_difchan;             /* number of DIF channels per frame */
    AVRational       time_base;             /* 1/framerate */
    int              ltc_divisor;           /* FPS from the LTS standpoint */
    int              height;                /* picture height in pixels */
    int              width;                 /* picture width in pixels */
    AVRational       sar[2];                /* sample aspect ratios for 4:3 and 16:9 */
    DVwork_chunk    *work_chunks;           /* each thread gets its own chunk of frame to work on */
    uint32_t        *idct_factor;           /* set of iDCT factor tables */
    enum PixelFormat pix_fmt;               /* picture pixel format */
    int              bpm;                   /* blocks per macroblock */
    const uint8_t   *block_sizes;           /* AC block sizes, in bits */
    int              audio_stride;          /* size of audio_shuffle table */
    int              audio_min_samples[3];  /* min amount of audio samples */
                                            /* for 48kHz, 44.1kHz and 32kHz */
    int              audio_samples_dist[5]; /* how many samples are supposed to be */
                                            /* in each frame in a 5 frames window */
    const uint8_t  (*audio_shuffle)[9];     /* PCM shuffling table */
} DVprofile;

/* unquant tables (not used directly) */
/* Per-QNO right-shift per AC area (22 quantization numbers x 4 areas). */
static const uint8_t dv_quant_shifts[22][4] = {
    { 3,3,4,4 },
    { 3,3,4,4 },
    { 2,3,3,4 },
    { 2,3,3,4 },
    { 2,2,3,3 },
    { 2,2,3,3 },
    { 1,2,2,3 },
    { 1,2,2,3 },
    { 1,1,2,2 },
    { 1,1,2,2 },
    { 0,1,1,2 },
    { 0,1,1,2 },
    { 0,0,1,1 },
    { 0,0,1,1 },
    { 0,0,0,1 },
    { 0,0,0,0 },
    { 0,0,0,0 },
    { 0,0,0,0 },
    { 0,0,0,0 },
    { 0,0,0,0 },
    { 0,0,0,0 },
    { 0,0,0,0 },
};

static const uint8_t dv_quant_offset[4] = { 6, 3, 0, 1 };
/* Upper (exclusive) coefficient index of each of the 4 AC areas in zig-zag order. */
static const uint8_t dv_quant_areas[4]  = { 6, 21, 43, 64 };

/* quantization quanta by QNO for DV100 */
static const uint8_t dv100_qstep[16] = {
    1, /* QNO = 0 and 1 both have no quantization */
    1,
    2, 3, 4, 5, 6, 7, 8, 16, 18, 20, 22, 24, 28, 52
};

/* DV25/50 DCT coefficient weights and inverse weights */
/* created by dvtables.py */
static const int dv_weight_bits = 18;   /* fixed-point precision of the weight tables */
static const int dv_weight_88[64] = {
    131072, 257107, 257107, 242189, 252167, 242189, 235923, 237536,
    237536, 235923, 229376, 231390, 223754, 231390, 229376, 222935,
    224969, 217965, 217965, 224969, 222935, 200636, 218652, 211916,
    212325, 211916, 218652, 200636, 188995, 196781, 205965, 206433,
    206433, 205965, 196781, 188995, 185364, 185364, 200636, 200704,
    200636, 185364, 185364, 174609, 180568, 195068, 195068, 180568,
    174609, 170091, 175557, 189591, 175557, 170091, 165371, 170627,
    170627, 165371, 160727, 153560, 160727, 144651, 144651, 136258,
};
static const int dv_weight_248[64] = {
    131072, 242189, 257107, 237536, 229376, 200636, 242189, 223754,
    224969, 196781, 262144, 242189, 229376, 200636, 257107, 237536,
    211916, 185364, 235923, 217965, 229376, 211916, 206433, 180568,
    242189, 223754, 224969, 196781, 211916, 185364, 235923, 217965,
    200704, 175557, 222935, 205965, 200636, 185364, 195068, 170627,
    229376, 211916, 206433, 180568, 200704, 175557, 222935, 205965,
    175557, 153560, 188995, 174609, 165371, 144651, 200636, 185364,
    195068, 170627, 175557, 153560, 188995, 174609, 165371, 144651,
};
static const int dv_iweight_bits = 14;  /* fixed-point precision of the inverse weights */
static const int dv_iweight_88[64] = {
    32768, 16710, 16710, 17735, 17015, 17735, 18197, 18079,
    18079, 18197, 18725, 18559, 19196, 18559, 18725, 19284,
    19108, 19692, 19692, 19108, 19284, 21400, 19645, 20262,
    20214, 20262, 19645, 21400, 22733, 21845, 20867, 20815,
    20815, 20867, 21845, 22733, 23173, 23173, 21400, 21400,
    21400, 23173, 23173, 24600, 23764, 22017, 22017, 23764,
    24600, 25267, 24457, 22672, 24457, 25267, 25971, 25191,
    25191, 25971, 26715, 27962, 26715, 29642, 29642, 31536,
};
static const int dv_iweight_248[64] = {
    32768, 17735, 16710, 18079, 18725, 21400, 17735, 19196,
    19108, 21845, 16384, 17735, 18725, 21400, 16710, 18079,
    20262, 23173, 18197, 19692, 18725, 20262, 20815, 23764,
    17735, 19196, 19108, 21845, 20262, 23173, 18197, 19692,
    21400, 24457, 19284, 20867, 21400, 23173, 22017, 25191,
    18725, 20262, 20815, 23764, 21400, 24457, 19284, 20867,
    24457, 27962, 22733, 24600, 25971, 29642, 21400, 23173,
    22017, 25191, 24457, 27962, 22733, 24600, 25971, 29642,
};

/**
 * The "inverse" DV100 weights are actually just the spec weights (zig-zagged).
 */
static const int dv_iweight_1080_y[64] = {
    128,  16,  16,  17,  17,  17,  18,  18,
     18,  18,  18,  18,  19,  18,  18,  19,
     19,  19,  19,  19,  19,  42,  38,  40,
     40,  40,  38,  42,  44,  43,  41,  41,
     41,  41,  43,  44,  45,  45,  42,  42,
     42,  45,  45,  48,  46,  43,  43,  46,
     48,  49,  48,  44,  48,  49, 101,  98,
     98, 101, 104, 109, 104, 116, 116, 123,
};
static const int dv_iweight_1080_c[64] = {
    128,  16,  16,  17,  17,  17,  25,  25,
     25,  25,  26,  25,  26,  25,  26,  26,
     26,  27,  27,  26,  26,  42,  38,  40,
     40,  40,  38,  42,  44,  43,  41,  41,
     41,  41,  43,  44,  91,  91,  84,  84,
     84,  91,  91,  96,  93,  86,  86,  93,
     96, 197, 191, 177, 191, 197, 203, 197,
    197, 203, 209, 219, 209, 232, 232, 246,
};
static const int dv_iweight_720_y[64] = {
    128,  16,  16,  17,  17,  17,  18,  18,
     18,  18,  18,  18,  19,  18,  18,  19,
     19,  19,  19,  19,  19,  42,  38,  40,
     40,  40,  38,  42,  44,  43,  41,  41,
     41,  41,  43,  44,  68,  68,  63,  63,
     63,  68,  68,  96,  92,  86,  86,  92,
     96,  98,  96,  88,  96,  98, 202, 196,
    196, 202, 208, 218, 208, 232, 232, 246,
};
static const int dv_iweight_720_c[64] = {
    128,  24,  24,  26,  26,  26,  36,  36,
     36,  36,  36,  36,  38,  36,  36,  38,
     38,  38,  38,  38,  38,  84,  76,  80,
     80,  80,  76,  84,  88,  86,  82,  82,
     82,  82,  86,  88, 182, 182, 168, 168,
    168, 182, 182, 192, 186, 192, 172, 186,
    192, 394, 382, 354, 382, 394, 406, 394,
    394, 406, 418, 438, 418, 464, 464, 492,
};

/* PCM de-shuffling for 525/60 (NTSC): [DIF sequence][sample group]. */
static const uint8_t dv_audio_shuffle525[10][9] = {
  {  0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */
  {  6, 36, 66, 26, 56, 86, 16, 46, 76 },
  { 12, 42, 72,  2, 32, 62, 22, 52, 82 },
  { 18, 48, 78,  8, 38, 68, 28, 58, 88 },
  { 24, 54, 84, 14, 44, 74,  4, 34, 64 },

  {  1, 31, 61, 21, 51, 81, 11, 41, 71 }, /* 2nd channel */
  {  7, 37, 67, 27, 57, 87, 17, 47, 77 },
  { 13, 43, 73,  3, 33, 63, 23, 53, 83 },
  { 19, 49, 79,  9, 39, 69, 29, 59, 89 },
  { 25, 55, 85, 15, 45, 75,  5, 35, 65 },
};

/* PCM de-shuffling for 625/50 (PAL). */
static const uint8_t dv_audio_shuffle625[12][9] = {
  {   0,  36,  72,  26,  62,  98,  16,  52,  88}, /* 1st channel */
  {   6,  42,  78,  32,  68, 104,  22,  58,  94},
  {  12,  48,  84,   2,  38,  74,  28,  64, 100},
  {  18,  54,  90,   8,  44,  80,  34,  70, 106},
  {  24,  60,  96,  14,  50,  86,   4,  40,  76},
  {  30,  66, 102,  20,  56,  92,  10,  46,  82},

  {   1,  37,  73,  27,  63,  99,  17,  53,  89}, /* 2nd channel */
  {   7,  43,  79,  33,  69, 105,  23,  59,  95},
  {  13,  49,  85,   3,  39,  75,  29,  65, 101},
  {  19,  55,  91,   9,  45,  81,  35,  71, 107},
  {  25,  61,  97,  15,  51,  87,   5,  41,  77},
  {  31,  67, 103,  21,  57,  93,  11,  47,  83},
};

/* Supported audio sample rates, indexed by the AAUX source-pack rate code. */
static const av_unused int dv_audio_frequency[3] = {
    48000, 44100, 32000,
};

/* macroblock bit budgets */
static const uint8_t block_sizes_dv2550[8] = {
    112, 112, 112, 112, 80, 80, 0, 0,
};

static const uint8_t block_sizes_dv100[8] = {
    80, 80, 80, 80, 80, 80, 64, 64,
};

/* DIF section IDs (first byte of a DIF block header). */
enum dv_section_type {
     dv_sect_header  = 0x1f,
     dv_sect_subcode = 0x3f,
     dv_sect_vaux    = 0x56,
     dv_sect_audio   = 0x76,
     dv_sect_video   = 0x96,
};

/* Pack type IDs carried in VAUX/AAUX/subcode packs. */
enum dv_pack_type {
     dv_header525     = 0x3f, /* see dv_write_pack for important details on */
     dv_header625     = 0xbf, /* these two packs */
     dv_timecode      = 0x13,
     dv_audio_source  = 0x50,
     dv_audio_control = 0x51,
     dv_audio_recdate = 0x52,
     dv_audio_rectime = 0x53,
     dv_video_source  = 0x60,
     dv_video_control = 0x61,
     dv_video_recdate = 0x62,
     dv_video_rectime = 0x63,
     dv_unknown_pack  = 0xff,
};

#define DV_PROFILE_IS_HD(p) ((p)->video_stype & 0x10)
#define DV_PROFILE_IS_1080i50(p) (((p)->video_stype == 0x14) && ((p)->dsf == 1))
#define DV_PROFILE_IS_720p50(p)  (((p)->video_stype == 0x18) && ((p)->dsf == 1))

/* minimum number of bytes to read from a DV stream in order to determine the profile */
#define DV_PROFILE_BYTES (6*80) /* 6 DIF blocks */

/**
 * largest possible DV frame, in bytes (1080i50)
 */
#define DV_MAX_FRAME_SIZE 576000

/**
 * maximum number of blocks per macroblock in any DV format
 */
#define DV_MAX_BPM 8

const DVprofile* ff_dv_frame_profile(const DVprofile *sys,
                                     const uint8_t* frame, unsigned buf_size);
const DVprofile* ff_dv_codec_profile(AVCodecContext* codec);

/*
 * Write the 3-byte DIF block ID into buf; returns the number of bytes
 * written (always 3).
 */
static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
                                  uint8_t seq_num, uint8_t dif_num,
                                  uint8_t* buf)
{
    buf[0] = (uint8_t)t;       /* Section type */
    buf[1] = (seq_num  << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
             (chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
             7;                /* reserved -- always 1 */
    buf[2] = dif_num;          /* DIF block number Video: 0-134, Audio: 0-8 */
    return 3;
}

/*
 * Write a 3-byte subcode sync-block (SSYB) ID into buf; returns 3.
 * Sync blocks 0, 6 and 11 carry application IDs, the rest are reserved.
 */
static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
{
    if (syb_num == 0 || syb_num == 6) {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 (0  << 4) | /* AP3 (Subcode application ID) */
                 0x0f;       /* reserved -- always 1 */
    }
    else if (syb_num == 11) {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 0x7f;       /* reserved -- always 1 */
    }
    else {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 (0  << 4) | /* APT (Track application ID) */
                 0x0f;       /* reserved -- always 1 */
    }
    buf[1] = 0xf0 |            /* reserved -- always 1 */
             (syb_num & 0x0f); /* SSYB number 0 - 11   */
    buf[2] = 0xff;             /* reserved -- always 1 */
    return 3;
}

#endif /* AVCODEC_DVDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/dvdata.h
C
asf20
12,119
/* * (I)DCT Transforms * Copyright (c) 2009 Peter Ross <pross@xvid.org> * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com> * Copyright (c) 2010 Vitor Sessak * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * (Inverse) Discrete Cosine Transforms. These are also known as the * type II and type III DCTs respectively. 
*/ #include <math.h> #include "libavutil/mathematics.h" #include "fft.h" /* sin((M_PI * x / (2*n)) */ #define SIN(s,n,x) (s->costab[(n) - (x)]) /* cos((M_PI * x / (2*n)) */ #define COS(s,n,x) (s->costab[x]) static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data) { int n = 1 << ctx->nbits; int i; data[0] = 0; for(i = 1; i < n/2; i++) { float tmp1 = data[i ]; float tmp2 = data[n - i]; float s = SIN(ctx, n, 2*i); s *= tmp1 + tmp2; tmp1 = (tmp1 - tmp2) * 0.5f; data[i ] = s + tmp1; data[n - i] = s - tmp1; } data[n/2] *= 2; ff_rdft_calc(&ctx->rdft, data); data[0] *= 0.5f; for(i = 1; i < n-2; i += 2) { data[i + 1] += data[i - 1]; data[i ] = -data[i + 2]; } data[n-1] = 0; } static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data) { int n = 1 << ctx->nbits; int i; float next = -0.5f * (data[0] - data[n]); for(i = 0; i < n/2; i++) { float tmp1 = data[i ]; float tmp2 = data[n - i]; float s = SIN(ctx, n, 2*i); float c = COS(ctx, n, 2*i); c *= tmp1 - tmp2; s *= tmp1 - tmp2; next += c; tmp1 = (tmp1 + tmp2) * 0.5f; data[i ] = tmp1 - s; data[n - i] = tmp1 + s; } ff_rdft_calc(&ctx->rdft, data); data[n] = data[1]; data[1] = next; for(i = 3; i <= n; i += 2) data[i] = data[i - 2] - data[i]; } static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data) { int n = 1 << ctx->nbits; int i; float next = data[n - 1]; float inv_n = 1.0f / n; for (i = n - 2; i >= 2; i -= 2) { float val1 = data[i ]; float val2 = data[i - 1] - data[i + 1]; float c = COS(ctx, n, i); float s = SIN(ctx, n, i); data[i ] = c * val1 + s * val2; data[i + 1] = s * val1 - c * val2; } data[1] = 2 * next; ff_rdft_calc(&ctx->rdft, data); for (i = 0; i < n / 2; i++) { float tmp1 = data[i ] * inv_n; float tmp2 = data[n - i - 1] * inv_n; float csc = ctx->csc2[i] * (tmp1 - tmp2); tmp1 += tmp2; data[i ] = tmp1 + csc; data[n - i - 1] = tmp1 - csc; } } static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data) { int n = 1 << ctx->nbits; int i; float next; for (i=0; i < n/2; i++) { float tmp1 = data[i ]; float 
tmp2 = data[n - i - 1]; float s = SIN(ctx, n, 2*i + 1); s *= tmp1 - tmp2; tmp1 = (tmp1 + tmp2) * 0.5f; data[i ] = tmp1 + s; data[n-i-1] = tmp1 - s; } ff_rdft_calc(&ctx->rdft, data); next = data[1] * 0.5; data[1] *= -1; for (i = n - 2; i >= 0; i -= 2) { float inr = data[i ]; float ini = data[i + 1]; float c = COS(ctx, n, i); float s = SIN(ctx, n, i); data[i ] = c * inr + s * ini; data[i+1] = next; next += s * inr - c * ini; } } void ff_dct_calc(DCTContext *s, FFTSample *data) { s->dct_calc(s, data); } av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse) { int n = 1 << nbits; int i; s->nbits = nbits; s->inverse = inverse; ff_init_ff_cos_tabs(nbits+2); s->costab = ff_cos_tabs[nbits+2]; s->csc2 = av_malloc(n/2 * sizeof(FFTSample)); if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) { av_free(s->csc2); return -1; } for (i = 0; i < n/2; i++) s->csc2[i] = 0.5 / sin((M_PI / (2*n) * (2*i + 1))); switch(inverse) { case DCT_I : s->dct_calc = ff_dct_calc_I_c; break; case DCT_II : s->dct_calc = ff_dct_calc_II_c ; break; case DCT_III: s->dct_calc = ff_dct_calc_III_c; break; case DST_I : s->dct_calc = ff_dst_calc_I_c; break; } return 0; } av_cold void ff_dct_end(DCTContext *s) { ff_rdft_end(&s->rdft); av_free(s->csc2); }
123linslouis-android-video-cutter
jni/libavcodec/dct.c
C
asf20
5,054
/*
 * Simple free lossless/lossy audio codec
 * Copyright (c) 2004 Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "golomb.h"

/**
 * @file
 * Simple free lossless/lossy audio codec
 * Based on Paul Francis Harrison's Bonk (http://www.logarithmic.net/pfh/bonk)
 * Written and designed by Alex Beregszaszi
 *
 * TODO:
 *  - CABAC put/get_symbol
 *  - independent quantizer for channels
 *  - >2 channels support
 *  - more decorrelation types
 *  - more tap_quant tests
 *  - selectable intlist writers/readers (bonk-style, golomb, cabac)
 */

#define MAX_CHANNELS 2

/* stereo decorrelation modes (2-bit field in the extradata) */
#define MID_SIDE 0
#define LEFT_SIDE 1
#define RIGHT_SIDE 2

/* Shared encoder/decoder state. */
typedef struct SonicContext {
    int lossless, decorrelation;

    int num_taps, downsampling;       /* lattice filter order; temporal downsampling factor */
    double quantization;

    int channels, samplerate, block_align, frame_size;

    int *tap_quant;                   /* per-tap quantizer (~sqrt(i+1)) */
    int *int_samples;                 /* interleaved working buffer, one frame */
    int *coded_samples[MAX_CHANNELS];

    // for encoding
    int *tail;                        /* carry-over samples between frames */
    int tail_size;
    int *window;                      /* tail + frame + zero padding */
    int window_size;

    // for decoding
    int *predictor_k;                 /* dequantized reflection coefficients */
    int *predictor_state[MAX_CHANNELS];
} SonicContext;

#define LATTICE_SHIFT 10
#define SAMPLE_SHIFT 4
#define LATTICE_FACTOR (1 << LATTICE_SHIFT)
#define SAMPLE_FACTOR (1 << SAMPLE_SHIFT)

#define BASE_QUANT 0.6
#define RATE_VARIATION 3.0

/* Rounded integer division (rounds half away from zero). */
static inline int divide(int a, int b)
{
    if (a < 0)
        return -( (-a + b/2)/b );
    else
        return (a + b/2)/b;
}

/* Arithmetic right shift with rounding. */
static inline int shift(int a,int b)
{
    return (a+(1<<(b-1))) >> b;
}

/* Right shift biased toward zero for negative inputs. */
static inline int shift_down(int a,int b)
{
    return (a>>b)+((a<0)?1:0);
}

#if 1
/* Active integer-list coder: plain signed Exp-Golomb. base_2_part is
 * accepted for interface compatibility with the bonk-style coder below
 * but ignored here. */
static inline int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
{
    int i;

    for (i = 0; i < entries; i++)
        set_se_golomb(pb, buf[i]);

    return 1;
}

static inline int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
{
    int i;

    for (i = 0; i < entries; i++)
        buf[i] = get_se_golomb(gb);

    return 1;
}
#else
/* Alternative (currently compiled out) bonk-style adaptive RLE coder. */

#define ADAPT_LEVEL 8

/* Number of bits needed to represent x (0 for x == 0). */
static int bits_to_store(uint64_t x)
{
    int res = 0;
    while(x)
    {
        res++;
        x >>= 1;
    }
    return res;
}

/* Write value in [0, max]; the top bit is elided when it can be inferred. */
static void write_uint_max(PutBitContext *pb, unsigned int value, unsigned int max)
{
    int i, bits;

    if (!max)
        return;

    bits = bits_to_store(max);

    for (i = 0; i < bits-1; i++)
        put_bits(pb, 1, value & (1 << i));

    if ( (value | (1 << (bits-1))) <= max)
        put_bits(pb, 1, value & (1 << (bits-1)));
}

/* Inverse of write_uint_max(). */
static unsigned int read_uint_max(GetBitContext *gb, int max)
{
    int i, bits, value = 0;

    if (!max)
        return 0;

    bits = bits_to_store(max);

    for (i = 0; i < bits-1; i++)
        if (get_bits1(gb))
            value += 1 << i;

    if ( (value | (1<<(bits-1))) <= max)
        if (get_bits1(gb))
            value += 1 << (bits-1);

    return value;
}

/* Bonk-style coder: low bits raw, magnitudes bit-plane + adaptive RLE,
 * then one sign bit per nonzero entry.
 * NOTE(review): allocation sizes assume sizeof(int) == 4; on OOM the
 * earlier "copy" buffer is leaked (the av_free calls are commented out). */
static int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
{
    int i, j, x = 0, low_bits = 0, max = 0;
    int step = 256, pos = 0, dominant = 0, any = 0;
    int *copy, *bits;

    copy = av_mallocz(4* entries);
    if (!copy)
        return -1;

    if (base_2_part)
    {
        int energy = 0;

        for (i = 0; i < entries; i++)
            energy += abs(buf[i]);

        low_bits = bits_to_store(energy / (entries * 2));
        if (low_bits > 15)
            low_bits = 15;

        put_bits(pb, 4, low_bits);
    }

    for (i = 0; i < entries; i++)
    {
        put_bits(pb, low_bits, abs(buf[i]));
        copy[i] = abs(buf[i]) >> low_bits;
        if (copy[i] > max)
            max = abs(copy[i]);
    }

    bits = av_mallocz(4* entries*max);
    if (!bits)
    {
//        av_free(copy);
        return -1;
    }

    /* flatten the magnitudes into bit planes */
    for (i = 0; i <= max; i++)
    {
        for (j = 0; j < entries; j++)
            if (copy[j] >= i)
                bits[x++] = copy[j] > i;
    }

    // store bitstream
    while (pos < x)
    {
        int steplet = step >> 8;

        if (pos + steplet > x)
            steplet = x - pos;

        for (i = 0; i < steplet; i++)
            if (bits[i+pos] != dominant)
                any = 1;

        put_bits(pb, 1, any);

        if (!any)
        {
            pos += steplet;
            step += step / ADAPT_LEVEL;
        }
        else
        {
            int interloper = 0;

            while (((pos + interloper) < x) && (bits[pos + interloper] == dominant))
                interloper++;

            // note change
            write_uint_max(pb, interloper, (step >> 8) - 1);

            pos += interloper + 1;
            step -= step / ADAPT_LEVEL;
        }

        if (step < 256)
        {
            step = 65536 / step;
            dominant = !dominant;
        }
    }

    // store signs
    for (i = 0; i < entries; i++)
        if (buf[i])
            put_bits(pb, 1, buf[i] < 0);

//    av_free(bits);
//    av_free(copy);

    return 0;
}

/* Inverse of the bonk-style intlist_write(). */
static int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
{
    int i, low_bits = 0, x = 0;
    int n_zeros = 0, step = 256, dominant = 0;
    int pos = 0, level = 0;
    int *bits = av_mallocz(4* entries);

    if (!bits)
        return -1;

    if (base_2_part)
    {
        low_bits = get_bits(gb, 4);

        if (low_bits)
            for (i = 0; i < entries; i++)
                buf[i] = get_bits(gb, low_bits);
    }

//    av_log(NULL, AV_LOG_INFO, "entries: %d, low bits: %d\n", entries, low_bits);

    while (n_zeros < entries)
    {
        int steplet = step >> 8;

        if (!get_bits1(gb))
        {
            for (i = 0; i < steplet; i++)
                bits[x++] = dominant;

            if (!dominant)
                n_zeros += steplet;

            step += step / ADAPT_LEVEL;
        }
        else
        {
            int actual_run = read_uint_max(gb, steplet-1);

//            av_log(NULL, AV_LOG_INFO, "actual run: %d\n", actual_run);

            for (i = 0; i < actual_run; i++)
                bits[x++] = dominant;

            bits[x++] = !dominant;

            if (!dominant)
                n_zeros += actual_run;
            else
                n_zeros++;

            step -= step / ADAPT_LEVEL;
        }

        if (step < 256)
        {
            step = 65536 / step;
            dominant = !dominant;
        }
    }

    // reconstruct unsigned values
    n_zeros = 0;
    for (i = 0; n_zeros < entries; i++)
    {
        while(1)
        {
            if (pos >= entries)
            {
                pos = 0;
                level += 1 << low_bits;
            }

            if (buf[pos] >= level)
                break;

            pos++;
        }

        if (bits[i])
            buf[pos] += 1 << low_bits;
        else
            n_zeros++;

        pos++;
    }

//    av_free(bits);

    // read signs
    for (i = 0; i < entries; i++)
        if (buf[i] && get_bits1(gb))
            buf[i] = -buf[i];

//    av_log(NULL, AV_LOG_INFO, "zeros: %d pos: %d\n", n_zeros, pos);

    return 0;
}
#endif

/* Run the lattice filter over the saved state so the per-sample update in
 * predictor_calc_error() can continue from it. */
static void predictor_init_state(int *k, int *state, int order)
{
    int i;

    for (i = order-2; i >= 0; i--)
    {
        int j, p, x = state[i];

        for (j = 0, p = i+1; p < order; j++,p++)
        {
            int tmp = x + shift_down(k[j] * state[p], LATTICE_SHIFT);
            state[p] += shift_down(k[j]*x, LATTICE_SHIFT);
            x = tmp;
        }
    }
}

/* One step of the fixed-point lattice predictor: feed "error" through the
 * filter, update state, and return the reconstructed sample (clamped to
 * avoid accumulator drift). The #if 1 pointer version mirrors the indexed
 * #else version below it. */
static int predictor_calc_error(int *k, int *state, int order, int error)
{
    int i, x = error - shift_down(k[order-1] *  state[order-1], LATTICE_SHIFT);

#if 1
    int *k_ptr = &(k[order-2]),
        *state_ptr = &(state[order-2]);
    for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--)
    {
        int k_value = *k_ptr, state_value = *state_ptr;
        x -= shift_down(k_value * state_value, LATTICE_SHIFT);
        state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
    }
#else
    for (i = order-2; i >= 0; i--)
    {
        x -= shift_down(k[i] * state[i], LATTICE_SHIFT);
        state[i+1] = state[i] + shift_down(k[i] * x, LATTICE_SHIFT);
    }
#endif

    // don't drift too far, to avoid overflows
    if (x >  (SAMPLE_FACTOR<<16)) x =  (SAMPLE_FACTOR<<16);
    if (x < -(SAMPLE_FACTOR<<16)) x = -(SAMPLE_FACTOR<<16);

    state[0] = x;

    return x;
}

#if CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER
// Heavily modified Levinson-Durbin algorithm which
// copes better with quantization, and calculates the
// actual whitened result as it goes.
/* Compute quantized reflection coefficients for the whole window and
 * whiten it in place. NOTE(review): allocation size assumes
 * sizeof(int) == 4 and the result of av_mallocz is not checked. */
static void modified_levinson_durbin(int *window, int window_entries,
        int *out, int out_entries, int channels, int *tap_quant)
{
    int i;
    int *state = av_mallocz(4* window_entries);

    memcpy(state, window, 4* window_entries);

    for (i = 0; i < out_entries; i++)
    {
        int step = (i+1)*channels, k, j;
        double xx = 0.0, xy = 0.0;
#if 1
        int *x_ptr = &(window[step]), *state_ptr = &(state[0]);
        j = window_entries - step;
        for (;j>=0;j--,x_ptr++,state_ptr++)
        {
            double x_value = *x_ptr,
                   state_value = *state_ptr;
            xx += state_value*state_value;
            xy += x_value*state_value;
        }
#else
        /* NOTE(review): dead (#if'd out) code -- the stray ';' after this
         * for() makes the loop body execute once with j out of range;
         * kept byte-identical, see the pointer version above for the
         * intended behaviour. */
        for (j = 0; j <= (window_entries - step); j++);
        {
            double stepval = window[step+j], stateval = window[j];
//            xx += (double)window[j]*(double)window[j];
//            xy += (double)window[step+j]*(double)window[j];
            xx += stateval*stateval;
            xy += stepval*stateval;
        }
#endif
        if (xx == 0.0)
            k = 0;
        else
            k = (int)(floor(-xy/xx * (double)LATTICE_FACTOR / (double)(tap_quant[i]) + 0.5));

        /* clamp the quantized coefficient to +/- LATTICE_FACTOR/tap_quant */
        if (k > (LATTICE_FACTOR/tap_quant[i]))
            k = LATTICE_FACTOR/tap_quant[i];
        if (-k > (LATTICE_FACTOR/tap_quant[i]))
            k = -(LATTICE_FACTOR/tap_quant[i]);

        out[i] = k;
        k *= tap_quant[i];

#if 1
        x_ptr = &(window[step]);
        state_ptr = &(state[0]);
        j = window_entries - step;
        for (;j>=0;j--,x_ptr++,state_ptr++)
        {
            int x_value = *x_ptr,
                state_value = *state_ptr;
            *x_ptr = x_value + shift_down(k*state_value,LATTICE_SHIFT);
            *state_ptr = state_value + shift_down(k*x_value, LATTICE_SHIFT);
        }
#else
        for (j=0; j <= (window_entries - step); j++)
        {
            int stepval = window[step+j], stateval=state[j];
            window[step+j] += shift_down(k * stateval, LATTICE_SHIFT);
            state[j] += shift_down(k * stepval, LATTICE_SHIFT);
        }
#endif
    }

    av_free(state);
}

/* Map a sample rate to its 4-bit extradata code, -1 if unsupported. */
static inline int code_samplerate(int samplerate)
{
    switch (samplerate)
    {
        case 44100: return 0;
        case 22050: return 1;
        case 11025: return 2;
        case 96000: return 3;
        case 48000: return 4;
        case 32000: return 5;
        case 24000: return 6;
        case 16000: return 7;
        case 8000: return 8;
    }
    return -1;
}

/* Encoder init: choose lossless/lossy parameters, allocate buffers and
 * write the codec extradata header.
 * NOTE(review): several av_mallocz results (tap_quant, int_samples) are
 * used without a NULL check; allocation sizes assume sizeof(int) == 4. */
static av_cold int sonic_encode_init(AVCodecContext *avctx)
{
    SonicContext *s = avctx->priv_data;
    PutBitContext pb;
    int i, version = 0;

    if (avctx->channels > MAX_CHANNELS)
    {
        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
        return -1; /* only stereo or mono for now */
    }

    if (avctx->channels == 2)
        s->decorrelation = MID_SIDE;

    if (avctx->codec->id == CODEC_ID_SONIC_LS)
    {
        s->lossless = 1;
        s->num_taps = 32;
        s->downsampling = 1;
        s->quantization = 0.0;
    }
    else
    {
        s->num_taps = 128;
        s->downsampling = 2;
        s->quantization = 1.0;
    }

    // max tap 2048
    if ((s->num_taps < 32) || (s->num_taps > 1024) ||
        ((s->num_taps>>5)<<5 != s->num_taps))
    {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of taps\n");
        return -1;
    }

    // generate taps
    s->tap_quant = av_mallocz(4* s->num_taps);
    for (i = 0; i < s->num_taps; i++)
        s->tap_quant[i] = (int)(sqrt(i+1));

    s->channels = avctx->channels;
    s->samplerate = avctx->sample_rate;

    s->block_align = (int)(2048.0*s->samplerate/44100)/s->downsampling;
    s->frame_size = s->channels*s->block_align*s->downsampling;

    s->tail = av_mallocz(4* s->num_taps*s->channels);
    if (!s->tail)
        return -1;
    s->tail_size = s->num_taps*s->channels;

    s->predictor_k = av_mallocz(4 * s->num_taps);
    if (!s->predictor_k)
        return -1;

    for (i = 0; i < s->channels; i++)
    {
        s->coded_samples[i] = av_mallocz(4* s->block_align);
        if (!s->coded_samples[i])
            return -1;
    }

    s->int_samples = av_mallocz(4* s->frame_size);

    s->window_size = ((2*s->tail_size)+s->frame_size);
    s->window = av_mallocz(4* s->window_size);
    if (!s->window)
        return -1;

    /* write the extradata header (version, params) */
    avctx->extradata = av_mallocz(16);
    if (!avctx->extradata)
        return -1;
    init_put_bits(&pb, avctx->extradata, 16*8);

    put_bits(&pb, 2, version); // version
    if (version == 1)
    {
        put_bits(&pb, 2, s->channels);
        put_bits(&pb, 4, code_samplerate(s->samplerate));
    }
    put_bits(&pb, 1, s->lossless);
    if (!s->lossless)
        put_bits(&pb, 3, SAMPLE_SHIFT); // XXX FIXME: sample precision
    put_bits(&pb, 2, s->decorrelation);
    put_bits(&pb, 2, s->downsampling);
    put_bits(&pb, 5, (s->num_taps >> 5)-1); // 32..1024
    put_bits(&pb, 1, 0); // XXX FIXME: no custom tap quant table

    flush_put_bits(&pb);
    avctx->extradata_size = put_bits_count(&pb)/8;

    av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
        version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);

    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    avctx->coded_frame->key_frame = 1;
    avctx->frame_size = s->block_align*s->downsampling;

    return 0;
}

/* Encoder teardown: release every buffer allocated in sonic_encode_init(). */
static av_cold int sonic_encode_close(AVCodecContext *avctx)
{
    SonicContext *s = avctx->priv_data;
    int i;

    av_freep(&avctx->coded_frame);

    for (i = 0; i < s->channels; i++)
        av_free(s->coded_samples[i]);

    av_free(s->predictor_k);
    av_free(s->tail);
    av_free(s->tap_quant);
    av_free(s->window);
    av_free(s->int_samples);

    return 0;
}

/* Encode one frame of interleaved 16-bit samples:
 * decorrelate -> window -> lattice-whiten -> downsample -> (quantize) ->
 * Exp-Golomb write. Returns the number of bytes written to buf. */
static int sonic_encode_frame(AVCodecContext *avctx,
                            uint8_t *buf, int buf_size, void *data)
{
    SonicContext *s = avctx->priv_data;
    PutBitContext pb;
    int i, j, ch, quant = 0, x = 0;
    short *samples = data;

    init_put_bits(&pb, buf, buf_size*8);

    // short -> internal
    for (i = 0; i < s->frame_size; i++)
        s->int_samples[i] = samples[i];

    if (!s->lossless)
        for (i = 0; i < s->frame_size; i++)
            s->int_samples[i] = s->int_samples[i] << SAMPLE_SHIFT;

    switch(s->decorrelation)
    {
        case MID_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
            {
                s->int_samples[i] += s->int_samples[i+1];
                s->int_samples[i+1] -= shift(s->int_samples[i], 1);
            }
            break;
        case LEFT_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
                s->int_samples[i+1] -= s->int_samples[i];
            break;
        case RIGHT_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
                s->int_samples[i] -= s->int_samples[i+1];
            break;
    }

    /* window = previous tail + current frame + zero padding */
    memset(s->window, 0, 4* s->window_size);

    for (i = 0; i < s->tail_size; i++)
        s->window[x++] = s->tail[i];

    for (i = 0; i < s->frame_size; i++)
        s->window[x++] = s->int_samples[i];

    for (i = 0; i < s->tail_size; i++)
        s->window[x++] = 0;

    for (i = 0; i < s->tail_size; i++)
        s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];

    // generate taps
    modified_levinson_durbin(s->window, s->window_size,
                s->predictor_k, s->num_taps, s->channels, s->tap_quant);
    if (intlist_write(&pb, s->predictor_k, s->num_taps, 0) < 0)
        return -1;

    /* downsample the whitened residual by summing groups of samples */
    for (ch = 0; ch < s->channels; ch++)
    {
        x = s->tail_size+ch;
        for (i = 0; i < s->block_align; i++)
        {
            int sum = 0;
            for (j = 0; j < s->downsampling; j++, x += s->channels)
                sum += s->window[x];
            s->coded_samples[ch][i] = sum;
        }
    }

    // simple rate control code
    if (!s->lossless)
    {
        double energy1 = 0.0, energy2 = 0.0;
        for (ch = 0; ch < s->channels; ch++)
        {
            for (i = 0; i < s->block_align; i++)
            {
                double sample = s->coded_samples[ch][i];
                energy2 += sample*sample;
                energy1 += fabs(sample);
            }
        }

        energy2 = sqrt(energy2/(s->channels*s->block_align));
        energy1 = sqrt(2.0)*energy1/(s->channels*s->block_align);

        // increase bitrate when samples are like a gaussian distribution
        // reduce bitrate when samples are like a two-tailed exponential distribution

        if (energy2 > energy1)
            energy2 += (energy2-energy1)*RATE_VARIATION;

        quant = (int)(BASE_QUANT*s->quantization*energy2/SAMPLE_FACTOR);
//        av_log(avctx, AV_LOG_DEBUG, "quant: %d energy: %f / %f\n", quant, energy1, energy2);

        if (quant < 1)
            quant = 1;
        if (quant > 65535)
            quant = 65535;

        set_ue_golomb(&pb, quant);

        quant *= SAMPLE_FACTOR;
    }

    // write out coded samples
    for (ch = 0; ch < s->channels; ch++)
    {
        if (!s->lossless)
            for (i = 0; i < s->block_align; i++)
                s->coded_samples[ch][i] = divide(s->coded_samples[ch][i], quant);

        if (intlist_write(&pb, s->coded_samples[ch], s->block_align, 1) < 0)
            return -1;
    }

//    av_log(avctx, AV_LOG_DEBUG, "used bytes: %d\n", (put_bits_count(&pb)+7)/8);

    flush_put_bits(&pb);
    return (put_bits_count(&pb)+7)/8;
}
#endif /* CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER */

#if CONFIG_SONIC_DECODER
/* Sample rates by extradata code; mirrors code_samplerate() above. */
static const int samplerate_table[] =
    { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };

/* Decoder init: parse the extradata header and allocate buffers.
 * NOTE(review): tap_quant/predictor_k/int_samples allocations are not
 * NULL-checked; sizes assume sizeof(int) == 4. */
static av_cold int sonic_decode_init(AVCodecContext *avctx)
{
    SonicContext *s = avctx->priv_data;
    GetBitContext gb;
    int i, version;

    s->channels = avctx->channels;
    s->samplerate = avctx->sample_rate;

    if (!avctx->extradata)
    {
        av_log(avctx, AV_LOG_ERROR, "No mandatory headers present\n");
        return -1;
    }

    init_get_bits(&gb, avctx->extradata, avctx->extradata_size);

    version = get_bits(&gb, 2);
    if (version > 1)
    {
        av_log(avctx, AV_LOG_ERROR, "Unsupported Sonic version, please report\n");
        return -1;
    }

    if (version == 1)
    {
        s->channels = get_bits(&gb, 2);
        s->samplerate = samplerate_table[get_bits(&gb, 4)];
        av_log(avctx, AV_LOG_INFO, "Sonicv2 chans: %d samprate: %d\n",
            s->channels, s->samplerate);
    }

    if (s->channels > MAX_CHANNELS)
    {
        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
        return -1;
    }

    s->lossless = get_bits1(&gb);
    if (!s->lossless)
        skip_bits(&gb, 3); // XXX FIXME
    s->decorrelation = get_bits(&gb, 2);

    s->downsampling = get_bits(&gb, 2);
    s->num_taps = (get_bits(&gb, 5)+1)<<5;
    if (get_bits1(&gb)) // XXX FIXME
        av_log(avctx, AV_LOG_INFO, "Custom quant table\n");

    s->block_align = (int)(2048.0*(s->samplerate/44100))/s->downsampling;
    s->frame_size = s->channels*s->block_align*s->downsampling;
//    avctx->frame_size = s->block_align;

    av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
        version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);

    // generate taps
    s->tap_quant = av_mallocz(4* s->num_taps);
    for (i = 0; i < s->num_taps; i++)
        s->tap_quant[i] = (int)(sqrt(i+1));

    s->predictor_k = av_mallocz(4* s->num_taps);

    for (i = 0; i < s->channels; i++)
    {
        s->predictor_state[i] = av_mallocz(4* s->num_taps);
        if (!s->predictor_state[i])
            return -1;
    }

    for (i = 0; i < s->channels; i++)
    {
        s->coded_samples[i] = av_mallocz(4* s->block_align);
        if (!s->coded_samples[i])
            return -1;
    }
    s->int_samples = av_mallocz(4* s->frame_size);

    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

/* Decoder teardown: release every buffer allocated in sonic_decode_init(). */
static av_cold int sonic_decode_close(AVCodecContext *avctx)
{
    SonicContext *s = avctx->priv_data;
    int i;

    av_free(s->int_samples);
    av_free(s->tap_quant);
    av_free(s->predictor_k);

    for (i = 0; i < s->channels; i++)
    {
        av_free(s->predictor_state[i]);
        av_free(s->coded_samples[i]);
    }

    return 0;
}

/* Decode one packet into interleaved 16-bit samples: read taps, rebuild
 * each channel through the lattice predictor (upsampling via zero-error
 * steps), undo decorrelation, then clip to s16. Returns bytes consumed. */
static int sonic_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SonicContext *s = avctx->priv_data;
    GetBitContext gb;
    int i, quant, ch, j;
    short *samples = data;

    if (buf_size == 0) return 0;

//    av_log(NULL, AV_LOG_INFO, "buf_size: %d\n", buf_size);

    init_get_bits(&gb, buf, buf_size*8);

    intlist_read(&gb, s->predictor_k, s->num_taps, 0);

    // dequantize
    for (i = 0; i < s->num_taps; i++)
        s->predictor_k[i] *= s->tap_quant[i];

    if (s->lossless)
        quant = 1;
    else
        quant = get_ue_golomb(&gb) * SAMPLE_FACTOR;

//    av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant);

    for (ch = 0; ch < s->channels; ch++)
    {
        int x = ch;

        predictor_init_state(s->predictor_k, s->predictor_state[ch], s->num_taps);

        intlist_read(&gb, s->coded_samples[ch], s->block_align, 1);

        for (i = 0; i < s->block_align; i++)
        {
            /* the first downsampling-1 outputs carry no residual */
            for (j = 0; j < s->downsampling - 1; j++)
            {
                s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, 0);
                x += s->channels;
            }

            s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, s->coded_samples[ch][i] * quant);
            x += s->channels;
        }

        /* save the last num_taps samples as next frame's predictor state */
        for (i = 0; i < s->num_taps; i++)
            s->predictor_state[ch][i] = s->int_samples[s->frame_size - s->channels + ch - i*s->channels];
    }

    switch(s->decorrelation)
    {
        case MID_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
            {
                s->int_samples[i+1] += shift(s->int_samples[i], 1);
                s->int_samples[i] -= s->int_samples[i+1];
            }
            break;
        case LEFT_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
                s->int_samples[i+1] += s->int_samples[i];
            break;
        case RIGHT_SIDE:
            for (i = 0; i < s->frame_size; i += s->channels)
                s->int_samples[i] += s->int_samples[i+1];
            break;
    }

    if (!s->lossless)
        for (i = 0; i < s->frame_size; i++)
            s->int_samples[i] = shift(s->int_samples[i], SAMPLE_SHIFT);

    // internal -> short
    for (i = 0; i < s->frame_size; i++)
        samples[i] = av_clip_int16(s->int_samples[i]);

    align_get_bits(&gb);

    *data_size = s->frame_size * 2;

    return (get_bits_count(&gb)+7)/8;
}

AVCodec sonic_decoder = {
    "sonic",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_SONIC,
    sizeof(SonicContext),
    sonic_decode_init,
    NULL,
    sonic_decode_close,
    sonic_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("Sonic"),
};
#endif /* CONFIG_SONIC_DECODER */

#if CONFIG_SONIC_ENCODER
AVCodec sonic_encoder = {
    "sonic",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_SONIC,
    sizeof(SonicContext),
    sonic_encode_init,
    sonic_encode_frame,
    sonic_encode_close,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Sonic"),
};
#endif

#if CONFIG_SONIC_LS_ENCODER
AVCodec sonic_ls_encoder = {
    "sonicls",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_SONIC_LS,
    sizeof(SonicContext),
    sonic_encode_init,
    sonic_encode_frame,
    sonic_encode_close,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"),
};
#endif
123linslouis-android-video-cutter
jni/libavcodec/sonic.c
C
asf20
25,166
/* * Video Decode and Presentation API for UNIX (VDPAU) is used for * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1. * * Copyright (c) 2008 NVIDIA * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <limits.h> #include "avcodec.h" #include "h264.h" #include "vc1.h" #undef NDEBUG #include <assert.h> #include "vdpau.h" #include "vdpau_internal.h" /** * \addtogroup VDPAU_Decoding * * @{ */ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) { H264Context *h = s->avctx->priv_data; struct vdpau_render_state *render, *render_ref; VdpReferenceFrameH264 *rf, *rf2; Picture *pic; int i, list, pic_frame_idx; render = (struct vdpau_render_state *)s->current_picture_ptr->data[0]; assert(render); rf = &render->info.h264.referenceFrames[0]; #define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames) for (list = 0; list < 2; ++list) { Picture **lp = list ? h->long_ref : h->short_ref; int ls = list ? 16 : h->short_ref_count; for (i = 0; i < ls; ++i) { pic = lp[i]; if (!pic || !pic->reference) continue; pic_frame_idx = pic->long_ref ? 
/* NOTE(review): the lines below are the tail of the H.264 reference-frame
 * setup routine whose header lies above this chunk (it iterates over the
 * short/long-term reference picture lists and fills
 * render->info.h264.referenceFrames for VDPAU).  Code is unchanged;
 * only formatting and comments were added. */
                          pic->pic_id : pic->frame_num;

            render_ref = (struct vdpau_render_state *)pic->data[0];
            assert(render_ref);

            /* Check whether this picture's surface is already present in the
             * reference-frame array (the two fields of one frame share a
             * surface and must be merged into a single entry). */
            rf2 = &render->info.h264.referenceFrames[0];
            while (rf2 != rf) {
                if (
                    (rf2->surface == render_ref->surface)
                    && (rf2->is_long_term == pic->long_ref)
                    && (rf2->frame_idx == pic_frame_idx)
                )
                    break;
                ++rf2;
            }
            if (rf2 != rf) {
                /* Already listed: just OR in which field(s) are referenced. */
                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
                continue;
            }

            /* Array full: silently drop any further references. */
            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
                continue;

            /* New entry. */
            rf->surface              = render_ref->surface;
            rf->is_long_term         = pic->long_ref;
            rf->top_is_reference     = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
            rf->bottom_is_reference  = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
            rf->field_order_cnt[0]   = pic->field_poc[0];
            rf->field_order_cnt[1]   = pic->field_poc[1];
            rf->frame_idx            = pic_frame_idx;

            ++rf;
        }
    }

    /* Mark all unused slots in the fixed-size array as invalid. */
    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
        rf->surface             = VDP_INVALID_HANDLE;
        rf->is_long_term        = 0;
        rf->top_is_reference    = 0;
        rf->bottom_is_reference = 0;
        rf->field_order_cnt[0]  = 0;
        rf->field_order_cnt[1]  = 0;
        rf->frame_idx           = 0;
    }
}

/**
 * Append one bitstream chunk to the render state of the current picture.
 * The buffer array is grown on demand with av_fast_realloc().
 *
 * @param s        codec context holding the current picture
 * @param buf      pointer into the input packet (NOT copied; must stay
 *                 valid until the surface is rendered)
 * @param buf_size size of the chunk in bytes
 */
void ff_vdpau_add_data_chunk(MpegEncContext *s,
                             const uint8_t *buf, int buf_size)
{
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* NOTE(review): av_fast_realloc() returns NULL on OOM and the result is
     * not checked here — the next line would then dereference NULL. */
    render->bitstream_buffers= av_fast_realloc(
        render->bitstream_buffers,
        &render->bitstream_buffers_allocated,
        sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
    );

    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
    render->bitstream_buffers_used++;
}

/**
 * Initialize the per-picture H.264 VDPAU info at the start of decoding:
 * the picture order counts of both fields and the frame number.
 */
void ff_vdpau_h264_picture_start(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render;
    int i;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    for (i = 0; i < 2; ++i) {
        int foc = s->current_picture_ptr->field_poc[i];
        /* INT_MAX marks an absent field POC; VDPAU expects 0 instead. */
        if (foc == INT_MAX)
            foc = 0;
        render->info.h264.field_order_cnt[i] = foc;
    }

    render->info.h264.frame_num = h->frame_num;
}

/**
 * Finish the current H.264 picture: copy the SPS/PPS-derived fields into
 * the VdpPictureInfoH264 struct and hand the picture to the presentation
 * layer via ff_draw_horiz_band(). No-op when no slice was decoded.
 */
void ff_vdpau_h264_picture_complete(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    render->info.h264.slice_count = h->slice_num;
    if (render->info.h264.slice_count < 1)
        return;

    render->info.h264.is_reference                           = (s->current_picture_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
    render->info.h264.field_pic_flag                         = s->picture_structure != PICT_FRAME;
    render->info.h264.bottom_field_flag                      = s->picture_structure == PICT_BOTTOM_FIELD;
    render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
    render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff && !render->info.h264.field_pic_flag;
    render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
    render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
    render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
    render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
    render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
    render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
    render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
    render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
    render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
    render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
    render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
    render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
    /* log2_max_pic_order_cnt_lsb is only meaningful for poc_type == 0. */
    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
    render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
    render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
    render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
    render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
    render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4,
           sizeof(render->info.h264.scaling_lists_4x4));
    memcpy(render->info.h264.scaling_lists_8x8, h->pps.scaling_matrix8,
           sizeof(render->info.h264.scaling_lists_8x8));

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/**
 * Finish an MPEG-1/2 picture: fill the VdpPictureInfoMPEG1Or2 struct,
 * attach reference surfaces and the bitstream chunk, then hand the picture
 * to the presentation layer.
 *
 * @param s           codec context
 * @param buf         bitstream data for this picture (not copied)
 * @param buf_size    size of buf in bytes
 * @param slice_count number of slices decoded for this picture
 */
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
                                    int buf_size, int slice_count)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG1Or2 struct */
    render->info.mpeg.picture_structure          = s->picture_structure;
    render->info.mpeg.picture_coding_type        = s->pict_type;
    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
    render->info.mpeg.alternate_scan             = s->alternate_scan;
    render->info.mpeg.q_scale_type               = s->q_scale_type;
    render->info.mpeg.top_field_first            = s->top_field_first;
    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
    for (i = 0; i < 64; ++i) {
        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }

    render->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
    render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;

    switch(s->pict_type){
    case FF_B_TYPE:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg.backward_reference = next->surface;
        // no return here, going to set forward prediction
        /* fallthrough */
    case FF_P_TYPE:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.mpeg.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    render->info.mpeg.slice_count = slice_count;

    if (slice_count)
        ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/**
 * Complete decoding of a VC-1 picture: fill the VDPAU VC-1 picture-info
 * struct from the VC1Context, attach reference surfaces and the bitstream
 * chunk, and hand the picture to the presentation layer.
 *
 * @param s        codec context (embedded inside VC1Context)
 * @param buf      bitstream data for this picture (not copied)
 * @param buf_size size of buf in bytes
 */
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size)
{
    VC1Context *v = s->avctx->priv_data;
    struct vdpau_render_state *render, *last, *next;

    render = (struct vdpau_render_state *)s->current_picture.data[0];
    assert(render);

    /* fill VdpPictureInfoVC1 struct */
    render->info.vc1.frame_coding_mode  = v->fcm;
    render->info.vc1.postprocflag       = v->postprocflag;
    render->info.vc1.pulldown           = v->broadcast;
    render->info.vc1.interlace          = v->interlace;
    render->info.vc1.tfcntrflag        = v->tfcntrflag;
    render->info.vc1.finterpflag       = v->finterpflag;
    render->info.vc1.psf                = v->psf;
    render->info.vc1.dquant             = v->dquant;
    render->info.vc1.panscan_flag       = v->panscanflag;
    render->info.vc1.refdist_flag       = v->refdist_flag;
    render->info.vc1.quantizer          = v->quantizer_mode;
    render->info.vc1.extended_mv        = v->extended_mv;
    render->info.vc1.extended_dmv       = v->extended_dmv;
    render->info.vc1.overlap            = v->overlap;
    render->info.vc1.vstransform        = v->vstransform;
    render->info.vc1.loopfilter         = v->s.loop_filter;
    render->info.vc1.fastuvmc           = v->fastuvmc;
    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
    render->info.vc1.range_mapy         = v->range_mapy;
    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
    render->info.vc1.range_mapuv        = v->range_mapuv;
    /* Specific to simple/main profile only */
    render->info.vc1.multires           = v->multires;
    render->info.vc1.syncmarker         = v->s.resync_marker;
    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
    render->info.vc1.maxbframes         = v->s.max_b_frames;

    render->info.vc1.deblockEnable      = v->postprocflag & 1;
    render->info.vc1.pquant             = v->pq;

    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;

    /* Map lavc picture types onto the VDPAU VC-1 picture_type encoding
     * (4 = BI frame; otherwise derived from s->pict_type). */
    if (v->bi_type)
        render->info.vc1.picture_type = 4;
    else
        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;

    switch(s->pict_type){
    case FF_B_TYPE:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.vc1.backward_reference = next->surface;
        // no break here, going to set forward prediction
        /* fallthrough */
    case FF_P_TYPE:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.vc1.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    render->info.vc1.slice_count = 1;

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/**
 * Complete decoding of an MPEG-4 part 2 picture: fill the
 * VdpPictureInfoMPEG4Part2 struct, attach reference surfaces and the
 * bitstream chunk, and hand the picture to the presentation layer.
 *
 * @param s        codec context
 * @param buf      bitstream data for this picture (not copied)
 * @param buf_size size of buf in bytes
 */
void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    render->info.mpeg4.trd[0]                        = s->pp_time;
    render->info.mpeg4.trb[0]                        = s->pb_time;
    render->info.mpeg4.trd[1]                        = s->pp_field_time >> 1;
    render->info.mpeg4.trb[1]                        = s->pb_field_time >> 1;
    render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
    render->info.mpeg4.vop_coding_type               = 0;
    render->info.mpeg4.vop_fcode_forward             = s->f_code;
    render->info.mpeg4.vop_fcode_backward            = s->b_code;
    render->info.mpeg4.resync_marker_disable         = !s->resync_marker;
    render->info.mpeg4.interlaced                    = !s->progressive_sequence;
    render->info.mpeg4.quant_type                    = s->mpeg_quant;
    render->info.mpeg4.quarter_sample                = s->quarter_sample;
    render->info.mpeg4.short_video_header            = s->avctx->codec->id == CODEC_ID_H263;
    render->info.mpeg4.rounding_control              = s->no_rounding;
    render->info.mpeg4.alternate_vertical_scan_flag  = s->alternate_scan;
    render->info.mpeg4.top_field_first               = s->top_field_first;
    for (i = 0; i < 64; ++i) {
        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }
    render->info.mpeg4.forward_reference             = VDP_INVALID_HANDLE;
    render->info.mpeg4.backward_reference            = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case FF_B_TYPE:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg4.backward_reference = next->surface;
        render->info.mpeg4.vop_coding_type    = 2;
        // no break here, going to set forward prediction
        /* fallthrough */
    case FF_P_TYPE:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        assert(last);
        render->info.mpeg4.forward_reference  = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/* @}*/
123linslouis-android-video-cutter
jni/libavcodec/vdpau.c
C
asf20
16,214
/* * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * bitstream writer API */ #ifndef AVCODEC_PUT_BITS_H #define AVCODEC_PUT_BITS_H #include <stdint.h> #include <stdlib.h> #include <assert.h> #include "libavutil/bswap.h" #include "libavutil/common.h" #include "libavutil/intreadwrite.h" #include "libavutil/log.h" #include "mathops.h" //#define ALT_BITSTREAM_WRITER //#define ALIGNED_BITSTREAM_WRITER /* buf and buf_end must be present and used by every alternative writer. */ typedef struct PutBitContext { #ifdef ALT_BITSTREAM_WRITER uint8_t *buf, *buf_end; int index; #else uint32_t bit_buf; int bit_left; uint8_t *buf, *buf_ptr, *buf_end; #endif int size_in_bits; } PutBitContext; /** * Initializes the PutBitContext s. 
* * @param buffer the buffer where to put bits * @param buffer_size the size in bytes of buffer */ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size) { if(buffer_size < 0) { buffer_size = 0; buffer = NULL; } s->size_in_bits= 8*buffer_size; s->buf = buffer; s->buf_end = s->buf + buffer_size; #ifdef ALT_BITSTREAM_WRITER s->index=0; ((uint32_t*)(s->buf))[0]=0; // memset(buffer, 0, buffer_size); #else s->buf_ptr = s->buf; s->bit_left=32; s->bit_buf=0; #endif } /** * Returns the total number of bits written to the bitstream. */ static inline int put_bits_count(PutBitContext *s) { #ifdef ALT_BITSTREAM_WRITER return s->index; #else return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left; #endif } /** * Pads the end of the output stream with zeros. */ static inline void flush_put_bits(PutBitContext *s) { #ifdef ALT_BITSTREAM_WRITER align_put_bits(s); #else #ifndef BITSTREAM_WRITER_LE s->bit_buf<<= s->bit_left; #endif while (s->bit_left < 32) { /* XXX: should test end of buffer */ #ifdef BITSTREAM_WRITER_LE *s->buf_ptr++=s->bit_buf; s->bit_buf>>=8; #else *s->buf_ptr++=s->bit_buf >> 24; s->bit_buf<<=8; #endif s->bit_left+=8; } s->bit_left=32; s->bit_buf=0; #endif } #if defined(ALT_BITSTREAM_WRITER) || defined(BITSTREAM_WRITER_LE) #define align_put_bits align_put_bits_unsupported_here #define ff_put_string ff_put_string_unsupported_here #define ff_copy_bits ff_copy_bits_unsupported_here #else /** * Pads the bitstream with zeros up to the next byte boundary. */ void align_put_bits(PutBitContext *s); /** * Puts the string string in the bitstream. * * @param terminate_string 0-terminates the written string if value is 1 */ void ff_put_string(PutBitContext *pb, const char *string, int terminate_string); /** * Copies the content of src to the bitstream. * * @param length the number of bits of src to copy */ void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length); #endif /** * Writes up to 31 bits into a bitstream. 
* Use put_bits32 to write 32 bits. */ static inline void put_bits(PutBitContext *s, int n, unsigned int value) #ifndef ALT_BITSTREAM_WRITER { unsigned int bit_buf; int bit_left; // printf("put_bits=%d %x\n", n, value); assert(n <= 31 && value < (1U << n)); bit_buf = s->bit_buf; bit_left = s->bit_left; // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf); /* XXX: optimize */ #ifdef BITSTREAM_WRITER_LE bit_buf |= value << (32 - bit_left); if (n >= bit_left) { #if !HAVE_FAST_UNALIGNED if (3 & (intptr_t) s->buf_ptr) { AV_WL32(s->buf_ptr, bit_buf); } else #endif *(uint32_t *)s->buf_ptr = le2me_32(bit_buf); s->buf_ptr+=4; bit_buf = (bit_left==32)?0:value >> bit_left; bit_left+=32; } bit_left-=n; #else if (n < bit_left) { bit_buf = (bit_buf<<n) | value; bit_left-=n; } else { bit_buf<<=bit_left; bit_buf |= value >> (n - bit_left); #if !HAVE_FAST_UNALIGNED if (3 & (intptr_t) s->buf_ptr) { AV_WB32(s->buf_ptr, bit_buf); } else #endif *(uint32_t *)s->buf_ptr = be2me_32(bit_buf); //printf("bitbuf = %08x\n", bit_buf); s->buf_ptr+=4; bit_left+=32 - n; bit_buf = value; } #endif s->bit_buf = bit_buf; s->bit_left = bit_left; } #else /* ALT_BITSTREAM_WRITER defined */ { # ifdef ALIGNED_BITSTREAM_WRITER # if ARCH_X86 __asm__ volatile( "movl %0, %%ecx \n\t" "xorl %%eax, %%eax \n\t" "shrdl %%cl, %1, %%eax \n\t" "shrl %%cl, %1 \n\t" "movl %0, %%ecx \n\t" "shrl $3, %%ecx \n\t" "andl $0xFFFFFFFC, %%ecx \n\t" "bswapl %1 \n\t" "orl %1, (%2, %%ecx) \n\t" "bswapl %%eax \n\t" "addl %3, %0 \n\t" "movl %%eax, 4(%2, %%ecx) \n\t" : "=&r" (s->index), "=&r" (value) : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n)) : "%eax", "%ecx" ); # else int index= s->index; uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); value<<= 32-n; ptr[0] |= be2me_32(value>>(index&31)); ptr[1] = be2me_32(value<<(32-(index&31))); //if(n>24) printf("%d %d\n", n, value); index+= n; s->index= index; # endif # else //ALIGNED_BITSTREAM_WRITER # if ARCH_X86 __asm__ volatile( "movl $7, %%ecx \n\t" "andl %0, 
%%ecx \n\t" "addl %3, %%ecx \n\t" "negl %%ecx \n\t" "shll %%cl, %1 \n\t" "bswapl %1 \n\t" "movl %0, %%ecx \n\t" "shrl $3, %%ecx \n\t" "orl %1, (%%ecx, %2) \n\t" "addl %3, %0 \n\t" "movl $0, 4(%%ecx, %2) \n\t" : "=&r" (s->index), "=&r" (value) : "r" (s->buf), "r" (n), "0" (s->index), "1" (value) : "%ecx" ); # else int index= s->index; uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); ptr[0] |= be2me_32(value<<(32-n-(index&7) )); ptr[1] = 0; //if(n>24) printf("%d %d\n", n, value); index+= n; s->index= index; # endif # endif //!ALIGNED_BITSTREAM_WRITER } #endif static inline void put_sbits(PutBitContext *pb, int n, int32_t value) { assert(n >= 0 && n <= 31); put_bits(pb, n, value & ((1<<n)-1)); } /** * Writes exactly 32 bits into a bitstream. */ static void av_unused put_bits32(PutBitContext *s, uint32_t value) { int lo = value & 0xffff; int hi = value >> 16; #ifdef BITSTREAM_WRITER_LE put_bits(s, 16, lo); put_bits(s, 16, hi); #else put_bits(s, 16, hi); put_bits(s, 16, lo); #endif } /** * Returns the pointer to the byte where the bitstream writer will put * the next bit. */ static inline uint8_t* put_bits_ptr(PutBitContext *s) { #ifdef ALT_BITSTREAM_WRITER return s->buf + (s->index>>3); #else return s->buf_ptr; #endif } /** * Skips the given number of bytes. * PutBitContext must be flushed & aligned to a byte boundary before calling this. */ static inline void skip_put_bytes(PutBitContext *s, int n) { assert((put_bits_count(s)&7)==0); #ifdef ALT_BITSTREAM_WRITER FIXME may need some cleaning of the buffer s->index += n<<3; #else assert(s->bit_left==32); s->buf_ptr += n; #endif } /** * Skips the given number of bits. * Must only be used if the actual values in the bitstream do not matter. * If n is 0 the behavior is undefined. */ static inline void skip_put_bits(PutBitContext *s, int n) { #ifdef ALT_BITSTREAM_WRITER s->index += n; #else s->bit_left -= n; s->buf_ptr-= 4*(s->bit_left>>5); s->bit_left &= 31; #endif } /** * Changes the end of the buffer. 
* * @param size the new size in bytes of the buffer where to put bits */ static inline void set_put_bits_buffer_size(PutBitContext *s, int size) { s->buf_end= s->buf + size; } #endif /* AVCODEC_PUT_BITS_H */
123linslouis-android-video-cutter
jni/libavcodec/put_bits.h
C
asf20
8,957
/* * MPEG Audio common tables * copyright (c) 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * mpeg audio layer common tables. */ #include "mpegaudiodata.h" const uint16_t ff_mpa_bitrate_tab[2][3][15] = { { {0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 }, {0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 }, {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 } }, { {0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256}, {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160}, {0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160} } }; const uint16_t ff_mpa_freq_tab[3] = { 44100, 48000, 32000 }; /*******************************************************/ /* half mpeg encoding window (full precision) */ const int32_t ff_mpa_enwindow[257] = { 0, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -3, -3, -4, -4, -5, -5, -6, -7, -7, -8, -9, -10, -11, -13, -14, -16, -17, -19, -21, -24, -26, -29, -31, -35, -38, -41, -45, -49, -53, -58, -63, -68, -73, -79, -85, -91, -97, -104, -111, -117, -125, -132, -139, -147, -154, -161, -169, -176, -183, -190, -196, -202, -208, 213, 218, 222, 225, 227, 228, 228, 227, 224, 221, 215, 208, 200, 189, 177, 163, 146, 127, 106, 83, 57, 29, -2, -36, -72, 
-111, -153, -197, -244, -294, -347, -401, -459, -519, -581, -645, -711, -779, -848, -919, -991, -1064, -1137, -1210, -1283, -1356, -1428, -1498, -1567, -1634, -1698, -1759, -1817, -1870, -1919, -1962, -2001, -2032, -2057, -2075, -2085, -2087, -2080, -2063, 2037, 2000, 1952, 1893, 1822, 1739, 1644, 1535, 1414, 1280, 1131, 970, 794, 605, 402, 185, -45, -288, -545, -814, -1095, -1388, -1692, -2006, -2330, -2663, -3004, -3351, -3705, -4063, -4425, -4788, -5153, -5517, -5879, -6237, -6589, -6935, -7271, -7597, -7910, -8209, -8491, -8755, -8998, -9219, -9416, -9585, -9727, -9838, -9916, -9959, -9966, -9935, -9863, -9750, -9592, -9389, -9139, -8840, -8492, -8092, -7640, -7134, 6574, 5959, 5288, 4561, 3776, 2935, 2037, 1082, 70, -998, -2122, -3300, -4533, -5818, -7154, -8540, -9975,-11455,-12980,-14548,-16155,-17799,-19478,-21189, -22929,-24694,-26482,-28289,-30112,-31947,-33791,-35640, -37489,-39336,-41176,-43006,-44821,-46617,-48390,-50137, -51853,-53534,-55178,-56778,-58333,-59838,-61289,-62684, -64019,-65290,-66494,-67629,-68692,-69679,-70590,-71420, -72169,-72835,-73415,-73908,-74313,-74630,-74856,-74992, 75038, }; /*******************************************************/ /* layer 2 tables */ const int ff_mpa_sblimit_table[5] = { 27 , 30 , 8, 12 , 30 }; const int ff_mpa_quant_steps[17] = { 3, 5, 7, 9, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767, 65535 }; /* we use a negative value if grouped */ const int ff_mpa_quant_bits[17] = { -5, -7, 3, -10, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; /* encoding tables which give the quantization index. Note how it is possible to store them efficiently ! 
*/ static const unsigned char alloc_table_1[] = { 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 4, 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 3, 0, 1, 2, 3, 4, 5, 16, 2, 0, 1, 16, 2, 0, 1, 16, 2, 0, 1, 16, 2, 0, 1, 16, 2, 0, 1, 16, 2, 0, 1, 16, 2, 0, 1, 16, }; static const unsigned char alloc_table_3[] = { 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4, 0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, }; static const unsigned char alloc_table_4[] = { 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 3, 0, 1, 3, 4, 5, 6, 7, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 
3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, 2, 0, 1, 3, }; const unsigned char * const ff_mpa_alloc_tables[5] = { alloc_table_1, alloc_table_1, alloc_table_3, alloc_table_3, alloc_table_4, };
123linslouis-android-video-cutter
jni/libavcodec/mpegaudiodata.c
C
asf20
6,762
/*
 * V.Flash PTX (.ptx) image decoder
 * Copyright (c) 2007 Ivo van Poorten
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"

typedef struct PTXContext {
    AVFrame picture;   /* reused output frame owned by the decoder */
} PTXContext;

/** Set up the reusable output frame. */
static av_cold int ptx_init(AVCodecContext *avctx) {
    PTXContext *s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->picture);
    avctx->coded_frame= &s->picture;

    return 0;
}

/**
 * Decode one PTX image packet into an RGB555 frame.
 *
 * Packet layout: a little-endian header (pixel-data offset at byte 0,
 * width at byte 8, height at byte 10, bits-per-pixel at byte 12),
 * followed at 'offset' by raw little-endian RGB555 rows.
 *
 * @return number of bytes consumed on success, -1 on error.
 */
static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt) {
    const uint8_t *buf     = avpkt->data;
    const uint8_t *buf_end = avpkt->data + avpkt->size;
    PTXContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    unsigned int offset, w, h, y, stride, bytes_per_pixel;
    uint8_t *ptr;

    /* The header fields read below live in bytes 0..13; reject packets
     * too short to contain them (prevents out-of-bounds reads). */
    if (avpkt->size < 14) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small to contain PTX header\n");
        return -1;
    }

    offset          = AV_RL16(buf);
    w               = AV_RL16(buf+8);
    h               = AV_RL16(buf+10);
    bytes_per_pixel = AV_RL16(buf+12) >> 3;

    if (bytes_per_pixel != 2) {
        av_log(avctx, AV_LOG_ERROR, "image format is not rgb15, please report on ffmpeg-users mailing list\n");
        return -1;
    }

    avctx->pix_fmt = PIX_FMT_RGB555;

    if (offset != 0x2c)
        av_log(avctx, AV_LOG_WARNING, "offset != 0x2c, untested due to lack of sample files\n");

    /* The pixel data must start within the packet. */
    if (offset > (unsigned int)avpkt->size) {
        av_log(avctx, AV_LOG_ERROR, "Pixel data offset larger than packet\n");
        return -1;
    }

    buf += offset;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    /* Copy row by row, stopping early if the packet is truncated so we
     * never read past buf_end. */
    for (y = 0; y < h; y++) {
        if ((unsigned int)(buf_end - buf) < w * bytes_per_pixel) {
            av_log(avctx, AV_LOG_WARNING, "Truncated PTX pixel data\n");
            break;
        }
#if HAVE_BIGENDIAN
        {
            unsigned int x;
            for (x = 0; x < w * bytes_per_pixel; x += bytes_per_pixel)
                AV_WN16(ptr+x, AV_RL16(buf+x));
        }
#else
        memcpy(ptr, buf, w*bytes_per_pixel);
#endif
        ptr += stride;
        buf += w*bytes_per_pixel;
    }

    *picture = s->picture;
    *data_size = sizeof(AVPicture);

    return offset + w*h*bytes_per_pixel;
}

/** Release the output frame on decoder close. */
static av_cold int ptx_end(AVCodecContext *avctx) {
    PTXContext *s = avctx->priv_data;

    if(s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec ptx_decoder = {
    "ptx",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_PTX,
    sizeof(PTXContext),
    ptx_init,
    NULL,
    ptx_end,
    ptx_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("V.Flash PTX image"),
};
123linslouis-android-video-cutter
jni/libavcodec/ptx.c
C
asf20
3,296
/* * RealVideo 4 decoder * copyright (c) 2007 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * RV40 VLC tables used for macroblock information decoding */ #ifndef AVCODEC_RV40VLC2_H #define AVCODEC_RV40VLC2_H #include <stdint.h> /** * codes used for the first four block types */ //@{ #define AIC_TOP_BITS 8 #define AIC_TOP_SIZE 16 static const uint8_t rv40_aic_top_vlc_codes[AIC_TOP_SIZE] = { 0x01, 0x05, 0x01, 0x00, 0x03, 0x3D, 0x1D, 0x02, 0x04, 0x3C, 0x3F, 0x1C, 0x0D, 0x3E, 0x0C, 0x01 }; static const uint8_t rv40_aic_top_vlc_bits[AIC_TOP_SIZE] = { 1, 4, 5, 5, 5, 7, 6, 5, 4, 7, 7, 6, 5, 7, 5, 3 }; //@} /** * codes used for determining a pair of block types */ //@{ #define AIC_MODE2_NUM 20 #define AIC_MODE2_SIZE 81 #define AIC_MODE2_BITS 9 static const uint16_t aic_mode2_vlc_codes[AIC_MODE2_NUM][AIC_MODE2_SIZE] = { { 0x0001, 0x0001, 0x0005, 0x01F5, 0x0011, 0x0049, 0x0000, 0x0048, 0x004B, 0x0035, 0x0003, 0x0034, 0x03C9, 0x01F4, 0x00C9, 0x004A, 0x0FD9, 0x03C8, 0x0010, 0x0037, 0x0001, 0x00C8, 0x0075, 0x01F7, 0x00CB, 0x0074, 0x0002, 0x01F6, 0x00CA, 0x01F1, 0x01F0, 0x1F81, 0x07F9, 0x1F80, 0x1F83, 0x07F8, 0x0077, 0x00F5, 0x0036, 0x07FB, 0x0076, 0x1F82, 0x00F4, 0x00F7, 0x07FA, 0x0071, 0x00F6, 0x03CB, 0x03CA, 0x0FD8, 0x00F1, 0x03F5, 0x1F8D, 0x07E5, 0x0013, 
0x0031, 0x00F0, 0x0FDB, 0x00F3, 0x07E4, 0x0030, 0x01F3, 0x07E7, 0x03F4, 0x07E6, 0x0070, 0x3F19, 0x01F2, 0x3F18, 0x0FDA, 0x0033, 0x07E1, 0x01FD, 0x01FC, 0x0073, 0x01FF, 0x0FC5, 0x0FC4, 0x0FC7, 0x03F7, 0x0072, }, { 0x0005, 0x0005, 0x0005, 0x0079, 0x0005, 0x000D, 0x001D, 0x0078, 0x0069, 0x0004, 0x0001, 0x0007, 0x0068, 0x001C, 0x001F, 0x0004, 0x006B, 0x000C, 0x0004, 0x001E, 0x0006, 0x006A, 0x0015, 0x000F, 0x0014, 0x0017, 0x0007, 0x0016, 0x000E, 0x0011, 0x0009, 0x00D1, 0x00D0, 0x0181, 0x00D3, 0x007B, 0x0010, 0x0013, 0x0004, 0x00D2, 0x0007, 0x0319, 0x0008, 0x007A, 0x00DD, 0x0019, 0x0006, 0x000B, 0x0065, 0x00DC, 0x0012, 0x0064, 0x0180, 0x00DF, 0x0006, 0x0018, 0x0001, 0x00DE, 0x001D, 0x00D9, 0x001B, 0x0067, 0x000A, 0x00D8, 0x00DB, 0x001C, 0x0318, 0x00DA, 0x0635, 0x0183, 0x0000, 0x00C5, 0x0066, 0x0061, 0x0035, 0x00C4, 0x0182, 0x0634, 0x031B, 0x00C7, 0x001F, }, { 0x0005, 0x0001, 0x001D, 0x01C1, 0x0035, 0x00F1, 0x006D, 0x00F0, 0x0049, 0x0000, 0x0004, 0x0003, 0x00F3, 0x0048, 0x0034, 0x006C, 0x01C0, 0x01C3, 0x0007, 0x0006, 0x0001, 0x006F, 0x0002, 0x004B, 0x006E, 0x001C, 0x0005, 0x0069, 0x0068, 0x006B, 0x0037, 0x01C2, 0x00F2, 0x0395, 0x01CD, 0x00FD, 0x006A, 0x0036, 0x0015, 0x01CC, 0x0014, 0x0394, 0x004A, 0x00FC, 0x00FF, 0x0017, 0x0031, 0x00FE, 0x01CF, 0x0397, 0x00F9, 0x01CE, 0x0725, 0x0396, 0x0016, 0x0030, 0x0075, 0x0724, 0x00F8, 0x0727, 0x0033, 0x0391, 0x0390, 0x0011, 0x0032, 0x001F, 0x00FB, 0x0074, 0x0726, 0x00FA, 0x001E, 0x0077, 0x0019, 0x0018, 0x0004, 0x0010, 0x003D, 0x0076, 0x0071, 0x0013, 0x0001, }, { 0x000D, 0x0019, 0x0011, 0x0015, 0x0061, 0x0019, 0x0014, 0x01AD, 0x0060, 0x0018, 0x0001, 0x0005, 0x001B, 0x0010, 0x0019, 0x0005, 0x0017, 0x0018, 0x0016, 0x0004, 0x0004, 0x0013, 0x000C, 0x0012, 0x001A, 0x0018, 0x0005, 0x000F, 0x001B, 0x0004, 0x001D, 0x0011, 0x001C, 0x0010, 0x000E, 0x001B, 0x0013, 0x001F, 0x001A, 0x0029, 0x0005, 0x0063, 0x001E, 0x0009, 0x0062, 0x0008, 0x0007, 0x0007, 0x0019, 0x0004, 0x001A, 0x0018, 0x006D, 0x0007, 0x001B, 0x0007, 0x001A, 0x006C, 0x0006, 0x0012, 
0x0005, 0x006F, 0x000B, 0x006E, 0x0069, 0x001D, 0x0359, 0x0028, 0x002B, 0x002A, 0x001C, 0x00D5, 0x0358, 0x001F, 0x0001, 0x001E, 0x0068, 0x00D4, 0x00D7, 0x0019, 0x0000, }, { 0x00B9, 0x0061, 0x0060, 0x00B8, 0x02B5, 0x01AD, 0x00BB, 0x0AF5, 0x0151, 0x0001, 0x0001, 0x0005, 0x0000, 0x0003, 0x0005, 0x0004, 0x0063, 0x0025, 0x00BA, 0x0004, 0x0007, 0x0062, 0x00A5, 0x0024, 0x006D, 0x0002, 0x006C, 0x02B4, 0x000D, 0x006F, 0x0027, 0x00A4, 0x0026, 0x01AC, 0x0150, 0x01AF, 0x01AE, 0x0021, 0x006E, 0x02B7, 0x0020, 0x0153, 0x0023, 0x00A7, 0x0152, 0x00A6, 0x0006, 0x000C, 0x0022, 0x01A9, 0x0019, 0x002D, 0x02B6, 0x01A8, 0x000F, 0x0007, 0x000E, 0x00A1, 0x0069, 0x002C, 0x0001, 0x01AB, 0x00A0, 0x02B1, 0x00A3, 0x002F, 0x0AF4, 0x02B0, 0x0AF7, 0x02B3, 0x0068, 0x015D, 0x0AF6, 0x01AA, 0x0055, 0x015C, 0x02B2, 0x0579, 0x0578, 0x015F, 0x00A2, }, { 0x0905, 0x013D, 0x013C, 0x0904, 0x121D, 0x049D, 0x049C, 0x243D, 0x0907, 0x00ED, 0x0001, 0x0015, 0x0041, 0x013F, 0x0031, 0x0014, 0x025D, 0x025C, 0x013E, 0x000D, 0x0000, 0x0040, 0x0139, 0x0043, 0x0030, 0x0017, 0x0033, 0x0906, 0x0032, 0x0042, 0x00EC, 0x025F, 0x00EF, 0x025E, 0x049F, 0x0138, 0x0901, 0x013B, 0x0259, 0x121C, 0x049E, 0x0900, 0x0258, 0x243C, 0x121F, 0x0903, 0x003D, 0x00EE, 0x025B, 0x025A, 0x004D, 0x013A, 0x0902, 0x0245, 0x00E9, 0x0016, 0x00E8, 0x0499, 0x0125, 0x0244, 0x004C, 0x0498, 0x090D, 0x00EB, 0x003C, 0x0011, 0x049B, 0x049A, 0x0485, 0x00EA, 0x003F, 0x0124, 0x090C, 0x003E, 0x0039, 0x0095, 0x0247, 0x0246, 0x0484, 0x0094, 0x0038, }, { 0x0F09, 0x00CD, 0x01FD, 0x0791, 0x1E6D, 0x0790, 0x03D9, 0x3CD1, 0x3CD0, 0x0075, 0x0001, 0x0001, 0x0035, 0x00CC, 0x0011, 0x0000, 0x03D8, 0x01FC, 0x03DB, 0x0010, 0x0003, 0x00CF, 0x03DA, 0x00CE, 0x0074, 0x0034, 0x0077, 0x0793, 0x0013, 0x0076, 0x0071, 0x03C5, 0x0070, 0x01FF, 0x0792, 0x01FE, 0x01F9, 0x0037, 0x00C9, 0x0F08, 0x01F8, 0x03C4, 0x00C8, 0x0F0B, 0x079D, 0x03C7, 0x0001, 0x0012, 0x0073, 0x00CB, 0x0005, 0x0036, 0x03C6, 0x0072, 0x007D, 0x0002, 0x00CA, 0x079C, 0x01FB, 0x00F5, 0x0031, 0x079F, 0x0F0A, 0x0F35, 0x079E, 
0x01FA, 0x1E6C, 0x1E6F, 0x3CD3, 0x0799, 0x03C1, 0x1E6E, 0x3CD2, 0x0030, 0x00F4, 0x007C, 0x03C0, 0x03C3, 0x0798, 0x01E5, 0x00F7, }, { 0x01A5, 0x0001, 0x001D, 0x0021, 0x00A1, 0x000D, 0x0061, 0x06B9, 0x00A0, 0x0060, 0x0001, 0x0005, 0x000C, 0x0020, 0x001C, 0x0004, 0x01A4, 0x01A7, 0x00A3, 0x001F, 0x001E, 0x0023, 0x0022, 0x002D, 0x002C, 0x0063, 0x0062, 0x1A81, 0x01A6, 0x01A1, 0x06B8, 0x06BB, 0x00A2, 0x06BA, 0x0D59, 0x06A5, 0x01A0, 0x000F, 0x006D, 0x06A4, 0x002F, 0x00AD, 0x006C, 0x06A7, 0x00AC, 0x0D58, 0x000E, 0x01A3, 0x00AF, 0x00AE, 0x006F, 0x01A2, 0x0D5B, 0x00A9, 0x0019, 0x0001, 0x0009, 0x00A8, 0x006E, 0x002E, 0x0000, 0x01AD, 0x00AB, 0x00AA, 0x0355, 0x0029, 0x1A80, 0x1A83, 0x1A82, 0x0354, 0x01AC, 0x0D5A, 0x1A8D, 0x01AF, 0x0357, 0x0D45, 0x0D44, 0x0D47, 0x1A8C, 0x06A6, 0x06A1, }, { 0x0001, 0x0011, 0x0005, 0x0775, 0x00F9, 0x00F8, 0x0031, 0x0030, 0x0049, 0x00FB, 0x0010, 0x0033, 0x0EC9, 0x038D, 0x038C, 0x00FA, 0x038F, 0x0774, 0x0048, 0x0032, 0x0000, 0x01D5, 0x00E5, 0x038E, 0x00E4, 0x0013, 0x000D, 0x0389, 0x0777, 0x0388, 0x038B, 0x1DF9, 0x0EC8, 0x3BC9, 0x1DF8, 0x038A, 0x03B5, 0x0776, 0x00E7, 0x3BC8, 0x01D4, 0x3BCB, 0x0ECB, 0x0771, 0x0ECA, 0x01D7, 0x03B4, 0x01D6, 0x1DFB, 0x0EF5, 0x0770, 0x0EF4, 0x3BCA, 0x0773, 0x00E6, 0x03B7, 0x004B, 0x1DFA, 0x03B6, 0x0EF7, 0x00E1, 0x0EF6, 0x0EF1, 0x03B1, 0x01D1, 0x003D, 0x0EF0, 0x0772, 0x077D, 0x077C, 0x003C, 0x01D0, 0x03B0, 0x01D3, 0x003F, 0x03B3, 0x01D2, 0x0EF3, 0x077F, 0x00E0, 0x004A, }, { 0x0015, 0x0049, 0x0014, 0x07D1, 0x03FD, 0x03FC, 0x01C1, 0x01C0, 0x00F1, 0x0017, 0x0001, 0x0001, 0x01C3, 0x0048, 0x004B, 0x0016, 0x0031, 0x01C2, 0x004A, 0x0011, 0x0000, 0x01CD, 0x00F0, 0x01CC, 0x0075, 0x0010, 0x000D, 0x03FF, 0x01CF, 0x01CE, 0x07D0, 0x0F81, 0x07D3, 0x1F1D, 0x0F80, 0x07D2, 0x01C9, 0x03FE, 0x0074, 0x07DD, 0x00F3, 0x1F1C, 0x07DC, 0x03F9, 0x07DF, 0x00F2, 0x00FD, 0x0077, 0x07DE, 0x07D9, 0x01C8, 0x07D8, 0x0F83, 0x03F8, 0x0030, 0x0076, 0x0013, 0x0F82, 0x00FC, 0x03FB, 0x0033, 0x03FA, 0x03E5, 0x03E4, 0x01CB, 0x0032, 0x1F1F, 0x03E7, 0x07DB, 0x07DA, 
0x003D, 0x01CA, 0x07C5, 0x03E6, 0x0071, 0x0F8D, 0x07C4, 0x1F1E, 0x0F8C, 0x03E1, 0x01F5, }, { 0x0019, 0x0065, 0x0018, 0x0351, 0x0350, 0x0353, 0x0021, 0x0020, 0x0064, 0x001D, 0x0005, 0x0005, 0x01A5, 0x0023, 0x0067, 0x0005, 0x0066, 0x0022, 0x001B, 0x0004, 0x0001, 0x0004, 0x001C, 0x0061, 0x001A, 0x0005, 0x0004, 0x0007, 0x002D, 0x0006, 0x002C, 0x01A4, 0x002F, 0x0352, 0x035D, 0x0060, 0x0001, 0x002E, 0x001F, 0x035C, 0x0000, 0x06B1, 0x01A7, 0x0029, 0x01A6, 0x0028, 0x0063, 0x0062, 0x035F, 0x01A1, 0x002B, 0x06B0, 0x06B3, 0x01A0, 0x0003, 0x006D, 0x001E, 0x035E, 0x006C, 0x06B2, 0x0002, 0x01A3, 0x01A2, 0x000D, 0x0005, 0x0007, 0x01AD, 0x006F, 0x002A, 0x006E, 0x0004, 0x0004, 0x000C, 0x0007, 0x0006, 0x000F, 0x000E, 0x00D5, 0x0009, 0x0006, 0x0007, }, { 0x0065, 0x0181, 0x0064, 0x36C9, 0x06D5, 0x0DB5, 0x0379, 0x0180, 0x0183, 0x00D5, 0x001D, 0x001C, 0x0DB4, 0x0182, 0x0378, 0x00D4, 0x00D7, 0x06D4, 0x0067, 0x001F, 0x0001, 0x00D6, 0x00D1, 0x018D, 0x0066, 0x0001, 0x0000, 0x037B, 0x06D7, 0x037A, 0x0DB7, 0x36C8, 0x06D6, 0x0DB6, 0x1B79, 0x0DB1, 0x018C, 0x0365, 0x00D0, 0x1B78, 0x00D3, 0x1B7B, 0x0364, 0x06D1, 0x06D0, 0x018F, 0x018E, 0x00D2, 0x36CB, 0x0367, 0x0366, 0x06D3, 0x0DB0, 0x06D2, 0x0361, 0x06DD, 0x0189, 0x36CA, 0x0360, 0x36F5, 0x0188, 0x0DB3, 0x36F4, 0x0009, 0x0008, 0x0005, 0x06DC, 0x00DD, 0x018B, 0x00DC, 0x0004, 0x000B, 0x018A, 0x0061, 0x0003, 0x0363, 0x00DF, 0x06DF, 0x0362, 0x000A, 0x001E, }, { 0x001D, 0x0061, 0x000D, 0x0D55, 0x06B9, 0x06B8, 0x01A5, 0x0021, 0x0020, 0x0023, 0x000C, 0x0060, 0x0D54, 0x00AD, 0x00AC, 0x0022, 0x00AF, 0x06BB, 0x000F, 0x001C, 0x0001, 0x002D, 0x0063, 0x01A4, 0x000E, 0x0001, 0x0005, 0x01A7, 0x06BA, 0x01A6, 0x06A5, 0x0D57, 0x0D56, 0x1ABD, 0x0D51, 0x00AE, 0x002C, 0x00A9, 0x002F, 0x0D50, 0x01A1, 0x1ABC, 0x06A4, 0x06A7, 0x06A6, 0x00A8, 0x06A1, 0x01A0, 0x1ABF, 0x0D53, 0x06A0, 0x0D52, 0x1ABE, 0x06A3, 0x0062, 0x002E, 0x0009, 0x0D5D, 0x01A3, 0x0D5C, 0x006D, 0x00AB, 0x06A2, 0x006C, 0x001F, 0x0001, 0x06AD, 0x0029, 0x01A2, 0x0028, 0x0004, 0x001E, 0x01AD, 0x006F, 0x0000, 
0x01AC, 0x01AF, 0x06AC, 0x00AA, 0x006E, 0x0019, }, { 0x0019, 0x007D, 0x0018, 0x01B5, 0x000D, 0x01B4, 0x007C, 0x007F, 0x01B7, 0x000C, 0x001B, 0x001A, 0x01B6, 0x000F, 0x00D5, 0x0019, 0x007E, 0x00D4, 0x0018, 0x001B, 0x0001, 0x000E, 0x0011, 0x0009, 0x0005, 0x0005, 0x0005, 0x00D7, 0x01B1, 0x0008, 0x01B0, 0x0079, 0x06FD, 0x0371, 0x0370, 0x00D6, 0x0078, 0x01B3, 0x0010, 0x0373, 0x0013, 0x06FC, 0x007B, 0x007A, 0x00D1, 0x00D0, 0x00D3, 0x0065, 0x0372, 0x06FF, 0x0064, 0x06FE, 0x037D, 0x00D2, 0x00DD, 0x0067, 0x0004, 0x037C, 0x0012, 0x01B2, 0x0007, 0x0066, 0x01BD, 0x0006, 0x0061, 0x0004, 0x01BC, 0x001A, 0x0060, 0x001D, 0x0004, 0x001C, 0x0063, 0x0001, 0x0007, 0x000B, 0x0000, 0x0062, 0x000A, 0x0005, 0x0007, }, { 0x0069, 0x0045, 0x0068, 0x04BD, 0x0255, 0x04BC, 0x00E5, 0x00E4, 0x0031, 0x0030, 0x0019, 0x0001, 0x0121, 0x00E7, 0x00E6, 0x0033, 0x00E1, 0x00E0, 0x006B, 0x0018, 0x0001, 0x0044, 0x0032, 0x0047, 0x006A, 0x001B, 0x0005, 0x003D, 0x0046, 0x0015, 0x0041, 0x0120, 0x0123, 0x04BF, 0x0122, 0x0040, 0x003C, 0x00E3, 0x0014, 0x0254, 0x0043, 0x0975, 0x012D, 0x00E2, 0x00ED, 0x0042, 0x00EC, 0x004D, 0x0257, 0x0256, 0x0251, 0x04BE, 0x0974, 0x0250, 0x00EF, 0x00EE, 0x004C, 0x04B9, 0x012C, 0x04B8, 0x004F, 0x04BB, 0x0253, 0x003F, 0x0017, 0x0001, 0x0252, 0x00E9, 0x00E8, 0x00EB, 0x0000, 0x0003, 0x0016, 0x0002, 0x0004, 0x004E, 0x003E, 0x00EA, 0x0049, 0x000D, 0x0007, }, { 0x000D, 0x01BD, 0x000C, 0x0D31, 0x0D30, 0x0D33, 0x0359, 0x0358, 0x002D, 0x0065, 0x001D, 0x001C, 0x0D32, 0x035B, 0x035A, 0x002C, 0x01BC, 0x0345, 0x000F, 0x001F, 0x0001, 0x002F, 0x0064, 0x01BF, 0x0067, 0x0001, 0x0005, 0x0066, 0x002E, 0x0061, 0x0029, 0x0695, 0x0694, 0x0697, 0x0696, 0x0060, 0x01BE, 0x0D3D, 0x0028, 0x1A49, 0x0344, 0x1A48, 0x1A4B, 0x0D3C, 0x0691, 0x002B, 0x01B9, 0x002A, 0x0D3F, 0x0690, 0x0347, 0x0D3E, 0x1A4A, 0x0346, 0x00D5, 0x0341, 0x0063, 0x0D39, 0x0340, 0x0D38, 0x01B8, 0x0D3B, 0x0D3A, 0x00D4, 0x0062, 0x0000, 0x0693, 0x01BB, 0x0343, 0x0342, 0x001E, 0x000E, 0x006D, 0x0009, 0x0001, 0x006C, 0x00D7, 0x034D, 0x01BA, 0x0008, 
0x0004, }, { 0x0075, 0x00CD, 0x0035, 0x03C1, 0x03C0, 0x07F9, 0x03C3, 0x1F8D, 0x00CC, 0x0074, 0x0011, 0x0010, 0x03C2, 0x0FD9, 0x01F1, 0x00CF, 0x03CD, 0x00CE, 0x0034, 0x0001, 0x0001, 0x0037, 0x00C9, 0x00C8, 0x0036, 0x0000, 0x0001, 0x0FD8, 0x03CC, 0x00CB, 0x01F0, 0x07F8, 0x03CF, 0x07FB, 0x07FA, 0x00CA, 0x01F3, 0x03CE, 0x00F5, 0x0FDB, 0x00F4, 0x07E5, 0x07E4, 0x07E7, 0x01F2, 0x07E6, 0x03C9, 0x01FD, 0x0FDA, 0x1F8C, 0x07E1, 0x1F8F, 0x1F8E, 0x03C8, 0x03CB, 0x0077, 0x0076, 0x0FC5, 0x03CA, 0x07E0, 0x00F7, 0x0FC4, 0x03F5, 0x00F6, 0x01FC, 0x0003, 0x03F4, 0x0071, 0x03F7, 0x00F1, 0x0013, 0x0031, 0x0030, 0x0070, 0x0005, 0x0012, 0x0073, 0x01FF, 0x0072, 0x007D, 0x0002, }, { 0x0061, 0x0055, 0x0060, 0x02C9, 0x02C8, 0x02CB, 0x0171, 0x00B5, 0x0054, 0x0001, 0x0001, 0x0001, 0x0057, 0x0001, 0x0063, 0x001D, 0x0062, 0x0039, 0x006D, 0x0000, 0x0005, 0x0038, 0x0056, 0x00B4, 0x006C, 0x0003, 0x001C, 0x006F, 0x003B, 0x0002, 0x003A, 0x0170, 0x00B7, 0x0173, 0x0051, 0x006E, 0x0025, 0x0050, 0x0069, 0x02CA, 0x0024, 0x0027, 0x0172, 0x00B6, 0x00B1, 0x000D, 0x000C, 0x001F, 0x017D, 0x0026, 0x0068, 0x0053, 0x017C, 0x006B, 0x001E, 0x000F, 0x0004, 0x017F, 0x006A, 0x02F5, 0x0019, 0x0021, 0x0052, 0x02F4, 0x02F7, 0x0020, 0x0BCD, 0x05E5, 0x05E4, 0x0BCC, 0x0023, 0x00B0, 0x02F6, 0x00B3, 0x0022, 0x02F1, 0x02F0, 0x0BCF, 0x0BCE, 0x017E, 0x005D, }, { 0x00BD, 0x0025, 0x01A1, 0x0159, 0x0299, 0x00BC, 0x0024, 0x0505, 0x0504, 0x01A0, 0x0001, 0x001D, 0x006D, 0x001C, 0x0001, 0x0005, 0x0027, 0x01A3, 0x0158, 0x001F, 0x001E, 0x01A2, 0x0026, 0x0021, 0x000D, 0x0020, 0x0023, 0x0298, 0x006C, 0x0022, 0x00BF, 0x00BE, 0x01AD, 0x002D, 0x029B, 0x00B9, 0x01AC, 0x00B8, 0x01AF, 0x029A, 0x006F, 0x015B, 0x006E, 0x0285, 0x0284, 0x01AE, 0x0019, 0x002C, 0x01A9, 0x01A8, 0x000C, 0x000F, 0x015A, 0x00BB, 0x000E, 0x0000, 0x0069, 0x01AB, 0x0018, 0x01AA, 0x0004, 0x0055, 0x00BA, 0x0507, 0x0145, 0x0054, 0x0506, 0x00A5, 0x0501, 0x00A4, 0x0057, 0x0500, 0x0A05, 0x0144, 0x00A7, 0x0287, 0x0286, 0x0503, 0x0147, 0x0A04, 0x0146, }, { 0x0759, 0x0041, 0x00E5, 
0x03BD, 0x0E9D, 0x012D, 0x012C, 0x3A1D, 0x03BC, 0x012F, 0x000D, 0x0040, 0x00E4, 0x03BF, 0x0043, 0x0042, 0x0758, 0x03BE, 0x00E7, 0x0001, 0x0000, 0x003D, 0x00E6, 0x0015, 0x0014, 0x0017, 0x003C, 0x743D, 0x012E, 0x03B9, 0x03B8, 0x0E9C, 0x03BB, 0x075B, 0x3A1C, 0x0E9F, 0x0129, 0x00E1, 0x0128, 0x0E9E, 0x012B, 0x075A, 0x00E0, 0x0E99, 0x0745, 0x3A1F, 0x03BA, 0x0744, 0x0E98, 0x1D0D, 0x03A5, 0x0E9B, 0x743C, 0x0E9A, 0x012A, 0x004D, 0x00E3, 0x0E85, 0x01D5, 0x0E84, 0x004C, 0x0747, 0x1D0C, 0x01D4, 0x003F, 0x0016, 0x0746, 0x03A4, 0x0741, 0x004F, 0x003E, 0x01D7, 0x0740, 0x000C, 0x0011, 0x004E, 0x00E2, 0x00ED, 0x00EC, 0x0049, 0x0048, }, }; static const uint8_t aic_mode2_vlc_bits[AIC_MODE2_NUM][AIC_MODE2_SIZE] = { { 1, 5, 4, 10, 6, 8, 5, 8, 8, 7, 5, 7, 11, 10, 9, 8, 13, 11, 6, 7, 3, 9, 8, 10, 9, 8, 5, 10, 9, 10, 10, 14, 12, 14, 14, 12, 8, 9, 7, 12, 8, 14, 9, 9, 12, 8, 9, 11, 11, 13, 9, 11, 14, 12, 6, 7, 9, 13, 9, 12, 7, 10, 12, 11, 12, 8, 15, 10, 15, 13, 7, 12, 10, 10, 8, 10, 13, 13, 13, 11, 8, }, { 4, 6, 5, 11, 8, 10, 7, 11, 9, 4, 1, 4, 9, 7, 7, 5, 9, 10, 6, 7, 4, 9, 9, 10, 9, 9, 6, 9, 10, 9, 10, 12, 12, 13, 12, 11, 9, 9, 8, 12, 8, 14, 10, 11, 12, 7, 8, 10, 11, 12, 9, 11, 13, 12, 6, 7, 8, 12, 9, 12, 7, 11, 10, 12, 12, 9, 14, 12, 15, 13, 8, 12, 11, 11, 10, 12, 13, 15, 14, 12, 9, }, { 5, 7, 6, 12, 9, 11, 8, 11, 10, 7, 5, 7, 11, 10, 9, 8, 12, 12, 5, 5, 1, 8, 7, 10, 8, 6, 4, 8, 8, 8, 9, 12, 11, 13, 12, 11, 8, 9, 8, 12, 8, 13, 10, 11, 11, 8, 9, 11, 12, 13, 11, 12, 14, 13, 8, 9, 10, 14, 11, 14, 9, 13, 13, 8, 9, 6, 11, 10, 14, 11, 6, 10, 6, 6, 4, 8, 9, 10, 10, 8, 5, }, { 11, 7, 8, 10, 12, 9, 10, 14, 12, 7, 1, 5, 7, 8, 6, 4, 10, 9, 10, 5, 4, 8, 11, 8, 7, 6, 7, 11, 6, 7, 8, 10, 8, 10, 11, 9, 10, 8, 9, 13, 9, 12, 8, 11, 12, 11, 4, 7, 8, 9, 6, 8, 12, 9, 8, 5, 8, 12, 9, 10, 6, 12, 11, 12, 12, 10, 15, 13, 13, 13, 10, 13, 15, 10, 9, 10, 12, 13, 13, 10, 9, }, { 11, 8, 8, 11, 13, 10, 11, 15, 12, 7, 1, 4, 7, 7, 5, 4, 8, 9, 11, 5, 5, 8, 11, 9, 8, 7, 8, 13, 7, 8, 9, 11, 9, 10, 12, 10, 10, 9, 8, 13, 9, 
12, 9, 11, 12, 11, 5, 7, 9, 10, 6, 9, 13, 10, 7, 4, 7, 11, 8, 9, 5, 10, 11, 13, 11, 9, 15, 13, 15, 13, 8, 12, 15, 10, 10, 12, 13, 14, 14, 12, 11, }, { 12, 9, 9, 12, 13, 11, 11, 14, 12, 8, 2, 5, 7, 9, 6, 5, 10, 10, 9, 4, 2, 7, 9, 7, 6, 5, 6, 12, 6, 7, 8, 10, 8, 10, 11, 9, 12, 9, 10, 13, 11, 12, 10, 14, 13, 12, 6, 8, 10, 10, 7, 9, 12, 10, 8, 5, 8, 11, 9, 10, 7, 11, 12, 8, 6, 5, 11, 11, 11, 8, 6, 9, 12, 6, 6, 8, 10, 10, 11, 8, 6, }, { 13, 9, 10, 12, 14, 12, 11, 15, 15, 8, 1, 5, 7, 9, 6, 5, 11, 10, 11, 6, 5, 9, 11, 9, 8, 7, 8, 12, 6, 8, 8, 11, 8, 10, 12, 10, 10, 7, 9, 13, 10, 11, 9, 13, 12, 11, 3, 6, 8, 9, 4, 7, 11, 8, 8, 5, 9, 12, 10, 9, 7, 12, 13, 13, 12, 10, 14, 14, 15, 12, 11, 14, 15, 7, 9, 8, 11, 11, 12, 10, 9, }, { 10, 5, 6, 9, 11, 7, 8, 12, 11, 8, 1, 4, 7, 9, 6, 4, 10, 10, 11, 6, 6, 9, 9, 9, 9, 8, 8, 14, 10, 10, 12, 12, 11, 12, 13, 12, 10, 7, 8, 12, 9, 11, 8, 12, 11, 13, 7, 10, 11, 11, 8, 10, 13, 11, 6, 3, 7, 11, 8, 9, 5, 10, 11, 11, 11, 9, 14, 14, 14, 11, 10, 13, 14, 10, 11, 13, 13, 13, 14, 12, 12, }, { 2, 5, 3, 11, 8, 8, 6, 6, 7, 8, 5, 6, 12, 10, 10, 8, 10, 11, 7, 6, 2, 9, 8, 10, 8, 5, 4, 10, 11, 10, 10, 13, 12, 14, 13, 10, 10, 11, 8, 14, 9, 14, 12, 11, 12, 9, 10, 9, 13, 12, 11, 12, 14, 11, 8, 10, 7, 13, 10, 12, 8, 12, 12, 10, 9, 6, 12, 11, 11, 11, 6, 9, 10, 9, 6, 10, 9, 12, 11, 8, 7, }, { 6, 8, 6, 12, 11, 11, 10, 10, 9, 6, 1, 3, 10, 8, 8, 6, 7, 10, 8, 6, 3, 10, 9, 10, 8, 6, 5, 11, 10, 10, 12, 13, 12, 14, 13, 12, 10, 11, 8, 12, 9, 14, 12, 11, 12, 9, 9, 8, 12, 12, 10, 12, 13, 11, 7, 8, 6, 13, 9, 11, 7, 11, 11, 11, 10, 7, 14, 11, 12, 12, 7, 10, 12, 11, 8, 13, 12, 14, 13, 11, 10, }, { 7, 10, 7, 13, 13, 13, 11, 11, 10, 8, 5, 6, 12, 11, 10, 9, 10, 11, 7, 5, 1, 9, 8, 10, 7, 4, 4, 9, 11, 9, 11, 12, 11, 13, 13, 10, 9, 11, 8, 13, 9, 14, 12, 11, 12, 11, 10, 10, 13, 12, 11, 14, 14, 12, 9, 10, 8, 13, 10, 14, 9, 12, 12, 9, 7, 4, 12, 10, 11, 10, 6, 7, 9, 7, 4, 9, 9, 11, 9, 7, 5, }, { 7, 9, 7, 14, 11, 12, 10, 9, 9, 8, 5, 5, 12, 9, 10, 8, 8, 11, 7, 5, 2, 8, 8, 9, 7, 4, 4, 10, 
11, 10, 12, 14, 11, 12, 13, 12, 9, 10, 8, 13, 8, 13, 10, 11, 11, 9, 9, 8, 14, 10, 10, 11, 12, 11, 10, 11, 9, 14, 10, 14, 9, 12, 14, 6, 6, 3, 11, 8, 9, 8, 3, 6, 9, 7, 4, 10, 8, 11, 10, 6, 5, }, { 6, 8, 7, 13, 12, 12, 10, 9, 9, 9, 7, 8, 13, 11, 11, 9, 11, 12, 7, 6, 1, 9, 8, 10, 7, 5, 4, 10, 12, 10, 12, 13, 13, 14, 13, 11, 9, 11, 9, 13, 10, 14, 12, 12, 12, 11, 12, 10, 14, 13, 12, 13, 14, 12, 8, 9, 7, 13, 10, 13, 8, 11, 12, 8, 6, 3, 12, 9, 10, 9, 4, 6, 10, 8, 5, 10, 10, 12, 11, 8, 6, }, { 7, 10, 7, 12, 9, 12, 10, 10, 12, 9, 7, 7, 12, 9, 11, 6, 10, 11, 6, 6, 1, 9, 8, 9, 7, 4, 5, 11, 12, 9, 12, 10, 14, 13, 13, 11, 10, 12, 8, 13, 8, 14, 10, 10, 11, 11, 11, 10, 13, 14, 10, 14, 13, 11, 11, 10, 7, 13, 8, 12, 7, 10, 12, 7, 10, 4, 12, 6, 10, 8, 5, 8, 10, 7, 4, 9, 7, 10, 9, 6, 5, }, { 7, 9, 7, 13, 12, 13, 10, 10, 8, 8, 5, 6, 11, 10, 10, 8, 10, 10, 7, 5, 2, 9, 8, 9, 7, 5, 3, 8, 9, 7, 9, 11, 11, 13, 11, 9, 8, 10, 7, 12, 9, 14, 11, 10, 10, 9, 10, 9, 12, 12, 12, 13, 14, 12, 10, 10, 9, 13, 11, 13, 9, 13, 12, 8, 7, 4, 12, 10, 10, 10, 6, 6, 7, 6, 3, 9, 8, 10, 9, 6, 3, }, { 7, 10, 7, 13, 13, 13, 11, 11, 9, 8, 6, 6, 13, 11, 11, 9, 10, 11, 7, 6, 1, 9, 8, 10, 8, 5, 4, 8, 9, 8, 9, 12, 12, 12, 12, 8, 10, 13, 9, 14, 11, 14, 14, 13, 12, 9, 10, 9, 13, 12, 11, 13, 14, 11, 9, 11, 8, 13, 11, 13, 10, 13, 13, 9, 8, 5, 12, 10, 11, 11, 6, 7, 8, 7, 3, 8, 9, 11, 10, 7, 4, }, { 8, 9, 7, 11, 11, 12, 11, 14, 9, 8, 6, 6, 11, 13, 10, 9, 11, 9, 7, 5, 1, 7, 9, 9, 7, 5, 3, 13, 11, 9, 10, 12, 11, 12, 12, 9, 10, 11, 9, 13, 9, 12, 12, 12, 10, 12, 11, 10, 13, 14, 12, 14, 14, 11, 11, 8, 8, 13, 11, 12, 9, 13, 11, 9, 10, 5, 11, 8, 11, 9, 6, 7, 7, 8, 4, 6, 8, 10, 8, 8, 5, }, { 8, 10, 8, 13, 13, 13, 12, 11, 10, 5, 1, 3, 10, 7, 8, 6, 8, 9, 8, 7, 4, 9, 10, 11, 8, 7, 6, 8, 9, 7, 9, 12, 11, 12, 10, 8, 9, 10, 8, 13, 9, 9, 12, 11, 11, 7, 7, 6, 12, 9, 8, 10, 12, 8, 6, 7, 4, 12, 8, 13, 6, 9, 10, 13, 13, 9, 15, 14, 14, 15, 9, 11, 13, 11, 9, 13, 13, 15, 15, 12, 10, }, { 10, 8, 9, 11, 12, 10, 8, 13, 13, 9, 2, 5, 7, 5, 4, 3, 8, 9, 
11, 5, 5, 9, 8, 8, 6, 8, 8, 12, 7, 8, 10, 10, 9, 8, 12, 10, 9, 10, 9, 12, 7, 11, 7, 12, 12, 9, 5, 8, 9, 9, 6, 6, 11, 10, 6, 4, 7, 9, 5, 9, 3, 9, 10, 13, 11, 9, 13, 10, 13, 10, 9, 13, 14, 11, 10, 12, 12, 13, 11, 14, 11, }, { 11, 7, 8, 10, 12, 9, 9, 14, 10, 9, 4, 7, 8, 10, 7, 7, 11, 10, 8, 2, 2, 6, 8, 5, 5, 5, 6, 15, 9, 10, 10, 12, 10, 11, 14, 12, 9, 8, 9, 12, 9, 11, 8, 12, 11, 14, 10, 11, 12, 13, 10, 12, 15, 12, 9, 7, 8, 12, 9, 12, 7, 11, 13, 9, 6, 5, 11, 10, 11, 7, 6, 9, 11, 4, 5, 7, 8, 8, 8, 7, 7, }, }; //@} /** * Codes used for determining block type */ //@{ #define AIC_MODE1_NUM 90 #define AIC_MODE1_SIZE 9 #define AIC_MODE1_BITS 7 static const uint8_t aic_mode1_vlc_codes[AIC_MODE1_NUM][AIC_MODE1_SIZE] = { { 0x01, 0x01, 0x01, 0x11, 0x00, 0x09, 0x03, 0x10, 0x05,}, { 0x09, 0x01, 0x01, 0x05, 0x11, 0x00, 0x03, 0x21, 0x20,}, { 0x01, 0x01, 0x01, 0x11, 0x09, 0x10, 0x05, 0x00, 0x03,}, { 0x01, 0x01, 0x00, 0x03, 0x21, 0x05, 0x09, 0x20, 0x11,}, { 0x01, 0x09, 0x00, 0x29, 0x08, 0x15, 0x03, 0x0B, 0x28,}, { 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x03, 0x02,}, { 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x01, 0x09, 0x08,}, { 0x01, 0x01, 0x01, 0x09, 0x01, 0x08, 0x00, 0x03, 0x05,}, { 0x01, 0x01, 0x01, 0x00, 0x05, 0x11, 0x09, 0x10, 0x03,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x01, 0x01, 0x05, 0x01, 0x00, 0x03, 0x09, 0x08,}, { 0x09, 0x01, 0x01, 0x05, 0x11, 0x00, 0x03, 0x21, 0x20,}, { 0x01, 0x01, 0x01, 0x0D, 0x05, 0x04, 0x00, 0x07, 0x0C,}, { 0x01, 0x01, 0x00, 0x05, 0x11, 0x03, 0x09, 0x21, 0x20,}, { 0x05, 0x01, 0x01, 0x11, 0x00, 0x09, 0x03, 0x21, 0x20,}, { 0x09, 0x01, 0x01, 0x00, 0x05, 0x01, 0x03, 0x11, 0x10,}, { 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x02,}, { 0x01, 0x01, 0x01, 0x09, 0x00, 0x05, 0x01, 0x03, 0x08,}, { 0x01, 0x01, 0x01, 0x09, 0x11, 0x05, 0x00, 0x10, 0x03,}, { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x00, 0x01, 0x09, 0x08, 0x15, 0x14, 0x0B, 0x03,}, { 0x0D, 0x01, 0x01, 0x05, 0x0C, 0x04, 0x01, 0x00, 0x07,}, { 
0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x03, 0x01, 0x01,}, { 0x05, 0x01, 0x01, 0x04, 0x19, 0x07, 0x18, 0x0D, 0x00,}, { 0x11, 0x09, 0x01, 0x21, 0x05, 0x20, 0x01, 0x00, 0x03,}, { 0x41, 0x01, 0x00, 0x05, 0x40, 0x03, 0x09, 0x21, 0x11,}, { 0x29, 0x01, 0x00, 0x28, 0x09, 0x15, 0x03, 0x08, 0x0B,}, { 0x01, 0x00, 0x01, 0x11, 0x09, 0x10, 0x05, 0x01, 0x03,}, { 0x05, 0x01, 0x01, 0x04, 0x0D, 0x0C, 0x07, 0x00, 0x01,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x00, 0x03, 0x05, 0x11, 0x10, 0x25, 0x24, 0x13,}, { 0x21, 0x01, 0x01, 0x00, 0x11, 0x03, 0x05, 0x20, 0x09,}, { 0x01, 0x01, 0x01, 0x00, 0x09, 0x11, 0x10, 0x05, 0x03,}, { 0x21, 0x05, 0x01, 0x01, 0x09, 0x00, 0x11, 0x20, 0x03,}, { 0x05, 0x01, 0x00, 0x04, 0x01, 0x19, 0x07, 0x18, 0x0D,}, { 0x11, 0x01, 0x00, 0x01, 0x09, 0x01, 0x03, 0x10, 0x05,}, { 0x1D, 0x01, 0x05, 0x0D, 0x0C, 0x04, 0x00, 0x1C, 0x0F,}, { 0x05, 0x19, 0x01, 0x04, 0x00, 0x18, 0x1B, 0x1A, 0x07,}, { 0x09, 0x01, 0x00, 0x01, 0x05, 0x03, 0x11, 0x10, 0x01,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x00, 0x03, 0x41, 0x05, 0x40, 0x09, 0x11, 0x21,}, { 0x05, 0x01, 0x01, 0x19, 0x04, 0x07, 0x00, 0x18, 0x0D,}, { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x01, 0x00, 0x03,}, { 0x01, 0x05, 0x00, 0x0D, 0x01, 0x04, 0x07, 0x19, 0x18,}, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,}, { 0x31, 0x01, 0x05, 0x19, 0x04, 0x07, 0x00, 0x30, 0x0D,}, { 0x01, 0x00, 0x03, 0x11, 0x01, 0x05, 0x01, 0x09, 0x10,}, { 0x01, 0x05, 0x01, 0x11, 0x01, 0x10, 0x00, 0x03, 0x09,}, { 0x01, 0x09, 0x00, 0x29, 0x03, 0x08, 0x28, 0x15, 0x0B,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x01, 0x00, 0x09, 0x15, 0x03, 0x08, 0x14, 0x0B,}, { 0x11, 0x01, 0x01, 0x00, 0x09, 0x01, 0x03, 0x10, 0x05,}, { 0x01, 0x00, 0x03, 0x25, 0x11, 0x05, 0x10, 0x24, 0x13,}, { 0x11, 0x01, 0x00, 0x01, 0x09, 0x01, 0x05, 0x10, 0x03,}, { 0x05, 0x01, 0x00, 0x0D, 0x0C, 0x04, 0x0F, 0x1D, 0x1C,}, { 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x03, 0x02,}, { 0x21, 0x01, 0x05, 0x09, 
0x11, 0x00, 0x03, 0x41, 0x40,}, { 0x05, 0x01, 0x00, 0x1D, 0x1C, 0x0D, 0x0C, 0x0F, 0x04,}, { 0x05, 0x01, 0x00, 0x0D, 0x31, 0x04, 0x19, 0x30, 0x07,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x01, 0x00, 0x21, 0x05, 0x11, 0x03, 0x09, 0x20,}, { 0x01, 0x01, 0x00, 0x11, 0x03, 0x05, 0x01, 0x09, 0x10,}, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,}, { 0x05, 0x01, 0x04, 0x19, 0x07, 0x0D, 0x00, 0x31, 0x30,}, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,}, { 0x05, 0x01, 0x01, 0x11, 0x09, 0x00, 0x03, 0x21, 0x20,}, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02,}, { 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x01, 0x01, 0x02,}, { 0x09, 0x01, 0x00, 0x29, 0x08, 0x15, 0x03, 0x28, 0x0B,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x00, 0x01, 0x03,}, { 0x09, 0x01, 0x00, 0x29, 0x28, 0x15, 0x08, 0x03, 0x0B,}, { 0x01, 0x00, 0x01, 0x11, 0x05, 0x10, 0x09, 0x01, 0x03,}, { 0x05, 0x04, 0x01, 0x1D, 0x0D, 0x0C, 0x1C, 0x00, 0x0F,}, { 0x09, 0x11, 0x01, 0x41, 0x00, 0x40, 0x05, 0x03, 0x21,}, { 0x0D, 0x05, 0x01, 0x1D, 0x1C, 0x0C, 0x04, 0x00, 0x0F,}, { 0x41, 0x09, 0x01, 0x40, 0x00, 0x11, 0x05, 0x03, 0x21,}, { 0x01, 0x01, 0x01, 0x05, 0x01, 0x04, 0x00, 0x01, 0x03,}, { 0x05, 0x04, 0x01, 0x0D, 0x01, 0x0C, 0x07, 0x01, 0x00,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, { 0x05, 0x04, 0x01, 0x07, 0x19, 0x31, 0x30, 0x0D, 0x00,}, { 0x21, 0x01, 0x01, 0x00, 0x11, 0x09, 0x20, 0x05, 0x03,}, { 0x05, 0x01, 0x01, 0x04, 0x07, 0x0D, 0x0C, 0x00, 0x01,}, { 0x21, 0x09, 0x01, 0x00, 0x20, 0x05, 0x23, 0x22, 0x03,}, { 0x31, 0x0D, 0x01, 0x19, 0x05, 0x30, 0x04, 0x07, 0x00,}, { 0x31, 0x05, 0x01, 0x04, 0x19, 0x00, 0x0D, 0x30, 0x07,}, { 0x31, 0x01, 0x00, 0x0D, 0x05, 0x19, 0x04, 0x30, 0x07,}, { 0x01, 0x01, 0x01, 0x00, 0x01, 0x03, 0x02, 0x01, 0x01,}, { 0x01, 0x00, 0x01, 0x01, 0x05, 0x09, 0x08, 0x03, 0x01,}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}, }; static const uint8_t 
aic_mode1_vlc_bits[AIC_MODE1_NUM][AIC_MODE1_SIZE] = { { 1, 4, 2, 7, 4, 6, 4, 7, 5,}, { 5, 1, 3, 4, 6, 3, 3, 7, 7,}, { 1, 4, 2, 7, 6, 7, 5, 4, 4,}, { 1, 3, 3, 3, 7, 4, 5, 7, 6,}, { 2, 4, 2, 6, 4, 5, 2, 4, 6,}, { 7, 2, 3, 4, 7, 1, 5, 7, 7,}, { 5, 1, 3, 6, 5, 5, 2, 7, 7,}, { 2, 5, 1, 7, 3, 7, 5, 5, 6,}, { 2, 4, 1, 4, 5, 7, 6, 7, 4,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 2, 1, 3, 6, 5, 5, 5, 7, 7,}, { 5, 1, 3, 4, 6, 3, 3, 7, 7,}, { 4, 1, 2, 6, 5, 5, 4, 5, 6,}, { 3, 1, 3, 4, 6, 3, 5, 7, 7,}, { 4, 1, 3, 6, 3, 5, 3, 7, 7,}, { 6, 1, 4, 4, 5, 2, 4, 7, 7,}, { 7, 1, 5, 7, 4, 3, 2, 7, 7,}, { 5, 3, 2, 7, 5, 6, 1, 5, 7,}, { 4, 1, 2, 6, 7, 5, 4, 7, 4,}, { 1, 0, 1, 0, 0, 0, 0, 0, 0,}, { 3, 3, 1, 5, 5, 6, 6, 5, 3,}, { 6, 2, 1, 5, 6, 5, 4, 4, 5,}, { 6, 4, 1, 7, 6, 7, 6, 3, 2,}, { 4, 3, 1, 4, 6, 4, 6, 5, 3,}, { 6, 5, 1, 7, 4, 7, 3, 3, 3,}, { 7, 2, 2, 3, 7, 2, 4, 6, 5,}, { 6, 2, 2, 6, 4, 5, 2, 4, 4,}, { 4, 4, 1, 7, 6, 7, 5, 2, 4,}, { 5, 4, 1, 5, 6, 6, 5, 4, 2,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 2, 2, 2, 3, 5, 5, 6, 6, 5,}, { 7, 1, 3, 3, 6, 3, 4, 7, 5,}, { 2, 4, 1, 4, 6, 7, 7, 5, 4,}, { 7, 4, 3, 1, 5, 3, 6, 7, 3,}, { 4, 3, 3, 4, 1, 6, 4, 6, 5,}, { 7, 4, 4, 2, 6, 1, 4, 7, 5,}, { 5, 2, 3, 4, 4, 3, 2, 5, 4,}, { 3, 5, 2, 3, 2, 5, 5, 5, 3,}, { 6, 4, 4, 2, 5, 4, 7, 7, 1,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 2, 2, 2, 7, 3, 7, 4, 5, 6,}, { 4, 1, 3, 6, 4, 4, 3, 6, 5,}, { 2, 4, 1, 7, 3, 7, 6, 6, 6,}, { 3, 4, 3, 5, 1, 4, 4, 6, 6,}, { 4, 5, 2, 7, 1, 7, 3, 7, 7,}, { 6, 2, 3, 5, 3, 3, 2, 6, 4,}, { 4, 4, 4, 7, 2, 5, 1, 6, 7,}, { 4, 5, 2, 7, 1, 7, 4, 4, 6,}, { 2, 4, 2, 6, 2, 4, 6, 5, 4,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 1, 3, 3, 5, 6, 3, 5, 6, 5,}, { 7, 1, 4, 4, 6, 2, 4, 7, 5,}, { 2, 2, 2, 6, 5, 3, 5, 6, 5,}, { 7, 4, 4, 2, 6, 1, 5, 7, 4,}, { 3, 2, 2, 4, 4, 3, 4, 5, 5,}, { 7, 2, 5, 3, 7, 1, 4, 7, 7,}, { 6, 2, 3, 4, 5, 2, 2, 7, 7,}, { 3, 2, 2, 5, 5, 4, 4, 4, 3,}, { 3, 2, 2, 4, 6, 3, 5, 6, 3,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 1, 3, 3, 7, 4, 6, 3, 5, 7,}, { 4, 1, 4, 7, 4, 5, 2, 6, 7,}, { 2, 4, 1, 7, 5, 7, 3, 
7, 7,}, { 3, 2, 3, 5, 3, 4, 2, 6, 6,}, { 3, 5, 4, 7, 2, 7, 1, 7, 7,}, { 4, 1, 3, 6, 5, 3, 3, 7, 7,}, { 4, 2, 5, 7, 3, 7, 1, 7, 7,}, { 7, 4, 1, 7, 3, 7, 2, 5, 7,}, { 4, 2, 2, 6, 4, 5, 2, 6, 4,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 3, 4, 1, 7, 6, 7, 6, 2, 6,}, { 4, 2, 2, 6, 6, 5, 4, 2, 4,}, { 4, 4, 1, 7, 5, 7, 6, 2, 4,}, { 3, 3, 2, 5, 4, 4, 5, 2, 4,}, { 4, 5, 2, 7, 2, 7, 3, 2, 6,}, { 4, 3, 2, 5, 5, 4, 3, 2, 4,}, { 7, 4, 2, 7, 2, 5, 3, 2, 6,}, { 4, 6, 2, 7, 3, 7, 6, 1, 6,}, { 5, 5, 1, 6, 4, 6, 5, 2, 4,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, { 3, 3, 2, 3, 5, 6, 6, 4, 2,}, { 7, 1, 3, 3, 6, 5, 7, 4, 3,}, { 5, 4, 1, 5, 5, 6, 6, 4, 2,}, { 6, 4, 2, 2, 6, 3, 6, 6, 2,}, { 6, 4, 2, 5, 3, 6, 3, 3, 2,}, { 6, 3, 2, 3, 5, 2, 4, 6, 3,}, { 6, 2, 2, 4, 3, 5, 3, 6, 3,}, { 7, 5, 1, 7, 4, 7, 7, 3, 2,}, { 5, 5, 2, 3, 6, 7, 7, 5, 1,}, { 0, 0, 0, 0, 0, 0, 0, 0, 0,}, }; //@} #define PBTYPE_ESCAPE 0xFF /** tables used for P-frame macroblock type decoding */ //@{ #define NUM_PTYPE_VLCS 7 #define PTYPE_VLC_SIZE 8 #define PTYPE_VLC_BITS 7 static const uint8_t ptype_vlc_codes[NUM_PTYPE_VLCS][PTYPE_VLC_SIZE] = { { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x0D, 0x05, 0x01, 0x04, 0x01, 0x00, 0x07, 0x0C }, { 0x09, 0x11, 0x01, 0x00, 0x05, 0x03, 0x21, 0x20 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 } }; static const uint8_t ptype_vlc_bits[NUM_PTYPE_VLCS][PTYPE_VLC_SIZE] = { { 1, 2, 3, 6, 5, 4, 7, 7 }, { 3, 1, 2, 7, 6, 5, 4, 7 }, { 5, 4, 1, 4, 3, 3, 4, 5 }, { 4, 5, 2, 2, 3, 2, 6, 6 }, { 5, 6, 1, 4, 2, 3, 7, 7 }, { 5, 6, 1, 4, 3, 2, 7, 7 }, { 6, 3, 2, 7, 5, 4, 1, 7 } }; static const uint8_t ptype_vlc_syms[PTYPE_VLC_SIZE] = { 0, 1, 2, 3, 8, 9, 11, PBTYPE_ESCAPE }; /** reverse of ptype_vlc_syms */ static const uint8_t block_num_to_ptype_vlc_num[12] = { 0, 1, 2, 3, 0, 0, 2, 0, 4, 5, 0, 6 }; //@} /** tables used for P-frame macroblock type 
decoding */ //@{ #define NUM_BTYPE_VLCS 6 #define BTYPE_VLC_SIZE 7 #define BTYPE_VLC_BITS 6 static const uint8_t btype_vlc_codes[NUM_BTYPE_VLCS][BTYPE_VLC_SIZE] = { { 0x01, 0x05, 0x00, 0x03, 0x11, 0x09, 0x10 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x09, 0x01, 0x00, 0x01, 0x05, 0x03, 0x08 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 }, { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 } }; static const uint8_t btype_vlc_bits[NUM_BTYPE_VLCS][PTYPE_VLC_SIZE] = { { 2, 3, 2, 2, 5, 4, 5 }, { 4, 1, 3, 2, 6, 5, 6 }, { 6, 4, 1, 2, 5, 3, 6 }, { 5, 3, 3, 1, 4, 3, 5 }, { 6, 5, 3, 2, 4, 1, 6 }, { 6, 5, 3, 1, 4, 2, 6 } }; static const uint8_t btype_vlc_syms[BTYPE_VLC_SIZE] = { 0, 1, 4, 5, 10, 7, PBTYPE_ESCAPE }; /** reverse of btype_vlc_syms */ static const uint8_t block_num_to_btype_vlc_num[12] = { 0, 1, 0, 0, 2, 3, 0, 5, 0, 0, 4, 0 }; //@} #endif /* AVCODEC_RV40VLC2_H */
123linslouis-android-video-cutter
jni/libavcodec/rv40vlc2.h
C
asf20
33,382
/*
 * TIFF image encoder
 * Copyright (c) 2007 Bartlomiej Wolowiec
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * TIFF image encoder
 * @file
 * @author Bartlomiej Wolowiec
 */
#include "avcodec.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#include "bytestream.h"
#include "tiff.h"
#include "rle.h"
#include "lzw.h"
#include "put_bits.h"

/* maximum number of IFD (image file directory) entries the encoder emits */
#define TIFF_MAX_ENTRY 32

/** sizes of various TIFF field types (string size = 1)*/
static const uint8_t type_sizes2[6] = {
    0, 1, 1, 2, 4, 8
};

/** private state of the TIFF encoder */
typedef struct TiffEncoderContext {
    AVCodecContext *avctx;
    AVFrame picture;

    int width;                          ///< picture width
    int height;                         ///< picture height
    unsigned int bpp;                   ///< bits per pixel
    int compr;                          ///< compression level
    int bpp_tab_size;                   ///< bpp_tab size
    int photometric_interpretation;     ///< photometric interpretation
    int strips;                         ///< number of strips
    int rps;                            ///< row per strip
    uint8_t entries[TIFF_MAX_ENTRY*12]; ///< entries in header (12 bytes per IFD entry)
    int num_entries;                    ///< number of entries
    uint8_t **buf;                      ///< actual position in buffer
    uint8_t *buf_start;                 ///< pointer to first byte in buffer
    int buf_size;                       ///< buffer size
    uint16_t subsampling[2];            ///< YUV subsampling factors
    struct LZWEncodeState *lzws;        ///< LZW Encode state
} TiffEncoderContext;

/**
 * Check free space in buffer
 * @param s Tiff context
 * @param need Needed bytes
 *
@return 0 - ok, 1 - no free space */ inline static int check_size(TiffEncoderContext * s, uint64_t need) { if (s->buf_size < *s->buf - s->buf_start + need) { *s->buf = s->buf_start + s->buf_size + 1; av_log(s->avctx, AV_LOG_ERROR, "Buffer is too small\n"); return 1; } return 0; } /** * Put n values to buffer * * @param p Pointer to pointer to output buffer * @param n Number of values * @param val Pointer to values * @param type Type of values * @param flip =0 - normal copy, >0 - flip */ static void tnput(uint8_t ** p, int n, const uint8_t * val, enum TiffTypes type, int flip) { int i; #if HAVE_BIGENDIAN flip ^= ((int[]) {0, 0, 0, 1, 3, 3})[type]; #endif for (i = 0; i < n * type_sizes2[type]; i++) *(*p)++ = val[i ^ flip]; } /** * Add entry to directory in tiff header. * @param s Tiff context * @param tag Tag that identifies the entry * @param type Entry type * @param count The number of values * @param ptr_val Pointer to values */ static void add_entry(TiffEncoderContext * s, enum TiffTags tag, enum TiffTypes type, int count, const void *ptr_val) { uint8_t *entries_ptr = s->entries + 12 * s->num_entries; assert(s->num_entries < TIFF_MAX_ENTRY); bytestream_put_le16(&entries_ptr, tag); bytestream_put_le16(&entries_ptr, type); bytestream_put_le32(&entries_ptr, count); if (type_sizes[type] * count <= 4) { tnput(&entries_ptr, count, ptr_val, type, 0); } else { bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start); check_size(s, count * type_sizes2[type]); tnput(s->buf, count, ptr_val, type, 0); } s->num_entries++; } static void add_entry1(TiffEncoderContext * s, enum TiffTags tag, enum TiffTypes type, int val){ uint16_t w = val; uint32_t dw= val; add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw); } /** * Encode one strip in tiff file * * @param s Tiff context * @param src Input buffer * @param dst Output buffer * @param n Size of input buffer * @param compr Compression method * @return Number of output bytes. 
If an output error is encountered, -1 returned */ static int encode_strip(TiffEncoderContext * s, const int8_t * src, uint8_t * dst, int n, int compr) { switch (compr) { #if CONFIG_ZLIB case TIFF_DEFLATE: case TIFF_ADOBE_DEFLATE: { unsigned long zlen = s->buf_size - (*s->buf - s->buf_start); if (compress(dst, &zlen, src, n) != Z_OK) { av_log(s->avctx, AV_LOG_ERROR, "Compressing failed\n"); return -1; } return zlen; } #endif case TIFF_RAW: if (check_size(s, n)) return -1; memcpy(dst, src, n); return n; case TIFF_PACKBITS: return ff_rle_encode(dst, s->buf_size - (*s->buf - s->buf_start), src, 1, n, 2, 0xff, -1, 0); case TIFF_LZW: return ff_lzw_encode(s->lzws, src, n); default: return -1; } } static void pack_yuv(TiffEncoderContext * s, uint8_t * dst, int lnum) { AVFrame *p = &s->picture; int i, j, k; int w = (s->width - 1) / s->subsampling[0] + 1; uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]]; uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]]; for (i = 0; i < w; i++){ for (j = 0; j < s->subsampling[1]; j++) for (k = 0; k < s->subsampling[0]; k++) *dst++ = p->data[0][(lnum + j) * p->linesize[0] + i * s->subsampling[0] + k]; *dst++ = *pu++; *dst++ = *pv++; } } static int encode_frame(AVCodecContext * avctx, unsigned char *buf, int buf_size, void *data) { TiffEncoderContext *s = avctx->priv_data; AVFrame *pict = data; AVFrame *const p = (AVFrame *) & s->picture; int i; int n; uint8_t *ptr = buf; uint8_t *offset; uint32_t strips; uint32_t *strip_sizes = NULL; uint32_t *strip_offsets = NULL; int bytes_per_row; uint32_t res[2] = { 72, 1 }; // image resolution (72/1) static const uint16_t bpp_tab[] = { 8, 8, 8, 8 }; int ret = -1; int is_yuv = 0; uint8_t *yuv_line = NULL; int shift_h, shift_v; s->buf_start = buf; s->buf = &ptr; s->buf_size = buf_size; *p = *pict; p->pict_type = FF_I_TYPE; p->key_frame = 1; avctx->coded_frame= &s->picture; s->compr = TIFF_PACKBITS; if (avctx->compression_level == 0) { s->compr = TIFF_RAW; } else 
if(avctx->compression_level == 2) { s->compr = TIFF_LZW; #if CONFIG_ZLIB } else if ((avctx->compression_level >= 3)) { s->compr = TIFF_DEFLATE; #endif } s->width = avctx->width; s->height = avctx->height; s->subsampling[0] = 1; s->subsampling[1] = 1; switch (avctx->pix_fmt) { case PIX_FMT_RGB24: s->bpp = 24; s->photometric_interpretation = 2; break; case PIX_FMT_GRAY8: s->bpp = 8; s->photometric_interpretation = 1; break; case PIX_FMT_PAL8: s->bpp = 8; s->photometric_interpretation = 3; break; case PIX_FMT_MONOBLACK: s->bpp = 1; s->photometric_interpretation = 1; break; case PIX_FMT_MONOWHITE: s->bpp = 1; s->photometric_interpretation = 0; break; case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: s->photometric_interpretation = 6; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v); s->bpp = 8 + (16 >> (shift_h + shift_v)); s->subsampling[0] = 1 << shift_h; s->subsampling[1] = 1 << shift_v; s->bpp_tab_size = 3; is_yuv = 1; break; default: av_log(s->avctx, AV_LOG_ERROR, "This colors format is not supported\n"); return -1; } if (!is_yuv) s->bpp_tab_size = (s->bpp >> 3); if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE || s->compr == TIFF_LZW) //best choose for DEFLATE s->rps = s->height; else s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1); // suggest size of strip s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1]; // round rps up strips = (s->height - 1) / s->rps + 1; if (check_size(s, 8)) goto fail; // write header bytestream_put_le16(&ptr, 0x4949); bytestream_put_le16(&ptr, 42); offset = ptr; bytestream_put_le32(&ptr, 0); strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips); strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips); bytes_per_row = (((s->width - 1)/s->subsampling[0] + 1) * s->bpp * s->subsampling[0] * s->subsampling[1] + 7) >> 3; if (is_yuv){ yuv_line = av_malloc(bytes_per_row); if (yuv_line == NULL){ av_log(s->avctx, 
AV_LOG_ERROR, "Not enough memory\n"); goto fail; } } #if CONFIG_ZLIB if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) { uint8_t *zbuf; int zlen, zn; int j; zlen = bytes_per_row * s->rps; zbuf = av_malloc(zlen); strip_offsets[0] = ptr - buf; zn = 0; for (j = 0; j < s->rps; j++) { if (is_yuv){ pack_yuv(s, yuv_line, j); memcpy(zbuf + zn, yuv_line, bytes_per_row); j += s->subsampling[1] - 1; } else memcpy(zbuf + j * bytes_per_row, p->data[0] + j * p->linesize[0], bytes_per_row); zn += bytes_per_row; } n = encode_strip(s, zbuf, ptr, zn, s->compr); av_free(zbuf); if (n<0) { av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); goto fail; } ptr += n; strip_sizes[0] = ptr - buf - strip_offsets[0]; } else #endif { if(s->compr == TIFF_LZW) s->lzws = av_malloc(ff_lzw_encode_state_size); for (i = 0; i < s->height; i++) { if (strip_sizes[i / s->rps] == 0) { if(s->compr == TIFF_LZW){ ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start), 12, FF_LZW_TIFF, put_bits); } strip_offsets[i / s->rps] = ptr - buf; } if (is_yuv){ pack_yuv(s, yuv_line, i); n = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr); i += s->subsampling[1] - 1; } else n = encode_strip(s, p->data[0] + i * p->linesize[0], ptr, bytes_per_row, s->compr); if (n < 0) { av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); goto fail; } strip_sizes[i / s->rps] += n; ptr += n; if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){ int ret; ret = ff_lzw_encode_flush(s->lzws, flush_put_bits); strip_sizes[(i / s->rps )] += ret ; ptr += ret; } } if(s->compr == TIFF_LZW) av_free(s->lzws); } s->num_entries = 0; add_entry1(s,TIFF_SUBFILE, TIFF_LONG, 0); add_entry1(s,TIFF_WIDTH, TIFF_LONG, s->width); add_entry1(s,TIFF_HEIGHT, TIFF_LONG, s->height); if (s->bpp_tab_size) add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab); add_entry1(s,TIFF_COMPR, TIFF_SHORT, s->compr); add_entry1(s,TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation); add_entry(s, 
TIFF_STRIP_OFFS, TIFF_LONG, strips, strip_offsets); if (s->bpp_tab_size) add_entry1(s,TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size); add_entry1(s,TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps); add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, strip_sizes); add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res); add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res); add_entry1(s,TIFF_RES_UNIT, TIFF_SHORT, 2); if(!(avctx->flags & CODEC_FLAG_BITEXACT)) add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING, strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT); if (avctx->pix_fmt == PIX_FMT_PAL8) { uint16_t pal[256 * 3]; for (i = 0; i < 256; i++) { uint32_t rgb = *(uint32_t *) (p->data[1] + i * 4); pal[i] = ((rgb >> 16) & 0xff) * 257; pal[i + 256] = ((rgb >> 8 ) & 0xff) * 257; pal[i + 512] = ( rgb & 0xff) * 257; } add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal); } if (is_yuv){ /** according to CCIR Recommendation 601.1 */ uint32_t refbw[12] = {15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1}; add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling); add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw); } bytestream_put_le32(&offset, ptr - buf); // write offset to dir if (check_size(s, 6 + s->num_entries * 12)) goto fail; bytestream_put_le16(&ptr, s->num_entries); // write tag count bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12); bytestream_put_le32(&ptr, 0); ret = ptr - buf; fail: av_free(strip_sizes); av_free(strip_offsets); av_free(yuv_line); return ret; } AVCodec tiff_encoder = { "tiff", AVMEDIA_TYPE_VIDEO, CODEC_ID_TIFF, sizeof(TiffEncoderContext), NULL, encode_frame, NULL, NULL, 0, NULL, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_YUV410P, PIX_FMT_YUV411P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("TIFF image"), };
123linslouis-android-video-cutter
jni/libavcodec/tiffenc.c
C
asf20
14,955
/* * PNM image parser * Copyright (c) 2002, 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "parser.h" //for ParseContext #include "pnm.h" static int pnm_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { ParseContext *pc = s->priv_data; PNMContext pnmctx; int next; for (; pc->overread > 0; pc->overread--) { pc->buffer[pc->index++]= pc->buffer[pc->overread_index++]; } retry: if (pc->index) { pnmctx.bytestream_start = pnmctx.bytestream = pc->buffer; pnmctx.bytestream_end = pc->buffer + pc->index; } else { pnmctx.bytestream_start = pnmctx.bytestream = (uint8_t *) buf; /* casts avoid warnings */ pnmctx.bytestream_end = (uint8_t *) buf + buf_size; } if (ff_pnm_decode_header(avctx, &pnmctx) < 0) { if (pnmctx.bytestream < pnmctx.bytestream_end) { if (pc->index) { pc->index = 0; } else { buf++; buf_size--; } goto retry; } #if 0 if (pc->index && pc->index * 2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index) { memcpy(pc->buffer + pc->index, buf, pc->index); pc->index += pc->index; buf += pc->index; buf_size -= pc->index; goto retry; } #endif next = END_NOT_FOUND; } else { next = pnmctx.bytestream - pnmctx.bytestream_start + 
avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height); if (pnmctx.bytestream_start != buf) next -= pc->index; if (next > buf_size) next = END_NOT_FOUND; } if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { *poutbuf = NULL; *poutbuf_size = 0; return buf_size; } *poutbuf = buf; *poutbuf_size = buf_size; return next; } AVCodecParser pnm_parser = { { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM}, sizeof(ParseContext), NULL, pnm_parse, ff_parse_close, };
123linslouis-android-video-cutter
jni/libavcodec/pnm_parser.c
C
asf20
2,987
/* * WMA compatible decoder * copyright (c) 2002 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Various WMA tables. */ #ifndef AVCODEC_WMADATA_H #define AVCODEC_WMADATA_H #include <stdint.h> #include "wma.h" const uint16_t ff_wma_critical_freqs[25] = { 100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500, 24500, }; /* first value is number of bands */ static const uint8_t exponent_band_22050[3][25] = { { 10, 4, 8, 4, 8, 8, 12, 20, 24, 24, 16, }, { 14, 4, 8, 8, 4, 12, 12, 16, 24, 16, 20, 24, 32, 40, 36, }, { 23, 4, 4, 4, 8, 4, 4, 8, 8, 8, 8, 8, 12, 12, 16, 16, 24, 24, 32, 44, 48, 60, 84, 72, }, }; static const uint8_t exponent_band_32000[3][25] = { { 11, 4, 4, 8, 4, 4, 12, 16, 24, 20, 28, 4, }, { 15, 4, 8, 4, 4, 8, 8, 16, 20, 12, 20, 20, 28, 40, 56, 8, }, { 16, 8, 4, 8, 8, 12, 16, 20, 24, 40, 32, 32, 44, 56, 80, 112, 16, }, }; static const uint8_t exponent_band_44100[3][25] = { { 12, 4, 4, 4, 4, 4, 8, 8, 8, 12, 16, 20, 36, }, { 15, 4, 8, 4, 8, 8, 4, 8, 8, 12, 12, 12, 24, 28, 40, 76, }, { 17, 4, 8, 8, 4, 12, 12, 8, 8, 24, 16, 20, 24, 32, 40, 60, 80, 152, }, }; const uint16_t ff_wma_hgain_huffcodes[37] = { 0x00003, 0x002e7, 0x00001, 0x005cd, 0x0005d, 0x005c9, 0x0005e, 
0x00003, 0x00016, 0x0000b, 0x00001, 0x00006, 0x00001, 0x00006, 0x00004, 0x00005, 0x00004, 0x00007, 0x00003, 0x00007, 0x00004, 0x0000a, 0x0000a, 0x00002, 0x00003, 0x00000, 0x00005, 0x00002, 0x0005f, 0x00004, 0x00003, 0x00002, 0x005c8, 0x000b8, 0x005ca, 0x005cb, 0x005cc, }; const uint8_t ff_wma_hgain_huffbits[37] = { 10, 12, 10, 13, 9, 13, 9, 8, 7, 5, 5, 4, 4, 3, 3, 3, 4, 3, 4, 4, 5, 5, 6, 8, 7, 10, 8, 10, 9, 8, 9, 9, 13, 10, 13, 13, 13, }; const float ff_wma_lsp_codebook[NB_LSP_COEFS][16] = { { 1.98732877, 1.97944528, 1.97179088, 1.96260549, 1.95038374, 1.93336114, 1.90719232, 1.86191415, }, { 1.97260000, 1.96083160, 1.94982586, 1.93806164, 1.92516608, 1.91010199, 1.89232331, 1.87149812, 1.84564818, 1.81358067, 1.77620070, 1.73265264, 1.67907855, 1.60959081, 1.50829650, 1.33120330, }, { 1.90109110, 1.86482426, 1.83419671, 1.80168452, 1.76650116, 1.72816320, 1.68502700, 1.63738256, 1.58501580, 1.51795181, 1.43679906, 1.33950585, 1.24176208, 1.12260729, 0.96749668, 0.74048265, }, { 1.76943864, 1.67822463, 1.59946365, 1.53560582, 1.47470796, 1.41210167, 1.34509536, 1.27339507, 1.19303814, 1.09765169, 0.98818722, 0.87239446, 0.74369172, 0.59768184, 0.43168630, 0.17977021, }, { 1.43428349, 1.32038354, 1.21074086, 1.10577988, 1.00561746, 0.90335924, 0.80437489, 0.70709671, 0.60427395, 0.49814048, 0.38509539, 0.27106800, 0.14407416, 0.00219910, -0.16725141, -0.36936085, }, { 0.99895687, 0.84188166, 0.70753739, 0.57906595, 0.47055563, 0.36966965, 0.26826648, 0.17163380, 0.07208392, -0.03062936, -1.40037388, -0.25128968, -0.37213937, -0.51075646, -0.64887512, -0.80308031, }, { 0.26515280, 0.06313551, -0.08872080, -0.21103548, -0.31069678, -0.39680323, -0.47223474, -0.54167135, -0.61444740, -0.68943343, -0.76580211, -0.85170082, -0.95289061, -1.06514703, -1.20510707, -1.37617746, }, { -0.53940301, -0.73770929, -0.88424876, -1.01117930, -1.13389091, -1.26830073, -1.42041987, -1.62033919, -1.10158808, -1.16512566, -1.23337128, -1.30414401, -1.37663312, -1.46853845, -1.57625798, 
-1.66893638, }, { -0.38601997, -0.56009350, -0.66978483, -0.76028471, -0.83846064, -0.90868087, -0.97408881, -1.03694962, }, { -1.56144989, -1.65944032, -1.72689685, -1.77857740, -1.82203011, -1.86220079, -1.90283983, -1.94820479, }, }; static const uint32_t coef0_huffcodes[666] = { 0x00258, 0x0003d, 0x00000, 0x00005, 0x00008, 0x00008, 0x0000c, 0x0001b, 0x0001f, 0x00015, 0x00024, 0x00032, 0x0003a, 0x00026, 0x0002c, 0x0002f, 0x0004a, 0x0004d, 0x00061, 0x00070, 0x00073, 0x00048, 0x00052, 0x0005a, 0x0005d, 0x0006e, 0x00099, 0x0009e, 0x000c1, 0x000ce, 0x000e4, 0x000f0, 0x00093, 0x0009e, 0x000a2, 0x000a1, 0x000b8, 0x000d2, 0x000d3, 0x0012e, 0x00130, 0x000de, 0x0012d, 0x0019b, 0x001e4, 0x00139, 0x0013a, 0x0013f, 0x0014f, 0x0016d, 0x001a2, 0x0027c, 0x0027e, 0x00332, 0x0033c, 0x0033f, 0x0038b, 0x00396, 0x003c5, 0x00270, 0x0027c, 0x0025a, 0x00395, 0x00248, 0x004bd, 0x004fb, 0x00662, 0x00661, 0x0071b, 0x004e6, 0x004ff, 0x00666, 0x0071c, 0x0071a, 0x0071f, 0x00794, 0x00536, 0x004e2, 0x0078e, 0x004ee, 0x00518, 0x00535, 0x004fb, 0x0078d, 0x00530, 0x00680, 0x0068f, 0x005cb, 0x00965, 0x006a6, 0x00967, 0x0097f, 0x00682, 0x006ae, 0x00cd0, 0x00e28, 0x00f13, 0x00f1f, 0x009f5, 0x00cd3, 0x00f11, 0x00926, 0x00964, 0x00f32, 0x00f12, 0x00f30, 0x00966, 0x00d0b, 0x00a68, 0x00b91, 0x009c7, 0x00b73, 0x012fa, 0x0131d, 0x013f9, 0x01ca0, 0x0199c, 0x01c7a, 0x0198c, 0x01248, 0x01c74, 0x01c64, 0x0139e, 0x012fd, 0x00a77, 0x012fc, 0x01c7b, 0x012ca, 0x014cc, 0x014d2, 0x014e3, 0x014dc, 0x012dc, 0x03344, 0x02598, 0x0263c, 0x0333b, 0x025e6, 0x01a1c, 0x01e3c, 0x014e2, 0x033d4, 0x01a11, 0x03349, 0x03cce, 0x014e1, 0x01a34, 0x0273e, 0x02627, 0x0273f, 0x038ee, 0x03971, 0x03c67, 0x03c61, 0x0333d, 0x038c2, 0x0263f, 0x038cd, 0x02638, 0x02e41, 0x0351f, 0x03348, 0x03c66, 0x03562, 0x02989, 0x027d5, 0x0333c, 0x02e4f, 0x0343b, 0x02ddf, 0x04bc8, 0x029c0, 0x02e57, 0x04c72, 0x025b7, 0x03547, 0x03540, 0x029d3, 0x04c45, 0x025bb, 0x06600, 0x04c73, 0x04bce, 0x0357b, 0x029a6, 0x029d2, 0x0263e, 0x0298a, 0x07183, 0x06602, 
0x07958, 0x04b66, 0x0537d, 0x05375, 0x04fe9, 0x04b67, 0x0799f, 0x04bc9, 0x051fe, 0x06a3b, 0x05bb6, 0x04fa8, 0x0728f, 0x05376, 0x0492c, 0x0537e, 0x0795a, 0x06a3c, 0x0e515, 0x07887, 0x0683a, 0x051f9, 0x051fd, 0x0cc6a, 0x06a8a, 0x0cc6d, 0x05bb3, 0x0683b, 0x051fc, 0x05378, 0x0728e, 0x07886, 0x05bb7, 0x0f2a4, 0x0795b, 0x0683c, 0x09fc1, 0x0683d, 0x0b752, 0x09678, 0x0a3e8, 0x06ac7, 0x051f0, 0x0b759, 0x06af3, 0x04b6b, 0x0f2a0, 0x0f2ad, 0x096c3, 0x0e518, 0x0b75c, 0x0d458, 0x0cc6b, 0x0537c, 0x067aa, 0x04fea, 0x0343a, 0x0cc71, 0x0967f, 0x09fc4, 0x096c2, 0x0e516, 0x0f2a1, 0x0d45c, 0x0d45d, 0x0d45e, 0x12fb9, 0x0967e, 0x1982f, 0x09883, 0x096c4, 0x0b753, 0x12fb8, 0x0f2a8, 0x1ca21, 0x096c5, 0x0e51a, 0x1ca27, 0x12f3c, 0x0d471, 0x0f2aa, 0x0b75b, 0x12fbb, 0x0f2a9, 0x0f2ac, 0x0d45a, 0x0b74f, 0x096c8, 0x16e91, 0x096ca, 0x12fbf, 0x0d0a7, 0x13103, 0x0d516, 0x16e99, 0x12cbd, 0x0a3ea, 0x19829, 0x0b755, 0x29ba7, 0x1ca28, 0x29ba5, 0x16e93, 0x1982c, 0x19828, 0x25994, 0x0a3eb, 0x1ca29, 0x16e90, 0x1ca25, 0x1982d, 0x1ca26, 0x16e9b, 0x0b756, 0x0967c, 0x25997, 0x0b75f, 0x198d3, 0x0b757, 0x19a2a, 0x0d45b, 0x0e517, 0x1ca24, 0x1ca23, 0x1ca22, 0x0b758, 0x16e97, 0x0cd14, 0x13100, 0x00007, 0x0003b, 0x0006b, 0x00097, 0x00138, 0x00125, 0x00173, 0x00258, 0x00335, 0x0028e, 0x004c6, 0x00715, 0x00729, 0x004ef, 0x00519, 0x004ed, 0x00532, 0x0068c, 0x00686, 0x00978, 0x00e5d, 0x00e31, 0x009f4, 0x00b92, 0x012f8, 0x00d06, 0x00a67, 0x00d44, 0x00a76, 0x00d59, 0x012cd, 0x01c78, 0x01c75, 0x0199f, 0x0198f, 0x01c67, 0x014c6, 0x01c79, 0x01c76, 0x00b94, 0x00d1b, 0x01e32, 0x01e31, 0x01ab0, 0x01a05, 0x01aa1, 0x0333a, 0x025e5, 0x02626, 0x03541, 0x03544, 0x03421, 0x03546, 0x02e55, 0x02e56, 0x0492d, 0x02dde, 0x0299b, 0x02ddc, 0x0357a, 0x0249c, 0x0668b, 0x1c77f, 0x1ca20, 0x0d45f, 0x09886, 0x16e9a, 0x0f2a7, 0x0b751, 0x0a3ee, 0x0cf59, 0x0cf57, 0x0b754, 0x0d0a6, 0x16e98, 0x0b760, 0x06ac6, 0x0a3f0, 0x12fbe, 0x13104, 0x0f2a5, 0x0a3ef, 0x0d472, 0x12cba, 0x1982e, 0x16e9c, 0x1c77e, 0x198d0, 0x13105, 0x16e92, 0x0b75d, 0x0d459, 0x0001a, 
0x000c0, 0x0016c, 0x003cd, 0x00350, 0x0067b, 0x0051e, 0x006a9, 0x009f4, 0x00b72, 0x00d09, 0x01249, 0x01e3d, 0x01ca1, 0x01a1f, 0x01721, 0x01a8a, 0x016e8, 0x03347, 0x01a35, 0x0249d, 0x0299a, 0x02596, 0x02e4e, 0x0298b, 0x07182, 0x04c46, 0x025ba, 0x02e40, 0x027d6, 0x04fe8, 0x06607, 0x05310, 0x09884, 0x072e1, 0x06a3d, 0x04b6a, 0x04c7a, 0x06603, 0x04c7b, 0x03428, 0x06605, 0x09664, 0x09fc0, 0x071de, 0x06601, 0x05bb2, 0x09885, 0x0a3e2, 0x1c61f, 0x12cbb, 0x0b750, 0x0cf58, 0x0967d, 0x25995, 0x668ad, 0x0b75a, 0x09fc2, 0x0537f, 0x0b75e, 0x13fae, 0x12fbc, 0x00031, 0x001c4, 0x004c5, 0x005b8, 0x00cf4, 0x0096f, 0x00d46, 0x01e57, 0x01a04, 0x02625, 0x03346, 0x028f9, 0x04c47, 0x072e0, 0x04b69, 0x03420, 0x07957, 0x06639, 0x0799e, 0x07959, 0x07881, 0x04b68, 0x09fc3, 0x09fd6, 0x0cc70, 0x0a3f1, 0x12cbe, 0x0e30e, 0x0e51b, 0x06af2, 0x12cbc, 0x1c77d, 0x0f2ab, 0x12fbd, 0x1aa2f, 0x0a3ec, 0x0d473, 0x05377, 0x0a3e9, 0x1982b, 0x0e300, 0x12f3f, 0x0cf5f, 0x096c0, 0x38c3c, 0x16e94, 0x16e95, 0x12f3d, 0x29ba4, 0x29ba6, 0x1c77c, 0x6a8ba, 0x3545c, 0x33457, 0x668ac, 0x6a8bb, 0x16e9d, 0x0e519, 0x25996, 0x12f3e, 0x00036, 0x0033e, 0x006ad, 0x00d03, 0x012c8, 0x0124a, 0x03c42, 0x03ccd, 0x06606, 0x07880, 0x06852, 0x06a3a, 0x05bb4, 0x0f2a2, 0x09fc7, 0x12cb9, 0x0cc6c, 0x0a6e8, 0x096c1, 0x0004a, 0x00355, 0x012f9, 0x014e8, 0x01abe, 0x025b6, 0x0492e, 0x09fc6, 0x051ff, 0x0cc6f, 0x096cb, 0x0d071, 0x198d1, 0x12cb8, 0x38c3d, 0x13faf, 0x096c9, 0x0009d, 0x00539, 0x012ce, 0x0341f, 0x029c1, 0x04b33, 0x0a3e3, 0x0d070, 0x16e96, 0x0b763, 0x000a0, 0x009ce, 0x038cc, 0x0343d, 0x051fa, 0x09888, 0x12fba, 0x000df, 0x00a75, 0x029a7, 0x09fc5, 0x0e301, 0x0967b, 0x001e7, 0x012c9, 0x051fb, 0x09889, 0x0f2a6, 0x0016f, 0x01cb9, 0x0cf5a, 0x12cbf, 0x09679, 0x00272, 0x01a15, 0x0967a, 0x003cb, 0x025f6, 0x0b762, 0x0028d, 0x03c60, 0x0cf5e, 0x00352, 0x03ccc, 0x0072f, 0x07186, 0x004ec, 0x05379, 0x0068e, 0x09887, 0x006a7, 0x06af1, 0x00e29, 0x0cf5b, 0x00f31, 0x0d470, 0x009c6, 0x013fb, 0x13102, 0x019a5, 0x13101, 0x01983, 0x01c65, 0x0124f, 0x014c7, 
0x01726, 0x01abf, 0x03304, 0x02624, 0x03c41, 0x027d7, 0x02ddd, 0x02e54, 0x0343c, 0x06604, 0x07181, 0x0663a, 0x04fa9, 0x0663b, 0x05311, 0x0537a, 0x06839, 0x05bb5, 0x0492f, 0x06af0, 0x096c7, 0x0cc6e, 0x0537b, 0x0cf5c, 0x0cf56, 0x198d2, 0x0cf5d, 0x0a3ed, 0x0f2a3, 0x1982a, 0x0b761, 0x096c6, }; static const uint8_t coef0_huffbits[666] = { 11, 6, 2, 3, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 11, 12, 12, 12, 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 14, 14, 15, 15, 15, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 15, 15, 15, 15, 16, 16, 16, 15, 16, 15, 15, 16, 16, 16, 16, 15, 16, 16, 16, 15, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 16, 16, 15, 16, 16, 16, 17, 17, 17, 16, 16, 17, 16, 16, 16, 16, 17, 16, 17, 17, 16, 16, 15, 15, 15, 16, 17, 16, 17, 16, 16, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 16, 17, 17, 16, 17, 17, 17, 16, 17, 17, 16, 16, 17, 17, 17, 18, 17, 17, 17, 17, 17, 18, 18, 17, 17, 17, 19, 17, 19, 18, 17, 17, 18, 17, 17, 18, 17, 17, 17, 18, 17, 17, 18, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 18, 16, 17, 4, 6, 8, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 15, 15, 15, 15, 15, 15, 17, 17, 17, 16, 18, 16, 17, 17, 16, 16, 17, 17, 18, 17, 16, 17, 17, 17, 16, 17, 17, 18, 17, 18, 17, 17, 17, 18, 17, 17, 5, 8, 10, 10, 11, 11, 12, 12, 12, 13, 13, 14, 13, 13, 14, 14, 14, 
14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 15, 16, 16, 15, 15, 15, 15, 15, 16, 16, 15, 15, 16, 16, 17, 17, 18, 17, 16, 17, 18, 19, 17, 16, 16, 17, 17, 17, 6, 9, 11, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 17, 18, 16, 16, 16, 18, 17, 16, 17, 18, 17, 17, 16, 17, 17, 16, 17, 16, 17, 18, 18, 18, 17, 19, 19, 17, 20, 19, 18, 19, 20, 18, 16, 18, 17, 7, 10, 12, 13, 13, 14, 14, 14, 15, 15, 16, 16, 16, 16, 16, 18, 16, 17, 17, 8, 11, 13, 14, 14, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 17, 17, 8, 12, 14, 15, 15, 15, 17, 17, 18, 17, 9, 12, 14, 15, 16, 16, 17, 9, 13, 15, 16, 16, 17, 9, 13, 16, 16, 16, 10, 13, 16, 18, 17, 10, 14, 17, 10, 14, 17, 11, 14, 16, 11, 14, 11, 15, 12, 16, 12, 16, 12, 16, 12, 16, 12, 17, 13, 13, 17, 13, 17, 13, 13, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 16, 15, 16, 16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 17, 16, 17, 16, 17, 17, 17, }; static const uint32_t coef1_huffcodes[555] = { 0x00115, 0x00002, 0x00001, 0x00000, 0x0000d, 0x00007, 0x00013, 0x0001d, 0x00008, 0x0000c, 0x00023, 0x0002b, 0x0003f, 0x00017, 0x0001b, 0x00043, 0x00049, 0x00050, 0x00055, 0x00054, 0x00067, 0x00064, 0x0007b, 0x0002d, 0x00028, 0x0002a, 0x00085, 0x00089, 0x0002b, 0x00035, 0x00090, 0x00091, 0x00094, 0x00088, 0x000c1, 0x000c6, 0x000f2, 0x000e3, 0x000c5, 0x000e2, 0x00036, 0x000f0, 0x000a7, 0x000cd, 0x000fb, 0x00059, 0x00116, 0x00103, 0x00108, 0x0012b, 0x0012d, 0x00188, 0x0012e, 0x0014c, 0x001c3, 0x00187, 0x001e7, 0x0006f, 0x00094, 0x00069, 0x001e6, 0x001ca, 0x00147, 0x00195, 0x000a7, 0x00213, 0x00209, 0x00303, 0x00295, 0x00289, 0x0028c, 0x0028d, 0x00312, 0x00330, 0x0029b, 0x00308, 0x00328, 0x0029a, 0x0025e, 0x003c5, 0x00384, 0x0039f, 0x00397, 0x00296, 0x0032e, 0x00332, 0x003c6, 0x003e6, 0x0012d, 0x000d1, 0x00402, 0x000dd, 0x00161, 0x0012b, 0x00127, 0x0045d, 0x00601, 0x004ab, 0x0045f, 0x00410, 0x004bf, 0x00528, 0x0045c, 0x00424, 0x00400, 0x00511, 0x00618, 0x0073d, 0x0063a, 0x00614, 0x0073c, 
0x007c0, 0x007cf, 0x00802, 0x00966, 0x00964, 0x00951, 0x008a0, 0x00346, 0x00803, 0x00a52, 0x0024a, 0x007c1, 0x0063f, 0x00126, 0x00406, 0x00789, 0x008a2, 0x00960, 0x00967, 0x00c05, 0x00c70, 0x00c79, 0x00a5d, 0x00c26, 0x00c4d, 0x00372, 0x008a5, 0x00c08, 0x002c5, 0x00f11, 0x00cc4, 0x00f8e, 0x00e16, 0x00496, 0x00e77, 0x00f9c, 0x00c25, 0x00f1e, 0x00c27, 0x00f1f, 0x00e17, 0x00ccd, 0x00355, 0x00c09, 0x00c78, 0x00f90, 0x00521, 0x00357, 0x00356, 0x0068e, 0x00f9d, 0x00c04, 0x00e58, 0x00a20, 0x00a2c, 0x00c4c, 0x0052f, 0x00f8d, 0x01178, 0x01053, 0x01097, 0x0180f, 0x0180d, 0x012fb, 0x012aa, 0x0202a, 0x00a40, 0x018ed, 0x01ceb, 0x01455, 0x018e3, 0x012a1, 0x00354, 0x00353, 0x00f1c, 0x00c7b, 0x00c37, 0x0101d, 0x012cb, 0x01142, 0x0197d, 0x01095, 0x01e3b, 0x0186b, 0x00588, 0x01c2a, 0x014b8, 0x01e3a, 0x018ec, 0x01f46, 0x012fa, 0x00a53, 0x01ce8, 0x00a55, 0x01c29, 0x0117b, 0x01052, 0x012a0, 0x00589, 0x00950, 0x01c2b, 0x00a50, 0x0208b, 0x0180e, 0x02027, 0x02556, 0x01e20, 0x006e7, 0x01c28, 0x0197a, 0x00684, 0x020a2, 0x01f22, 0x03018, 0x039cf, 0x03e25, 0x02557, 0x0294c, 0x028a6, 0x00d11, 0x028a9, 0x02979, 0x00d46, 0x00a56, 0x039ce, 0x030cc, 0x0329a, 0x0149d, 0x0510f, 0x0451c, 0x02028, 0x03299, 0x01ced, 0x014b9, 0x00f85, 0x00c7a, 0x01800, 0x00341, 0x012ca, 0x039c8, 0x0329d, 0x00d0d, 0x03e20, 0x05144, 0x00d45, 0x030d0, 0x0186d, 0x030d5, 0x00d0f, 0x00d40, 0x04114, 0x020a1, 0x0297f, 0x03e24, 0x032f1, 0x04047, 0x030d4, 0x028a8, 0x00d0e, 0x0451d, 0x04044, 0x0297e, 0x04042, 0x030d2, 0x030cf, 0x03e21, 0x03e26, 0x028a5, 0x0451a, 0x00d48, 0x01a16, 0x00d44, 0x04518, 0x0149b, 0x039ca, 0x01498, 0x0403d, 0x0451b, 0x0149c, 0x032f3, 0x030cb, 0x08073, 0x03e22, 0x0529a, 0x020aa, 0x039cc, 0x0738a, 0x06530, 0x07389, 0x06193, 0x08071, 0x04043, 0x030ce, 0x05147, 0x07388, 0x05145, 0x08072, 0x04521, 0x00d47, 0x0297c, 0x030cd, 0x030ca, 0x0000b, 0x0000c, 0x00083, 0x000e4, 0x00048, 0x00102, 0x001cc, 0x001f5, 0x00097, 0x0020b, 0x00124, 0x00453, 0x00627, 0x00639, 0x00605, 0x00517, 0x001b8, 0x00663, 0x00667, 0x007c3, 
0x00823, 0x00961, 0x00963, 0x00e5a, 0x00e59, 0x00a2b, 0x00cbf, 0x00292, 0x00a2d, 0x007d0, 0x00953, 0x00cc5, 0x00f84, 0x004ab, 0x014a7, 0x0068a, 0x0117a, 0x0052e, 0x01442, 0x0052c, 0x00c77, 0x00f8f, 0x004aa, 0x01094, 0x01801, 0x012c4, 0x0297b, 0x00952, 0x01f19, 0x006a5, 0x01149, 0x012c5, 0x01803, 0x022f2, 0x0329b, 0x04520, 0x0149e, 0x00d13, 0x01f16, 0x01ce9, 0x0101c, 0x006e6, 0x039c9, 0x06191, 0x07c8e, 0x06192, 0x0ca63, 0x039cd, 0x06190, 0x06884, 0x06885, 0x07382, 0x00d49, 0x00d41, 0x0450c, 0x0149a, 0x030d1, 0x08077, 0x03e23, 0x01a15, 0x0e701, 0x0e702, 0x08079, 0x0822a, 0x0a218, 0x07887, 0x0403f, 0x0520b, 0x0529b, 0x0e700, 0x04519, 0x00007, 0x000e0, 0x000d0, 0x0039b, 0x003e5, 0x00163, 0x0063e, 0x007c9, 0x00806, 0x00954, 0x01044, 0x01f44, 0x0197c, 0x01f45, 0x00a51, 0x01f47, 0x00951, 0x0052d, 0x02291, 0x0092f, 0x00a54, 0x00d12, 0x0297d, 0x00d0c, 0x01499, 0x0329e, 0x032f0, 0x02025, 0x039c6, 0x00a57, 0x03e46, 0x00d42, 0x0738b, 0x05146, 0x04046, 0x08078, 0x0510e, 0x07886, 0x02904, 0x04156, 0x04157, 0x06032, 0x030d3, 0x08bce, 0x04040, 0x0403e, 0x0a414, 0x10457, 0x08075, 0x06887, 0x07c8f, 0x039c7, 0x07387, 0x08070, 0x08bcf, 0x1482a, 0x10456, 0x1482b, 0x01a17, 0x06886, 0x0450d, 0x00013, 0x0006b, 0x00615, 0x0080b, 0x0082b, 0x00952, 0x00e5b, 0x018e2, 0x0186c, 0x01f18, 0x0329f, 0x00d43, 0x03e29, 0x05140, 0x05141, 0x0ca62, 0x06033, 0x03c42, 0x03e28, 0x0450f, 0x0a21a, 0x07384, 0x0a219, 0x0e703, 0x0a21b, 0x01a14, 0x07383, 0x045e6, 0x0007a, 0x0012c, 0x00ccc, 0x0068f, 0x01802, 0x00a52, 0x00953, 0x04045, 0x01a20, 0x0451f, 0x000a4, 0x00735, 0x01cec, 0x02029, 0x020a3, 0x0451e, 0x00069, 0x00c24, 0x02024, 0x032f2, 0x05142, 0x00196, 0x00523, 0x000a6, 0x0197b, 0x0030b, 0x0092e, 0x003e9, 0x03e27, 0x00160, 0x05143, 0x00652, 0x04041, 0x00734, 0x028a7, 0x0080f, 0x01483, 0x0097c, 0x00340, 0x0068b, 0x00522, 0x01054, 0x01096, 0x01f17, 0x0202b, 0x01cea, 0x020a0, 0x02978, 0x02026, 0x0297a, 0x039cb, 0x03e2b, 0x0149f, 0x0329c, 0x07385, 0x08074, 0x0450e, 0x03e2a, 0x05149, 0x08076, 0x07386, 0x05148, 
}; static const uint8_t coef1_huffbits[555] = { 9, 5, 2, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 12, 12, 12, 12, 12, 12, 12, 13, 12, 12, 12, 12, 12, 12, 12, 12, 13, 12, 12, 12, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 13, 12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 14, 13, 14, 14, 13, 14, 14, 13, 14, 13, 13, 14, 14, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 14, 14, 14, 14, 15, 15, 15, 14, 14, 13, 13, 12, 12, 13, 13, 13, 14, 14, 15, 14, 15, 15, 14, 13, 14, 15, 15, 15, 14, 14, 14, 14, 15, 14, 14, 15, 15, 15, 14, 15, 14, 14, 14, 14, 14, 15, 15, 16, 15, 15, 15, 14, 15, 15, 15, 15, 14, 14, 16, 14, 15, 14, 14, 15, 15, 15, 15, 16, 15, 14, 15, 15, 15, 16, 15, 15, 14, 14, 14, 4, 7, 8, 8, 9, 9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 12, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 13, 13, 13, 14, 14, 13, 14, 13, 13, 13, 14, 14, 15, 15, 14, 13, 13, 13, 14, 14, 15, 15, 15, 16, 14, 15, 17, 17, 15, 15, 15, 15, 15, 14, 16, 14, 16, 16, 16, 16, 16, 16, 15, 15, 17, 15, 16, 15, 6, 8, 10, 10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 13, 14, 13, 14, 14, 14, 14, 14, 15, 15, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 16, 15, 15, 16, 15, 15, 15, 14, 16, 15, 15, 18, 17, 16, 17, 15, 14, 15, 16, 16, 19, 17, 19, 16, 17, 15, 7, 10, 11, 12, 12, 12, 12, 13, 13, 13, 14, 15, 14, 15, 15, 16, 15, 14, 14, 15, 16, 15, 16, 16, 16, 16, 15, 15, 7, 11, 12, 13, 13, 14, 14, 15, 15, 15, 8, 11, 13, 
14, 14, 15, 9, 12, 14, 14, 15, 9, 13, 10, 13, 10, 14, 10, 14, 11, 15, 11, 15, 11, 14, 12, 15, 12, 13, 13, 13, 13, 13, 13, 14, 13, 14, 14, 14, 14, 14, 14, 15, 14, 15, 16, 15, 14, 15, 16, 15, 15, }; static const uint32_t coef2_huffcodes[1336] = { 0x003e6, 0x000f6, 0x00000, 0x00002, 0x00006, 0x0000f, 0x0001b, 0x00028, 0x00039, 0x0003f, 0x0006b, 0x00076, 0x000b7, 0x000e8, 0x000ef, 0x00169, 0x001a7, 0x001d4, 0x001dc, 0x002c4, 0x00349, 0x00355, 0x00391, 0x003dc, 0x00581, 0x005b2, 0x00698, 0x0070c, 0x00755, 0x0073a, 0x00774, 0x007cf, 0x00b0a, 0x00b66, 0x00d2e, 0x00d5e, 0x00e1b, 0x00eac, 0x00e5a, 0x00f7e, 0x00fa1, 0x0163e, 0x01a37, 0x01a52, 0x01c39, 0x01ab3, 0x01d5f, 0x01cb6, 0x01f52, 0x01dd9, 0x02c04, 0x02c2e, 0x02c2d, 0x02c23, 0x03467, 0x034a3, 0x0351b, 0x03501, 0x03a5d, 0x0351c, 0x03875, 0x03dea, 0x0397b, 0x039db, 0x03df1, 0x039d8, 0x03bb4, 0x0580a, 0x0584d, 0x05842, 0x05b13, 0x058ea, 0x0697d, 0x06a06, 0x068cc, 0x06ac7, 0x06a96, 0x072f4, 0x07543, 0x072b4, 0x07d20, 0x0b003, 0x073b5, 0x07be6, 0x0d180, 0x07bd1, 0x07cb8, 0x07d06, 0x07d25, 0x0d2f2, 0x0d19a, 0x0d334, 0x0e1dc, 0x0d529, 0x0d584, 0x0e1d2, 0x0e5e3, 0x0eec4, 0x0e564, 0x0fa49, 0x16001, 0x0eedc, 0x0f7fa, 0x1a32c, 0x16131, 0x16003, 0x0f9c8, 0x1ef80, 0x1d2a0, 0x1aa4b, 0x0f7ce, 0x1abfe, 0x1aa50, 0x1a458, 0x1a816, 0x1cae4, 0x1d2fe, 0x1d52e, 0x1aa4c, 0x2c245, 0x1d2a1, 0x1a35d, 0x1ca1b, 0x1d5d8, 0x1f531, 0x1ca1c, 0x1f389, 0x1f4af, 0x3a5e7, 0x351fb, 0x2c24b, 0x34bce, 0x2c24d, 0x2c249, 0x2c24a, 0x72dfc, 0x357ef, 0x35002, 0x3a5e6, 0x39431, 0x5843b, 0x34a77, 0x58431, 0x3a5f3, 0x3a5dd, 0x3e5e5, 0x356bd, 0x3976e, 0x6a3d2, 0x3500d, 0x694c4, 0x580bd, 0x3e5e8, 0x74b95, 0x34a6e, 0x3977c, 0x39432, 0x5b0d2, 0x6a3d8, 0x580b8, 0x5b0cb, 0x5b0d7, 0x72dee, 0x72ded, 0x72dec, 0x74b9c, 0x3977f, 0x72dea, 0x74b9e, 0x7be7d, 0x580bf, 0x5b0d5, 0x7cba8, 0x74b91, 0x3e5dd, 0xb6171, 0xd46b3, 0xd46b9, 0x7cba1, 0x74b9f, 0x72de1, 0xe59f5, 0x3e5eb, 0x00004, 0x00015, 0x00038, 0x00075, 0x000e8, 0x001d3, 0x00347, 0x0039c, 0x00690, 0x0074a, 0x00b60, 0x00e93, 
0x00f74, 0x0163d, 0x01a5a, 0x01d24, 0x01cbe, 0x01f4b, 0x03468, 0x03562, 0x03947, 0x03e82, 0x05804, 0x05b12, 0x05803, 0x0696d, 0x06a9e, 0x0697c, 0x06978, 0x06afb, 0x074b2, 0x072f5, 0x073c0, 0x07541, 0x06944, 0x074b7, 0x070d3, 0x07ba9, 0x0b0b1, 0x0d1af, 0x0e1dd, 0x0e5e2, 0x0e1a3, 0x0eec3, 0x1612f, 0x0e961, 0x0eeda, 0x0e78e, 0x0fa48, 0x1612c, 0x0e511, 0x0e565, 0x0e953, 0x1aa4a, 0x0e59d, 0x1d52c, 0x1a811, 0x1cae7, 0x1abfc, 0x1d52d, 0x1cacf, 0x1cf05, 0x2c254, 0x34a72, 0x1f4ac, 0x3976b, 0x34a71, 0x2c6d9, 0x2d873, 0x34a6a, 0x357e7, 0x3464c, 0x3e5f5, 0x58433, 0x1f53a, 0x3500a, 0x357ea, 0x34a73, 0x3942f, 0x357e5, 0x39775, 0x694cd, 0x39772, 0x7cba5, 0x6a3ef, 0x35483, 0x74b98, 0x5b0c1, 0x39770, 0x3a5d7, 0x39433, 0x39434, 0x694ce, 0x580be, 0x3e5ff, 0x6a3ec, 0xb616f, 0xd46b1, 0x6a3d1, 0x72de5, 0x74b6e, 0x72de9, 0x3e700, 0xd46b6, 0x6a3e9, 0x74b69, 0xe5675, 0xd46b8, 0x7cbaa, 0x3a5d1, 0x0000c, 0x0003c, 0x000eb, 0x001f1, 0x003a4, 0x006a8, 0x007d5, 0x00d43, 0x00e77, 0x016c5, 0x01cb1, 0x02c5d, 0x03a55, 0x03a56, 0x03e51, 0x03bb5, 0x05b0a, 0x06a9f, 0x074b8, 0x07d28, 0x0d187, 0x0d40e, 0x0d52e, 0x0d425, 0x0eae3, 0x0e1d3, 0x1612e, 0x0e59e, 0x0eec2, 0x0e578, 0x0e51a, 0x0e579, 0x0e515, 0x0e960, 0x0d183, 0x0d220, 0x0d2cb, 0x0e512, 0x16c3e, 0x16002, 0x16c42, 0x1cae9, 0x3461a, 0x1d2fa, 0x1a308, 0x1a849, 0x1cf07, 0x1f38f, 0x34b65, 0x2c253, 0x1ef9e, 0x1cbc3, 0x1cbc1, 0x2c255, 0x1f384, 0x58435, 0x2c5cd, 0x3a5f7, 0x2c252, 0x3959c, 0x2c6d8, 0x3a5d3, 0x6ad78, 0x6a3f2, 0x7cba9, 0xb6176, 0x72deb, 0x39764, 0x3e5f6, 0x3a5d8, 0x74a8c, 0x6a3e6, 0x694d1, 0x6ad79, 0x1a4592, 0xe59fb, 0x7cbb3, 0x5b0cd, 0x00017, 0x000b5, 0x002c3, 0x005b7, 0x00b1c, 0x00e5c, 0x0163f, 0x01ab2, 0x01efa, 0x0348a, 0x0396e, 0x058da, 0x06963, 0x06a30, 0x072cd, 0x073cf, 0x07ce7, 0x0d2ca, 0x0d2d8, 0x0e764, 0x0e794, 0x16008, 0x16167, 0x1617e, 0x1aa49, 0x1a30b, 0x1a813, 0x2c6da, 0x1a580, 0x1cbc2, 0x0f9ca, 0x1617f, 0x1d2fe, 0x0f7fc, 0x16c40, 0x0e513, 0x0eec5, 0x0f7c3, 0x1d508, 0x1a81e, 0x1d2fd, 0x39430, 0x35486, 0x3e5fd, 0x2c24c, 0x2c75a, 
0x34a74, 0x3a5f4, 0x3464d, 0x694ca, 0x3a5f1, 0x1d509, 0x1d5c0, 0x34648, 0x3464e, 0x6a3d5, 0x6a3e8, 0x6a3e7, 0x5b0c3, 0x2c248, 0x1f38a, 0x3a5f2, 0x6a3e5, 0x00029, 0x00168, 0x0058c, 0x00b67, 0x00f9d, 0x01c3d, 0x01cbf, 0x02c20, 0x0351d, 0x03df6, 0x06af9, 0x072b5, 0x0b1d7, 0x0b0b2, 0x0d40a, 0x0d52b, 0x0e952, 0x0e797, 0x163c3, 0x1c3a0, 0x1f386, 0x1ca21, 0x34655, 0x2c247, 0x1f53b, 0x2c250, 0x2c24f, 0x1f385, 0x1ef5d, 0x1cf15, 0x1caea, 0x1ab0a, 0x1cf19, 0x1f53d, 0x1d5c2, 0x1d2fb, 0x1ef58, 0x34a78, 0x357ec, 0x1f533, 0x3a5e1, 0x694d2, 0x58482, 0x3a5ee, 0x2c6dc, 0x357eb, 0x5b0c4, 0x39778, 0x6a3e1, 0x7cbb4, 0x3a5e1, 0x74b68, 0x3a5ef, 0x3a5d2, 0x39424, 0x72de2, 0xe59f6, 0xe59f7, 0x3e702, 0x3e5ec, 0x1f38b, 0x0003b, 0x001f0, 0x00777, 0x00fa8, 0x01cb2, 0x02d84, 0x03a57, 0x03dd6, 0x06917, 0x06a11, 0x07d07, 0x0eae2, 0x0e796, 0x0f9c9, 0x0f7fb, 0x16166, 0x16160, 0x1ab1b, 0x1abfa, 0x2d87b, 0x1d2f7, 0x39768, 0x1f38c, 0x34653, 0x34651, 0x6a3d9, 0x35001, 0x3abbd, 0x38742, 0x39426, 0x34a76, 0x3a5ec, 0x34a75, 0x35000, 0x35488, 0x1cf10, 0x2c6db, 0x357ed, 0x357e8, 0x357e9, 0x3a5f0, 0x694c2, 0xb6178, 0x72df5, 0x39425, 0x3942b, 0x74b6d, 0x74b6f, 0xb6177, 0xb6179, 0x74b6a, 0xb6172, 0x58487, 0x3e5ee, 0x3e5ed, 0x72df2, 0x72df4, 0x7cbae, 0x6a3ca, 0x70e86, 0x34bcf, 0x6a3c8, 0x00059, 0x00384, 0x00d5b, 0x01c38, 0x03560, 0x0395b, 0x0584e, 0x06964, 0x073cd, 0x0b1e7, 0x0e798, 0x0e78d, 0x0fa43, 0x1a848, 0x1a32f, 0x1aa4e, 0x3464a, 0x1f4ab, 0x1f38d, 0x3a5eb, 0x3a5d4, 0x3548a, 0x6a3c7, 0x5b0d0, 0x6a3c5, 0x7cbb0, 0x694cb, 0x3a5e5, 0x3e5e2, 0x3942c, 0x2d872, 0x1f4ae, 0x3a5d5, 0x694d3, 0x58481, 0x35009, 0x39774, 0x58432, 0xb616c, 0x5b0db, 0x3548b, 0xb6174, 0x1d5d95, 0xb004c, 0x7cbb2, 0x3a5e5, 0x74a8f, 0xe59f9, 0x72df6, 0xe59fd, 0x7cbad, 0xd427d, 0x72cff, 0x3977a, 0x5b0d9, 0xb616d, 0xb616b, 0x1a4593, 0x7cbaf, 0x5b0da, 0x00071, 0x003eb, 0x01603, 0x02c6c, 0x03961, 0x068c8, 0x06a31, 0x072bd, 0x0d2c2, 0x0e51b, 0x0e5e6, 0x1abfb, 0x1d2ff, 0x1cae5, 0x1ef5c, 0x1ef5e, 0x1cf13, 0x34a6d, 0x3976d, 0xb616a, 0x3e5f2, 0x6a3c4, 
0xb6169, 0x3e5dc, 0x580b9, 0x74b99, 0x75764, 0x58434, 0x3a5d9, 0x6945a, 0x69459, 0x3548c, 0x3a5e9, 0x69457, 0x72df1, 0x6945e, 0x6a35e, 0x3e701, 0xb6168, 0x5b0dd, 0x3a5de, 0x6a3c2, 0xd4278, 0x6a3cc, 0x72dfd, 0xb6165, 0x16009a, 0x7cbb1, 0xd427c, 0xb6162, 0xe765e, 0x1cecbe, 0x7cbb6, 0x69454, 0xb6160, 0xd427a, 0x1d5d96, 0xb1d6d, 0xe59f4, 0x72de8, 0x3a5db, 0x0007a, 0x006ae, 0x01c3c, 0x03aba, 0x058e9, 0x072cc, 0x0d2dd, 0x0d22d, 0x0eec1, 0x0eedb, 0x1d2a2, 0x1ef5b, 0x357e2, 0x3abbf, 0x1d2f9, 0x35004, 0x3a5dc, 0x351fc, 0x3976c, 0x6a3c6, 0x6a3cb, 0x3e5ea, 0xe59f3, 0x6a3ce, 0x69452, 0xe59f0, 0x74b90, 0xd4279, 0xd427b, 0x7cbb5, 0x5b0c5, 0x3a5e3, 0x3a5e2, 0x000d0, 0x00775, 0x01efe, 0x03dd5, 0x0728c, 0x07cb9, 0x0e1a2, 0x0ea85, 0x0eed8, 0x1a30a, 0x1aa4f, 0x3a5df, 0x35008, 0x3a5e0, 0x3e5f4, 0x3e5f7, 0xb1d6c, 0x5843e, 0x34a70, 0x72df8, 0x74b6b, 0xd427f, 0x72df0, 0x5b0bf, 0x5b0c0, 0xd46b0, 0x72def, 0xe59f8, 0x162e64, 0xb1d6f, 0x3a5e0, 0x39427, 0x69166, 0x6a3e2, 0x6a3e3, 0x74a8d, 0xd427e, 0x1d5d97, 0xd46b4, 0x5b0d8, 0x6a3d3, 0x000e0, 0x00b63, 0x034cc, 0x06a33, 0x073c9, 0x0e1a0, 0x0f7fd, 0x0f9cc, 0x1617d, 0x1caeb, 0x1f4a9, 0x3abb3, 0x69450, 0x39420, 0x39777, 0x3e5e0, 0x6a3d4, 0x6a3ed, 0xb6166, 0xe59f1, 0xb1d6e, 0xe5676, 0x6a3ea, 0xe5674, 0xb6163, 0xd46b7, 0x7cba6, 0xd46ba, 0x1d5d94, 0xb6164, 0x6a3f1, 0x7cba2, 0x69451, 0x72dfa, 0xd46bb, 0x72df7, 0x74b94, 0x1cecbf, 0xe59fa, 0x16009b, 0x6a3e4, 0x000e6, 0x00e94, 0x03876, 0x070ef, 0x0d52a, 0x16015, 0x16014, 0x1abf9, 0x1cf17, 0x34a79, 0x34650, 0x3e705, 0x6a3d0, 0x58430, 0x74b9d, 0x7be7e, 0x5b0be, 0x39773, 0x6a3de, 0x000fb, 0x00f7b, 0x03dd7, 0x07bd0, 0x0e59c, 0x0f9cd, 0x1cf18, 0x1d2ff, 0x34a7a, 0x39429, 0x3500c, 0x72de0, 0x69456, 0x7be7c, 0xd46b5, 0xd46b2, 0x6a3dd, 0x001a2, 0x0163b, 0x06913, 0x0b016, 0x0fa42, 0x1a32d, 0x1cf06, 0x34a7c, 0x34a7d, 0xb6161, 0x35481, 0x3e5fa, 0x7cba0, 0x7be7f, 0x7cba3, 0x7cba7, 0x5b0d3, 0x72de6, 0x6a3dc, 0x001a9, 0x01ab4, 0x06a34, 0x0d46a, 0x16130, 0x1ef5f, 0x1f532, 0x1f536, 0x3942e, 0x58436, 0x6a3db, 0x6945b, 
0x001c9, 0x01ca0, 0x0728b, 0x0eed9, 0x1f539, 0x1ca1d, 0x39765, 0x39766, 0x58439, 0x6945d, 0x39767, 0x001d3, 0x01f2c, 0x07bfc, 0x16161, 0x34652, 0x3a5ed, 0x3548d, 0x58438, 0x6a3da, 0x002c1, 0x02c5e, 0x0d335, 0x1ab1a, 0x2d874, 0x35006, 0x35484, 0x5b0cc, 0x74b9a, 0x72df3, 0x6a3d6, 0x002da, 0x034b3, 0x0d5ae, 0x1caee, 0x2d871, 0x357e3, 0x74b97, 0x72df9, 0x580ba, 0x5b0d4, 0x0034d, 0x0354e, 0x0f750, 0x1cbc0, 0x3a5e7, 0x3a5e4, 0x00385, 0x03a58, 0x16c41, 0x2c5cf, 0x3e5e1, 0x74b6c, 0xe5677, 0x6a3df, 0x00390, 0x03e50, 0x163c2, 0x2d876, 0x35482, 0x5b0d6, 0x5843a, 0x0039f, 0x0585e, 0x1a583, 0x3500f, 0x74b93, 0x39771, 0x003e4, 0x06912, 0x16c43, 0x357e1, 0x0058a, 0x0696f, 0x1f538, 0x5b0c9, 0x6a3cf, 0x005b6, 0x06af8, 0x1f534, 0x58483, 0x6a3e0, 0x00695, 0x07d02, 0x1cae8, 0x58485, 0x006a2, 0x0754a, 0x357ee, 0x3977b, 0x00748, 0x074b2, 0x34a7b, 0x00729, 0x0b1e0, 0x34649, 0x3e5e3, 0x0073d, 0x0d2c4, 0x3e5e6, 0x007bb, 0x0b099, 0x39762, 0x5b0ce, 0x6945f, 0x007d1, 0x0d5ab, 0x39779, 0x007d3, 0x0d52f, 0x39763, 0x6945c, 0x00b1a, 0x0d2c5, 0x35489, 0x00d23, 0x0eaed, 0x3e5f8, 0x00d32, 0x16016, 0x3e5fb, 0x00d41, 0x0e768, 0x3a5ed, 0x00e1f, 0x16017, 0x58027, 0x00ead, 0x0fa07, 0x69455, 0x00e54, 0x1612b, 0x00e55, 0x1a581, 0x00f78, 0x1a32b, 0x580bc, 0x6a3ee, 0x00f79, 0x1abfd, 0x00f95, 0x1ab18, 0x6a3f0, 0x01637, 0x1aa4d, 0x0162d, 0x1f53c, 0x6a3f3, 0x01a31, 0x1a810, 0x39769, 0x01a50, 0x1caef, 0x01a36, 0x1a32e, 0x01a67, 0x1f38e, 0x01a85, 0x1ef59, 0x01aa6, 0x1ef83, 0x01d51, 0x2c012, 0x01d53, 0x2d879, 0x01d5e, 0x35005, 0x01cba, 0x1cf04, 0x69453, 0x01d2d, 0x351ff, 0x01f2d, 0x2d86f, 0x01f29, 0x35007, 0x02c22, 0x351fa, 0x02c03, 0x3a5ec, 0x02c5f, 0x3a5eb, 0x02c58, 0x34a6b, 0x03469, 0x356be, 0x02c59, 0x34a6c, 0x0346a, 0x3a5ea, 0x034bd, 0x034bf, 0x356bf, 0x0386a, 0x03ab9, 0x5843f, 0x0386b, 0x3a5f5, 0x03a4b, 0x39421, 0x03aa4, 0x3a5e9, 0x03a5a, 0x03960, 0x3977e, 0x03de9, 0x03958, 0x03df7, 0x039e1, 0x3e5e4, 0x0395f, 0x69458, 0x03e91, 0x03df2, 0x39428, 0x058f2, 0x03e80, 0x6a3c3, 0x03e93, 0x694c0, 0x058b8, 0x5b0ca, 
0x0584f, 0x694c1, 0x058f1, 0x068d6, 0x06a10, 0x06ac3, 0x06a32, 0x070d2, 0x06911, 0x074b1, 0x07494, 0x06ad4, 0x06ad6, 0x072b8, 0x06afa, 0x074b3, 0x07540, 0x073ce, 0x0b005, 0x074b3, 0x07495, 0x074b9, 0x0d336, 0x07bff, 0x07763, 0x073c8, 0x07d29, 0x0b622, 0x0d221, 0x0d181, 0x0b1d1, 0x074b8, 0x0b1d0, 0x0d19b, 0x0d2c3, 0x0b172, 0x0d2dc, 0x0b623, 0x0d5aa, 0x0d426, 0x0d182, 0x0e795, 0x0e1d1, 0x0d337, 0x0e96c, 0x0e5e4, 0x0e514, 0x0eaee, 0x16000, 0x0e767, 0x0e1a1, 0x0e78f, 0x16004, 0x0f7c2, 0x0e799, 0x0e5e7, 0x0e566, 0x0e769, 0x0f751, 0x0eede, 0x0fa06, 0x16005, 0x0fa9f, 0x1a5e6, 0x0e766, 0x1636f, 0x0eedd, 0x0eec0, 0x1a309, 0x1ceca, 0x163cd, 0x0f9cb, 0x0eedf, 0x1a582, 0x1612d, 0x0e5e5, 0x1abf8, 0x1a30c, 0x1ca1f, 0x163cc, 0x1a35c, 0x1ca1e, 0x1aa51, 0x163ac, 0x1a84e, 0x1a53f, 0x1cf16, 0x1d2fc, 0x1a5b3, 0x1ab19, 0x1a81f, 0x1d5c3, 0x16c3f, 0x1d5c1, 0x1d2fc, 0x1f4aa, 0x1a812, 0x1f535, 0x1cf12, 0x1a817, 0x1617c, 0x1ab0b, 0x1d2f8, 0x1ef82, 0x2d87a, 0x1d52f, 0x1f530, 0x1aa48, 0x35487, 0x1d2fd, 0x1f4ad, 0x1cf11, 0x3461b, 0x35485, 0x1ca20, 0x1caed, 0x1cae6, 0x1abff, 0x3464f, 0x34a6f, 0x1ef81, 0x3464b, 0x39d96, 0x1f383, 0x1f537, 0x1cf14, 0x2c5ce, 0x3500e, 0x2c251, 0x1caec, 0x1f387, 0x34654, 0x357e4, 0x2d878, 0x3500b, 0x35480, 0x3a5e8, 0x3548e, 0x34b64, 0x1f4a8, 0x35003, 0x3e5df, 0x2d870, 0x357e6, 0x3e5f0, 0x1ef5a, 0x3a5ea, 0x1f388, 0x3e703, 0x2c24e, 0x3a5e2, 0x351fd, 0x2c6dd, 0x3e704, 0x351fe, 0x2d875, 0x5b0c7, 0x3976a, 0x3a5e6, 0x39423, 0x58480, 0x2c246, 0x3a5e3, 0x2d877, 0x3e5f1, 0x3abbe, 0x58489, 0x3e5f9, 0x357e0, 0x3abbc, 0x5b0c6, 0x69167, 0x69165, 0x3e5e9, 0x39422, 0x3976f, 0x3977d, 0x3e5de, 0x6a3c9, 0x58b98, 0x3a5f6, 0x3a5d0, 0x58486, 0x6a3c1, 0x3e5fc, 0x5b0dc, 0x3548f, 0x3942d, 0x694c9, 0x58484, 0x3a5e8, 0x74b9b, 0x74b96, 0x694d0, 0x58488, 0x3a5e4, 0x3942a, 0x72ec2, 0x39776, 0x5b0d1, 0x5b0cf, 0x3a5d6, 0xe59fc, 0x5b0c8, 0x3e5e7, 0x7cbb7, 0x70e87, 0x7cbab, 0x5b0c2, 0x694c3, 0x74a8e, 0x3e5f3, 0x6a3cd, 0x72dfe, 0x73b2e, 0x72ec0, 0x694c5, 0x58437, 0x694c8, 0x72dff, 0x39435, 0x5843d, 
0x6a3d7, 0x72ec1, 0xd22c8, 0x694cf, 0xb6173, 0x3e5fe, 0x580bb, 0xe59f2, 0xb616e, 0xb6175, 0x3a5da, 0x5b0bd, 0x694cc, 0x5843c, 0x694c7, 0x74b92, 0x72ec3, 0x694c6, 0xb6170, 0x7cbac, 0xb1733, 0x7cba4, 0xb6167, 0x72de7, 0x72de4, 0x6a3c0, 0x3e5ef, 0x162e65, 0x72de3, 0x72dfb, 0x6a35f, 0x6a3eb, }; static const uint8_t coef2_huffbits[1336] = { 11, 9, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 17, 17, 17, 17, 17, 17, 18, 18, 17, 17, 18, 17, 17, 18, 17, 18, 18, 18, 18, 19, 18, 18, 18, 18, 18, 18, 20, 18, 18, 18, 19, 19, 18, 19, 18, 19, 19, 18, 19, 19, 18, 19, 19, 19, 19, 18, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 19, 19, 20, 19, 20, 19, 19, 20, 19, 19, 20, 20, 20, 20, 19, 20, 21, 19, 3, 5, 7, 8, 9, 9, 10, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 15, 15, 15, 15, 16, 16, 16, 16, 17, 16, 17, 17, 16, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 18, 17, 17, 18, 18, 18, 18, 18, 19, 18, 18, 18, 18, 18, 18, 19, 19, 18, 18, 18, 18, 19, 18, 19, 19, 19, 20, 19, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 19, 20, 19, 20, 19, 20, 19, 19, 21, 20, 20, 19, 4, 7, 8, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 18, 18, 18, 19, 18, 19, 19, 19, 20, 20, 20, 19, 19, 19, 19, 19, 19, 19, 21, 21, 20, 19, 5, 8, 10, 11, 12, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 18, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 19, 18, 19, 18, 18, 18, 18, 18, 19, 18, 
17, 17, 18, 18, 19, 19, 19, 19, 18, 18, 18, 19, 6, 9, 11, 12, 13, 13, 14, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 18, 18, 17, 18, 18, 18, 18, 18, 18, 19, 19, 18, 18, 18, 19, 19, 19, 20, 19, 19, 18, 19, 19, 20, 21, 21, 19, 19, 18, 6, 10, 12, 13, 14, 14, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 18, 18, 19, 18, 18, 18, 19, 18, 18, 18, 19, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 20, 20, 19, 19, 19, 19, 20, 20, 19, 20, 19, 19, 19, 20, 20, 20, 19, 19, 18, 19, 7, 10, 12, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 19, 18, 19, 19, 19, 20, 19, 18, 19, 19, 18, 18, 19, 19, 19, 18, 19, 19, 20, 19, 18, 20, 21, 20, 20, 19, 19, 21, 20, 21, 20, 20, 20, 19, 19, 20, 20, 21, 20, 19, 7, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19, 20, 19, 19, 20, 19, 19, 19, 19, 19, 19, 19, 19, 18, 18, 19, 20, 19, 19, 19, 20, 19, 19, 19, 20, 19, 20, 20, 21, 20, 20, 20, 21, 22, 20, 19, 20, 20, 21, 20, 21, 20, 19, 8, 11, 13, 14, 15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18, 18, 19, 18, 19, 19, 19, 19, 21, 19, 19, 21, 19, 20, 20, 20, 19, 18, 18, 8, 12, 14, 15, 16, 16, 16, 16, 17, 17, 17, 19, 18, 18, 19, 19, 20, 19, 18, 20, 19, 20, 20, 19, 19, 20, 20, 21, 21, 20, 19, 19, 19, 19, 19, 19, 20, 21, 20, 19, 19, 8, 12, 14, 15, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 19, 19, 20, 21, 20, 21, 19, 21, 20, 20, 20, 20, 21, 20, 19, 20, 19, 20, 20, 20, 19, 22, 21, 21, 19, 9, 12, 14, 15, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20, 19, 19, 19, 9, 13, 15, 16, 17, 17, 18, 18, 18, 19, 18, 20, 19, 20, 20, 20, 19, 9, 13, 15, 16, 17, 17, 18, 18, 18, 20, 18, 19, 20, 20, 20, 20, 19, 20, 19, 9, 13, 15, 16, 17, 18, 18, 18, 19, 19, 19, 19, 10, 14, 16, 17, 18, 18, 19, 19, 19, 19, 19, 10, 14, 16, 17, 18, 18, 18, 19, 19, 10, 14, 16, 17, 18, 18, 18, 19, 19, 20, 19, 10, 14, 16, 18, 18, 18, 19, 20, 19, 19, 10, 14, 17, 18, 18, 18, 10, 15, 17, 18, 19, 19, 21, 19, 11, 15, 17, 18, 18, 19, 19, 11, 15, 17, 
18, 19, 19, 11, 15, 17, 18, 11, 15, 18, 19, 19, 11, 15, 18, 19, 19, 11, 16, 18, 19, 11, 15, 18, 19, 11, 16, 18, 12, 16, 18, 19, 12, 16, 19, 12, 16, 19, 19, 19, 12, 16, 19, 12, 16, 19, 19, 12, 16, 18, 12, 16, 19, 12, 17, 19, 12, 17, 19, 12, 17, 19, 12, 17, 19, 13, 17, 13, 17, 13, 17, 19, 19, 13, 17, 13, 17, 19, 13, 17, 13, 18, 19, 13, 17, 19, 13, 18, 13, 17, 13, 18, 13, 18, 13, 18, 13, 18, 13, 18, 13, 18, 14, 18, 19, 14, 18, 14, 18, 14, 18, 14, 18, 14, 19, 14, 19, 14, 18, 14, 18, 14, 18, 14, 19, 14, 14, 18, 14, 14, 19, 14, 18, 14, 19, 14, 19, 14, 15, 19, 15, 15, 15, 15, 19, 15, 19, 15, 15, 19, 15, 15, 19, 15, 19, 15, 19, 15, 19, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 15, 15, 15, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 16, 16, 17, 17, 16, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 17, 18, 17, 17, 17, 17, 18, 18, 17, 17, 17, 17, 17, 17, 17, 18, 17, 18, 18, 17, 17, 17, 18, 18, 18, 17, 18, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 18, 18, 18, 18, 19, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 18, 18, 19, 18, 18, 18, 19, 18, 19, 18, 18, 19, 18, 18, 19, 19, 19, 19, 19, 18, 19, 18, 19, 18, 19, 19, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18, 19, 19, 19, 19, 19, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 19, 19, 19, 19, 21, 19, 19, 20, 19, 20, 19, 19, 19, 19, 19, 20, 20, 20, 19, 19, 19, 20, 19, 19, 19, 20, 20, 19, 20, 19, 19, 21, 20, 20, 19, 19, 19, 19, 19, 19, 20, 19, 20, 20, 20, 20, 20, 20, 20, 19, 19, 21, 20, 20, 19, 19, }; static const uint32_t coef3_huffcodes[1072] = { 0x001b2, 0x00069, 0x00000, 0x00004, 0x00006, 0x0000e, 0x00014, 0x00019, 0x00016, 0x0002b, 0x00030, 0x0003d, 0x0003c, 0x0005a, 0x0005f, 0x0006d, 0x0007e, 0x0005f, 0x0007f, 0x000b6, 0x000bc, 0x000d8, 0x000f2, 0x000fe, 0x000bc, 0x000fc, 0x00161, 0x0016e, 0x00174, 0x00176, 0x001a2, 0x001e3, 0x001f3, 
0x00174, 0x0017a, 0x001ea, 0x002a8, 0x002c4, 0x002e6, 0x00314, 0x00346, 0x00367, 0x003e9, 0x002e5, 0x002ee, 0x003d6, 0x00555, 0x00554, 0x00557, 0x005c3, 0x005d6, 0x006e0, 0x0062f, 0x006e2, 0x00799, 0x00789, 0x007fa, 0x005ce, 0x007fe, 0x005ec, 0x007cc, 0x007af, 0x00aa7, 0x00b19, 0x00b94, 0x00b85, 0x00b9f, 0x00c48, 0x00c45, 0x00dd8, 0x00c4c, 0x00c4b, 0x00d99, 0x00d1f, 0x00dc2, 0x00f95, 0x00fa2, 0x00bb5, 0x00b9f, 0x00f5d, 0x00bbf, 0x00f47, 0x0154a, 0x00fd5, 0x00f45, 0x00f7f, 0x0160d, 0x01889, 0x01757, 0x01722, 0x018b3, 0x0172d, 0x01a39, 0x01a18, 0x01bb3, 0x01b30, 0x01e63, 0x0173c, 0x01b35, 0x01723, 0x01e80, 0x01fee, 0x01761, 0x01ffc, 0x01f7f, 0x02c7c, 0x01fa1, 0x0177b, 0x01755, 0x0175a, 0x01fa6, 0x02eab, 0x0310a, 0x02c69, 0x03669, 0x03127, 0x03103, 0x02e43, 0x03662, 0x03165, 0x03124, 0x0313b, 0x03111, 0x03668, 0x0343b, 0x03c52, 0x03efc, 0x02e6c, 0x03fda, 0x03ef8, 0x02e7b, 0x03ee2, 0x03cc5, 0x03d72, 0x058c0, 0x03df8, 0x02ea9, 0x03e7e, 0x0556d, 0x05c82, 0x03d71, 0x03e7b, 0x03c42, 0x058d7, 0x03f4e, 0x06200, 0x03d70, 0x05cb2, 0x05c96, 0x05cb0, 0x03f45, 0x05cb1, 0x02e6d, 0x03110, 0x02f68, 0x05c90, 0x07ca6, 0x07c88, 0x06204, 0x062c8, 0x078a6, 0x07986, 0x079d5, 0x0b1ad, 0x07989, 0x0b079, 0x05cdd, 0x0aad4, 0x05de8, 0x07dcd, 0x07987, 0x05d67, 0x05d99, 0x0b91d, 0x07cf1, 0x05d9b, 0x079d7, 0x0b07b, 0x05c85, 0x05d9a, 0x07dcc, 0x07ebf, 0x07dce, 0x07dfb, 0x07ec0, 0x07d1a, 0x07a07, 0x05c84, 0x0c471, 0x07cf2, 0x0baef, 0x0b9d2, 0x05deb, 0x07bd6, 0x0b845, 0x05d98, 0x0b91a, 0x0bae8, 0x0c4e0, 0x0dc31, 0x0f93d, 0x0bbce, 0x0d1d2, 0x0f7a9, 0x0d9b9, 0x0bbcb, 0x0b900, 0x0aad7, 0x0babd, 0x0c4e1, 0x0f46f, 0x0c588, 0x0c58b, 0x160e6, 0x0bbcf, 0x0bac3, 0x0f945, 0x0f7a3, 0x0d1c1, 0x0fb8e, 0x0f7a4, 0x0fb8c, 0x0f40c, 0x0c473, 0x0fd72, 0x0bbcd, 0x0fffa, 0x0f940, 0x0bbc9, 0x0f7a8, 0x1a1ed, 0x0bbc5, 0x1f26f, 0x163fd, 0x160c7, 0x1a1f5, 0x0f947, 0x163fc, 0x154b3, 0x0fff6, 0x163f6, 0x160e9, 0x1a1f0, 0x0bab9, 0x0baba, 0x17086, 0x0b903, 0x0fd75, 0x0f308, 0x176f3, 0x163ff, 0x0fd7d, 0x1bb78, 0x163fb, 0x188db, 
0x1a1f7, 0x154b2, 0x172fd, 0x163f4, 0x1bb73, 0x172ff, 0x0babc, 0x0f97d, 0x1a1f3, 0x1bb6d, 0x1ffd5, 0x1a1f4, 0x1f272, 0x17380, 0x17382, 0x1ffe7, 0x0bac8, 0x0bbc4, 0x188d3, 0x160e0, 0x0fd7b, 0x1725f, 0x172f5, 0x1bb79, 0x1fad9, 0x1f269, 0x188d0, 0x0bac4, 0x0bac5, 0x31185, 0x188d2, 0x188cc, 0x31187, 0x3e7fe, 0x188d1, 0x1bb6c, 0x1f268, 0x1fad2, 0x1ffd9, 0x1a1ea, 0x1bb68, 0x1facb, 0x3fdb2, 0x1e81a, 0x188ce, 0x172fb, 0x1a1ef, 0x1face, 0x1bb70, 0x0bac1, 0x1bb6b, 0x172f8, 0x1bb66, 0x1ffdf, 0x1bb6a, 0x1ffd7, 0x1f266, 0x176f8, 0x37653, 0x1fa7e, 0x31182, 0x1fac8, 0x2c7e3, 0x370ee, 0x176ec, 0x176e9, 0x2e4bc, 0x160c5, 0x3765a, 0x3ce9c, 0x17373, 0x176e8, 0x188d4, 0x176f1, 0x176ef, 0x37659, 0x1bb7c, 0x1ffde, 0x176f2, 0x3118b, 0x2c7d4, 0x37651, 0x5ce9f, 0x37650, 0x31191, 0x3f4f6, 0x3f4f5, 0x7a06c, 0x1fac1, 0x5c97b, 0x2c7e0, 0x79d3a, 0x3e7fd, 0x2c7df, 0x3f4f0, 0x7a06d, 0x376c1, 0x79d3b, 0x00004, 0x00014, 0x00059, 0x000ab, 0x000b8, 0x00177, 0x001f5, 0x001f2, 0x00315, 0x003fc, 0x005bd, 0x0062d, 0x006e8, 0x007dd, 0x00b04, 0x007cd, 0x00b1e, 0x00d1e, 0x00f15, 0x00f3b, 0x00f41, 0x01548, 0x018b0, 0x0173b, 0x01884, 0x01a1c, 0x01bb4, 0x01f25, 0x017b5, 0x0176d, 0x01ef8, 0x02e73, 0x03107, 0x03125, 0x03105, 0x02e49, 0x03ce8, 0x03ef9, 0x03e5e, 0x02e72, 0x03471, 0x03fd9, 0x0623f, 0x078a0, 0x06867, 0x05cb3, 0x06272, 0x068ec, 0x06e9a, 0x079d4, 0x06e98, 0x0b1aa, 0x06e1a, 0x07985, 0x068ee, 0x06e9b, 0x05c88, 0x0b1ac, 0x07dfa, 0x05d65, 0x07cf0, 0x07cbf, 0x0c475, 0x160eb, 0x1bb7e, 0x0f7a6, 0x1fedd, 0x160e3, 0x0fffb, 0x0fb8d, 0x0fff9, 0x0d1c0, 0x0c58c, 0x1a1e9, 0x0bab8, 0x0f5cf, 0x0fff5, 0x376c5, 0x1a1ec, 0x160ed, 0x1fede, 0x1fac9, 0x1a1eb, 0x1f224, 0x176ee, 0x0fd79, 0x17080, 0x17387, 0x1bb7a, 0x1ffe9, 0x176f7, 0x17385, 0x17781, 0x2c7d5, 0x17785, 0x1ffe3, 0x163f5, 0x1fac2, 0x3e7f9, 0x3118d, 0x3fdb1, 0x1ffe2, 0x1f226, 0x3118a, 0x2c7d9, 0x31190, 0x3118c, 0x3f4f3, 0x1bb7f, 0x1bb72, 0x31184, 0xb92f4, 0x3e7fb, 0x6e1d9, 0x1faca, 0x62300, 0x3fdb8, 0x3d037, 0x3e7fc, 0x62301, 0x3f4f2, 0x1f26a, 0x0000e, 0x00063, 
0x000f8, 0x001ee, 0x00377, 0x003f7, 0x006e3, 0x005cc, 0x00b05, 0x00dd2, 0x00fd4, 0x0172e, 0x0172a, 0x01e23, 0x01f2d, 0x01763, 0x01769, 0x0176c, 0x02e75, 0x03104, 0x02ec1, 0x03e58, 0x0583f, 0x03f62, 0x03f44, 0x058c5, 0x0623c, 0x05cf4, 0x07bd7, 0x05d9d, 0x0aad2, 0x05d66, 0x0b1a9, 0x0b078, 0x07cfe, 0x0b918, 0x0c46f, 0x0b919, 0x0b847, 0x06e1b, 0x0b84b, 0x0aad8, 0x0fd74, 0x172f4, 0x17081, 0x0f97c, 0x1f273, 0x0f7a0, 0x0fd7c, 0x172f7, 0x0fd7a, 0x1bb77, 0x172fe, 0x1f270, 0x0fd73, 0x1bb7b, 0x1a1bc, 0x1bb7d, 0x0bbc3, 0x172f6, 0x0baeb, 0x0fb8f, 0x3f4f4, 0x3fdb4, 0x376c8, 0x3e7fa, 0x1ffd0, 0x62303, 0xb92f5, 0x1f261, 0x31189, 0x3fdb5, 0x2c7db, 0x376c9, 0x1fad6, 0x1fad1, 0x00015, 0x000f0, 0x002e0, 0x0058e, 0x005d7, 0x00c4d, 0x00fa1, 0x00bdb, 0x01756, 0x01f70, 0x02c19, 0x0313c, 0x0370f, 0x03cc0, 0x02ea8, 0x058c6, 0x058c7, 0x02eb7, 0x058d0, 0x07d18, 0x0aa58, 0x0b848, 0x05d9e, 0x05d6c, 0x0b84c, 0x0c589, 0x0b901, 0x163f8, 0x0bac9, 0x0b9c5, 0x0f93c, 0x188d8, 0x0bbc7, 0x160ec, 0x0fd6f, 0x188d9, 0x160ea, 0x0f7a7, 0x0f944, 0x0baab, 0x0dc3a, 0x188cf, 0x176fb, 0x2c7d8, 0x2c7d7, 0x1bb75, 0x5ce9e, 0x62302, 0x370ed, 0x176f4, 0x1ffd1, 0x370ef, 0x3f4f8, 0x376c7, 0x1ffe1, 0x376c6, 0x176ff, 0x6e1d8, 0x176f6, 0x17087, 0x0f5cd, 0x00035, 0x001a0, 0x0058b, 0x00aac, 0x00b9a, 0x0175f, 0x01e22, 0x01e8c, 0x01fb2, 0x0310b, 0x058d1, 0x0552e, 0x05c27, 0x0686e, 0x07ca7, 0x0c474, 0x0dc33, 0x07bf2, 0x05de9, 0x07a35, 0x0baaa, 0x0b9eb, 0x0fb95, 0x0b9b8, 0x17381, 0x1f262, 0x188cd, 0x17088, 0x172fa, 0x0f7a2, 0x1fad3, 0x0bac0, 0x3765c, 0x1fedf, 0x1f225, 0x1fad4, 0x2c7da, 0x5ce9d, 0x3e7f8, 0x1e203, 0x188d7, 0x00054, 0x002c0, 0x007a1, 0x00f78, 0x01b36, 0x01fa3, 0x0313a, 0x03436, 0x0343a, 0x07d1d, 0x07bd8, 0x05cdf, 0x0b846, 0x0b189, 0x0d9b8, 0x0fff8, 0x0d9be, 0x0c58a, 0x05dea, 0x0d1d3, 0x160e4, 0x1f26b, 0x188da, 0x1e202, 0x2c7d2, 0x163fe, 0x31193, 0x17782, 0x376c2, 0x2c7d1, 0x3fdb0, 0x3765d, 0x2c7d0, 0x1fad0, 0x1e201, 0x188dd, 0x2c7e2, 0x37657, 0x37655, 0x376c4, 0x376c0, 0x176ea, 0x0006f, 0x003cf, 0x00dd5, 0x01f23, 
0x02c61, 0x02ed0, 0x05d54, 0x0552d, 0x07883, 0x0b1a8, 0x0b91c, 0x0babf, 0x0b902, 0x0f7aa, 0x0f7a5, 0x1a1e8, 0x1ffd6, 0x0babe, 0x1a1bf, 0x163f3, 0x1ffd8, 0x1fad7, 0x1f275, 0x1ffdc, 0x0007d, 0x005bc, 0x01549, 0x02a99, 0x03def, 0x06273, 0x079d6, 0x07d1b, 0x0aad3, 0x0d0fc, 0x2c7dd, 0x188d6, 0x0bac2, 0x2c7e1, 0x1bb76, 0x1a1bd, 0x31186, 0x0fd78, 0x1a1be, 0x31183, 0x3fdb6, 0x3f4f1, 0x37652, 0x1fad5, 0x3f4f9, 0x3e7ff, 0x5ce9c, 0x3765b, 0x31188, 0x17372, 0x000bd, 0x0078b, 0x01f21, 0x03c43, 0x03ded, 0x0aad6, 0x07ec1, 0x0f942, 0x05c86, 0x17089, 0x0babb, 0x1ffe8, 0x2c7de, 0x1f26e, 0x1fac4, 0x3f4f7, 0x37656, 0x1fa7d, 0x376c3, 0x3fdb3, 0x3118f, 0x1fac6, 0x000f8, 0x007ed, 0x01efd, 0x03e7a, 0x05c91, 0x0aad9, 0x0baec, 0x0dc32, 0x0f46e, 0x1e200, 0x176fa, 0x3765e, 0x3fdb7, 0x2c7d6, 0x3fdb9, 0x37654, 0x37658, 0x3118e, 0x1ffdb, 0x000f6, 0x00c43, 0x03106, 0x068ef, 0x0b84d, 0x0b188, 0x0bbcc, 0x1f264, 0x1bb69, 0x17386, 0x1fac0, 0x00171, 0x00f39, 0x03e41, 0x068ed, 0x0d9bc, 0x0f7a1, 0x1bb67, 0x1ffdd, 0x176f9, 0x001b9, 0x00f7d, 0x03f63, 0x0d0fd, 0x0b9ea, 0x188dc, 0x1fac3, 0x1a1f2, 0x31192, 0x1ffe4, 0x001f6, 0x01754, 0x06865, 0x0f309, 0x160e5, 0x176f5, 0x3765f, 0x1facc, 0x001e9, 0x01a1a, 0x06201, 0x0f105, 0x176f0, 0x002df, 0x01756, 0x05d6d, 0x163fa, 0x176ed, 0x00342, 0x02e40, 0x0d0ff, 0x17082, 0x003cd, 0x02a98, 0x0fffc, 0x2c7dc, 0x1fa7f, 0x003fe, 0x03764, 0x0fffd, 0x176fc, 0x1fac5, 0x002f7, 0x02ed1, 0x0fb97, 0x0058a, 0x02edc, 0x0bbc8, 0x005d4, 0x0623d, 0x160e8, 0x0062e, 0x05830, 0x163f9, 0x006eb, 0x06205, 0x1f274, 0x007de, 0x062c9, 0x1f265, 0x005c9, 0x05cde, 0x1ffd3, 0x005d4, 0x07988, 0x007ce, 0x0b849, 0x00b1b, 0x05c89, 0x1fac7, 0x00b93, 0x05c83, 0x00b9e, 0x0f14f, 0x00c4a, 0x0b9c7, 0x00dd4, 0x0c470, 0x1f271, 0x00f38, 0x0fb96, 0x176eb, 0x00fa0, 0x163f7, 0x00bb2, 0x0b91b, 0x00bbe, 0x0f102, 0x00f44, 0x0f946, 0x1facd, 0x00f79, 0x0d9bd, 0x0154d, 0x0bbc6, 0x00fd2, 0x160e7, 0x0172b, 0x188cb, 0x0175e, 0x0fd76, 0x0175c, 0x1bb71, 0x0189f, 0x1a1ee, 0x01f24, 0x1a1f6, 0x01ba7, 0x0bbca, 0x01f7d, 0x0ffff, 
0x01f2e, 0x1bb65, 0x01bb5, 0x172f9, 0x01fef, 0x1f26c, 0x01f3e, 0x0fd77, 0x01762, 0x1bb6e, 0x01ef9, 0x172fc, 0x01fa0, 0x02ab7, 0x02e4a, 0x1f267, 0x01fb3, 0x1ffda, 0x02e42, 0x03101, 0x17780, 0x0313d, 0x03475, 0x17784, 0x03126, 0x1facf, 0x03c51, 0x17783, 0x03e40, 0x1ffe5, 0x03663, 0x1ffe0, 0x03e8f, 0x1f26d, 0x0343c, 0x03cc1, 0x176fd, 0x03e45, 0x02ec0, 0x03f61, 0x03dee, 0x03fd8, 0x0583e, 0x02e45, 0x03e59, 0x03d02, 0x05ce8, 0x05568, 0x176fe, 0x02f69, 0x1fad8, 0x058c1, 0x05c83, 0x1ffe6, 0x06271, 0x06e1c, 0x062c7, 0x068e1, 0x0552f, 0x06864, 0x06866, 0x06e99, 0x05cbc, 0x07ca5, 0x078a1, 0x05c82, 0x07dcf, 0x0623b, 0x0623e, 0x068e8, 0x07a36, 0x05d9c, 0x0b077, 0x07cf3, 0x07a34, 0x07ca4, 0x07d19, 0x079d2, 0x07d1c, 0x07bd9, 0x0b84a, 0x0fb94, 0x0aad5, 0x0dc30, 0x07bf3, 0x0baee, 0x0b07a, 0x0c472, 0x0b91e, 0x0d9ba, 0x05d9f, 0x0d0fe, 0x0b9c6, 0x05c87, 0x0f14e, 0x0baed, 0x0b92e, 0x0f103, 0x0b9c4, 0x0fb91, 0x0d9bb, 0x0b1ab, 0x0c58d, 0x0fffe, 0x0f93b, 0x0f941, 0x0baea, 0x0b91f, 0x0f5cc, 0x0d9bf, 0x0f943, 0x0f104, 0x1f260, 0x0fb92, 0x0f93f, 0x0f3a6, 0x0bac7, 0x0f7ab, 0x0bac6, 0x17383, 0x0fd6d, 0x0bae9, 0x0fd6e, 0x1e74f, 0x188ca, 0x1f227, 0x0fb93, 0x0fb90, 0x0fff7, 0x17085, 0x17083, 0x160e1, 0x17084, 0x0f93e, 0x160e2, 0x160c6, 0x1a1f1, 0x1bb6f, 0x17384, 0x0fd70, 0x1f263, 0x188d5, 0x173a6, 0x0f5ce, 0x163f2, 0x0fd71, 0x1ffd2, 0x160c4, 0x1ffd4, 0x2c7d3, 0x1bb74, }; static const uint8_t coef3_huffbits[1072] = { 9, 7, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 13, 14, 14, 13, 14, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 14, 14, 15, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 15, 15, 
15, 15, 15, 15, 15, 15, 15, 16, 15, 16, 16, 16, 16, 15, 15, 16, 16, 16, 16, 16, 15, 16, 16, 16, 15, 16, 15, 15, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 17, 16, 17, 17, 16, 17, 16, 17, 16, 16, 17, 17, 17, 16, 17, 16, 16, 17, 16, 17, 16, 17, 17, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 16, 18, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 17, 17, 17, 18, 17, 17, 18, 19, 17, 17, 17, 18, 17, 17, 17, 18, 18, 18, 17, 17, 17, 18, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 18, 18, 18, 18, 17, 18, 18, 18, 17, 17, 18, 18, 18, 18, 19, 18, 18, 19, 19, 20, 18, 19, 18, 19, 19, 18, 19, 20, 18, 19, 4, 6, 7, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 15, 15, 15, 15, 16, 16, 15, 16, 16, 15, 16, 17, 17, 17, 17, 17, 16, 16, 16, 16, 16, 17, 17, 17, 16, 18, 17, 17, 17, 18, 17, 17, 18, 17, 17, 17, 17, 17, 18, 17, 18, 18, 18, 17, 17, 18, 19, 18, 18, 17, 17, 18, 18, 18, 18, 19, 17, 17, 18, 20, 19, 19, 18, 19, 18, 19, 19, 19, 19, 17, 5, 7, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 16, 16, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 16, 16, 19, 18, 18, 19, 17, 19, 20, 17, 18, 18, 18, 18, 18, 18, 6, 8, 10, 11, 12, 12, 12, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 16, 16, 17, 17, 17, 17, 17, 17, 17, 16, 16, 16, 17, 18, 18, 18, 17, 19, 19, 18, 18, 17, 18, 19, 18, 17, 18, 18, 19, 18, 17, 17, 6, 9, 11, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 17, 16, 17, 17, 17, 17, 17, 17, 17, 18, 17, 18, 17, 17, 18, 18, 19, 19, 17, 17, 7, 10, 12, 13, 13, 14, 
14, 14, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 17, 18, 18, 18, 18, 18, 18, 18, 18, 17, 17, 18, 18, 18, 18, 18, 18, 7, 10, 12, 13, 14, 15, 15, 15, 15, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, 17, 8, 11, 13, 14, 15, 15, 15, 15, 16, 16, 18, 17, 17, 18, 17, 17, 18, 17, 17, 18, 18, 19, 18, 18, 19, 19, 19, 18, 18, 18, 8, 11, 13, 14, 15, 16, 16, 16, 16, 17, 17, 17, 18, 17, 18, 19, 18, 18, 18, 18, 18, 18, 8, 12, 14, 15, 15, 16, 16, 16, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 17, 9, 12, 14, 15, 16, 16, 17, 17, 17, 17, 18, 9, 12, 14, 15, 16, 17, 17, 17, 18, 9, 13, 15, 16, 17, 17, 18, 17, 18, 17, 9, 13, 15, 16, 17, 18, 18, 18, 10, 13, 15, 16, 18, 10, 14, 16, 17, 18, 10, 14, 16, 17, 10, 14, 16, 18, 18, 10, 14, 16, 18, 18, 11, 15, 16, 11, 15, 17, 11, 15, 17, 11, 15, 17, 11, 15, 17, 11, 15, 17, 12, 16, 17, 12, 15, 12, 16, 12, 16, 18, 12, 16, 12, 16, 12, 16, 12, 16, 17, 12, 16, 18, 12, 17, 13, 16, 13, 16, 13, 16, 18, 13, 16, 13, 17, 13, 17, 13, 17, 13, 17, 13, 17, 13, 17, 13, 17, 13, 17, 13, 16, 13, 17, 13, 17, 13, 17, 14, 17, 14, 17, 14, 17, 14, 14, 14, 17, 14, 17, 14, 14, 18, 14, 14, 18, 14, 18, 14, 18, 14, 17, 14, 17, 14, 17, 14, 14, 18, 14, 15, 15, 15, 14, 15, 15, 14, 15, 15, 15, 18, 15, 18, 15, 15, 17, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 15, 15, 15, 15, 16, 16, 16, 16, 16, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 16, 16, 17, 16, 16, 16, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 16, 16, 16, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 17, }; static const uint32_t coef4_huffcodes[476] = { 0x00f01, 0x0001e, 0x00000, 0x00004, 0x00006, 0x0000d, 0x0000a, 0x00017, 0x0001d, 0x00017, 0x0002c, 0x00031, 0x00039, 0x0003e, 0x00039, 0x0005a, 0x00066, 0x00070, 0x0007b, 0x00070, 0x00077, 0x000af, 0x000c9, 0x000f2, 0x000f4, 0x000b2, 0x000e3, 0x0015b, 0x0015d, 0x00181, 0x0019d, 0x001e3, 0x001c5, 0x002b5, 
0x002db, 0x00338, 0x003c3, 0x003cc, 0x003f0, 0x002cd, 0x003fa, 0x003a1, 0x005b4, 0x00657, 0x007ab, 0x0074d, 0x0074c, 0x00ac1, 0x00ac5, 0x0076b, 0x00ca8, 0x00f04, 0x00f00, 0x00fe3, 0x00f3c, 0x00f10, 0x00f39, 0x00fe6, 0x00e26, 0x00e90, 0x016c5, 0x01827, 0x01954, 0x015c5, 0x01958, 0x01f8a, 0x01c4a, 0x02b0f, 0x02b41, 0x02b0e, 0x033c6, 0x03050, 0x01c4f, 0x02d88, 0x0305c, 0x03c18, 0x02b4f, 0x02cc2, 0x03a47, 0x05680, 0x0569d, 0x06442, 0x06443, 0x06446, 0x0656e, 0x06444, 0x07120, 0x0748a, 0x0c1ba, 0x07e22, 0x07aa6, 0x07f25, 0x07aa7, 0x07e20, 0x0c11b, 0x0c118, 0x07aa5, 0x0ad0a, 0x0f389, 0x19ebb, 0x0caad, 0x0fe42, 0x0fe40, 0x16c34, 0x2b4e5, 0x33d65, 0x16c30, 0x1e7ae, 0x1e25c, 0x18370, 0x1e703, 0x19eba, 0x16c37, 0x0e234, 0x16c6e, 0x00004, 0x0002a, 0x00061, 0x00075, 0x000cb, 0x000ff, 0x00190, 0x001eb, 0x001d1, 0x002b9, 0x00307, 0x00339, 0x0033f, 0x003fb, 0x003b4, 0x0060c, 0x00679, 0x00645, 0x0067d, 0x0078a, 0x007e3, 0x00749, 0x00ac4, 0x00ad2, 0x00ae3, 0x00c10, 0x00c16, 0x00ad1, 0x00cf4, 0x00fe2, 0x01586, 0x00e9d, 0x019f1, 0x01664, 0x01e26, 0x01d38, 0x02b4d, 0x033c5, 0x01fc2, 0x01fc3, 0x01d28, 0x03c1d, 0x0598e, 0x0f094, 0x07aa4, 0x0ad38, 0x0ac0c, 0x0c11a, 0x079ea, 0x0c881, 0x0fe44, 0x0b635, 0x0ac0d, 0x0b61e, 0x05987, 0x07121, 0x0f382, 0x0f387, 0x0e237, 0x0fe47, 0x0f383, 0x0f091, 0x0f385, 0x0e233, 0x182ee, 0x19eb8, 0x1663e, 0x0f093, 0x00014, 0x00058, 0x00159, 0x00167, 0x00300, 0x003d4, 0x005b5, 0x0079d, 0x0076a, 0x00b67, 0x00b60, 0x00f05, 0x00cf0, 0x00f17, 0x00e95, 0x01822, 0x01913, 0x016c2, 0x0182f, 0x01959, 0x01fcb, 0x01e27, 0x01c40, 0x033c7, 0x01e7b, 0x01c49, 0x02d89, 0x01e23, 0x01660, 0x03f12, 0x02cc6, 0x033e1, 0x05b34, 0x0609a, 0x06569, 0x07488, 0x07e21, 0x0cf5f, 0x0712c, 0x0389d, 0x067cf, 0x07f28, 0x1663f, 0x33d67, 0x1663d, 0x1e25d, 0x3c1ab, 0x15c44, 0x16c36, 0x0001f, 0x000ec, 0x00323, 0x005b2, 0x0079f, 0x00ac2, 0x00f16, 0x00e9e, 0x01956, 0x01e0f, 0x019ea, 0x01666, 0x02b89, 0x02b02, 0x02d8c, 0x03c1b, 0x03c19, 0x032b5, 0x03f9c, 0x02ccf, 0x03897, 0x05b35, 0x0ad02, 0x07f29, 
0x06441, 0x03884, 0x07888, 0x0784e, 0x06568, 0x0c1bb, 0x05986, 0x067cc, 0x0fe49, 0x0fe48, 0x0c1bc, 0x0fe41, 0x18371, 0x1663c, 0x0e231, 0x0711e, 0x0ad09, 0x0f092, 0x0002d, 0x001db, 0x00781, 0x00c1a, 0x00f55, 0x01580, 0x01ea8, 0x02d9b, 0x032af, 0x03f16, 0x03c1c, 0x07834, 0x03c45, 0x0389c, 0x067ce, 0x06445, 0x0c1b9, 0x07889, 0x07f3a, 0x0784f, 0x07f2b, 0x0ad0b, 0x0f090, 0x0c11d, 0x0e94e, 0x0711f, 0x0e9f1, 0x0f38e, 0x079e9, 0x0ad03, 0x0f09b, 0x0caae, 0x0fe46, 0x2b4e6, 0x0e9f0, 0x19eb6, 0x67ac1, 0x67ac0, 0x33d66, 0x0f388, 0x00071, 0x003a0, 0x00ca9, 0x01829, 0x01d39, 0x02b43, 0x02cc4, 0x06554, 0x0f09a, 0x0b61f, 0x067cd, 0x0711c, 0x0b636, 0x07f2a, 0x0b634, 0x0c11f, 0x0cf5e, 0x0b61d, 0x0f06b, 0x0caab, 0x0c1be, 0x0e94c, 0x0f099, 0x182ed, 0x0e94f, 0x0c119, 0x0e232, 0x2b4e4, 0x0f38a, 0x19eb4, 0x1e25f, 0x0e94d, 0x000b7, 0x00785, 0x016cc, 0x03051, 0x033c4, 0x0656f, 0x03891, 0x0711d, 0x0caaf, 0x0f097, 0x07489, 0x0f098, 0x0c880, 0x0caaa, 0x0f386, 0x19eb7, 0x16c6f, 0x0f384, 0x182e8, 0x182e9, 0x0e230, 0x1e700, 0x33d62, 0x33d63, 0x33d64, 0x16c33, 0x0e216, 0x000fd, 0x00c15, 0x01665, 0x03c4a, 0x07f3b, 0x07896, 0x0c11c, 0x0e215, 0x16c32, 0x0f38b, 0x0f38d, 0x182ea, 0x1e701, 0x712df, 0x15c46, 0x00194, 0x00fe0, 0x03f13, 0x0748b, 0x0f096, 0x0cf80, 0x1e25e, 0xe25bd, 0x33d61, 0x16c31, 0x001f9, 0x01912, 0x05710, 0x0f3d0, 0x0c1bf, 0x00301, 0x01e24, 0x0ad08, 0x003cd, 0x01c41, 0x0c1bd, 0x00563, 0x03a52, 0x0f3d1, 0x00570, 0x02cce, 0x0e217, 0x0067b, 0x0655d, 0x0074b, 0x06447, 0x00c12, 0x074fb, 0x00f08, 0x0b61c, 0x00e22, 0x0fe43, 0x016c7, 0x01836, 0x019f2, 0x01c43, 0x01d3f, 0x01fcf, 0x02b4c, 0x0304c, 0x032b6, 0x03a46, 0x05607, 0x03f17, 0x02cc5, 0x0609b, 0x0655c, 0x07e23, 0x067c1, 0x07f26, 0x07f27, 0x0f095, 0x0e9f3, 0x0cf81, 0x0c11e, 0x0caac, 0x0f38f, 0x0e9f2, 0x074fa, 0x0e236, 0x0fe45, 0x1c428, 0x0e235, 0x182ef, 0x19eb5, 0x0f3d6, 0x182ec, 0x16c35, 0x0f38c, 0x2b4e7, 0x15c47, 0xe25bc, 0x1e702, 0x1c4b6, 0x0e25a, 0x3c1aa, 0x15c45, 0x1c429, 0x19eb9, 0x1e7af, 0x182eb, 0x1e0d4, 0x3896e, }; static const 
uint8_t coef4_huffbits[476] = { 12, 6, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 11, 10, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 15, 15, 15, 15, 15, 16, 16, 15, 16, 16, 17, 16, 16, 16, 17, 18, 18, 17, 17, 17, 17, 17, 17, 17, 17, 17, 4, 6, 7, 8, 8, 8, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 14, 13, 14, 14, 14, 13, 13, 14, 14, 16, 16, 15, 16, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 17, 17, 17, 18, 16, 5, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 13, 14, 14, 13, 14, 14, 15, 14, 15, 15, 15, 16, 15, 16, 16, 15, 15, 15, 18, 18, 18, 17, 18, 17, 17, 6, 9, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 16, 15, 15, 15, 15, 15, 15, 16, 16, 15, 16, 16, 16, 16, 17, 18, 17, 16, 16, 16, 7, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 15, 14, 15, 15, 15, 16, 15, 15, 15, 15, 16, 16, 16, 17, 16, 17, 16, 15, 16, 16, 16, 16, 18, 17, 17, 19, 19, 18, 16, 7, 11, 12, 13, 14, 14, 15, 15, 16, 16, 15, 16, 16, 15, 16, 16, 16, 16, 16, 16, 16, 17, 16, 17, 17, 16, 17, 18, 16, 17, 17, 17, 8, 11, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 16, 17, 17, 17, 17, 18, 18, 18, 17, 17, 8, 12, 14, 14, 15, 15, 16, 17, 17, 16, 16, 17, 17, 20, 17, 9, 12, 14, 16, 16, 16, 17, 21, 18, 17, 9, 13, 15, 16, 16, 10, 13, 16, 10, 14, 16, 11, 15, 16, 11, 15, 17, 11, 15, 12, 15, 12, 16, 12, 16, 13, 16, 13, 13, 13, 14, 14, 13, 14, 14, 14, 15, 15, 14, 15, 15, 15, 15, 15, 15, 15, 16, 17, 16, 16, 16, 16, 17, 16, 17, 16, 18, 17, 17, 17, 16, 17, 17, 16, 18, 17, 21, 17, 18, 17, 18, 17, 18, 17, 17, 17, 17, 19, }; static const uint32_t coef5_huffcodes[435] = { 0x00347, 0x0000b, 0x00001, 0x00001, 0x0000c, 0x00004, 0x00010, 
0x00015, 0x0001f, 0x0000b, 0x00023, 0x00026, 0x00029, 0x00035, 0x00037, 0x00001, 0x00015, 0x0001a, 0x0001d, 0x0001c, 0x0001e, 0x0004e, 0x00049, 0x00051, 0x00078, 0x00004, 0x00000, 0x00008, 0x0000d, 0x0007b, 0x00005, 0x00032, 0x00095, 0x00091, 0x00096, 0x000a1, 0x000d9, 0x00003, 0x00019, 0x00061, 0x00066, 0x00060, 0x00017, 0x0000e, 0x00063, 0x001a0, 0x001b7, 0x001e6, 0x001e7, 0x001b6, 0x00018, 0x001e8, 0x00038, 0x00031, 0x00005, 0x0003d, 0x00027, 0x001ea, 0x0001a, 0x000c5, 0x000f9, 0x000ff, 0x000db, 0x00250, 0x000fc, 0x0025c, 0x00008, 0x00075, 0x003d7, 0x003d3, 0x001b0, 0x0007c, 0x003ca, 0x00036, 0x00189, 0x004a6, 0x004a2, 0x004fb, 0x000c0, 0x0007f, 0x0009a, 0x00311, 0x0006e, 0x0009b, 0x0068c, 0x006c0, 0x00484, 0x00012, 0x000c3, 0x0094f, 0x00979, 0x009f9, 0x00d09, 0x00da6, 0x00da8, 0x00901, 0x000c1, 0x00373, 0x00d08, 0x009fa, 0x00d8b, 0x00d85, 0x00d86, 0x000df, 0x006e2, 0x000ce, 0x00f24, 0x009fe, 0x001f7, 0x007c1, 0x000cf, 0x009fc, 0x009ff, 0x00d89, 0x00da9, 0x009fd, 0x001f8, 0x01a36, 0x0128c, 0x0129d, 0x01a37, 0x00196, 0x003ea, 0x00f8b, 0x00d93, 0x01e45, 0x01e58, 0x01e4b, 0x01e59, 0x013f1, 0x00309, 0x00265, 0x00308, 0x0243a, 0x027e1, 0x00f89, 0x00324, 0x03cbc, 0x03c86, 0x03695, 0x0243c, 0x0243b, 0x0243e, 0x01e4a, 0x003a5, 0x03468, 0x03428, 0x03c84, 0x027e0, 0x025e2, 0x01880, 0x00197, 0x00325, 0x03cb7, 0x0791e, 0x007ec, 0x06c75, 0x004c8, 0x04bc7, 0x004c6, 0x00983, 0x0481e, 0x01b53, 0x0251b, 0x01b58, 0x00984, 0x04fa8, 0x03cbb, 0x00f8a, 0x00322, 0x0346a, 0x0243d, 0x00326, 0x03469, 0x0481f, 0x0481d, 0x00746, 0x09032, 0x01b50, 0x01d13, 0x0d8e4, 0x0481b, 0x06c74, 0x0796b, 0x07969, 0x00985, 0x0d8e3, 0x00986, 0x00fa2, 0x01301, 0x06c7c, 0x00987, 0x03cb8, 0x0f4af, 0x00e88, 0x1b1c0, 0x00fce, 0x033eb, 0x03f6a, 0x03f69, 0x00fcf, 0x0791f, 0x004c9, 0x04871, 0x00fcd, 0x00982, 0x00fcc, 0x00fa3, 0x01d12, 0x0796c, 0x01b47, 0x00321, 0x0796a, 0x0d8e2, 0x04872, 0x04873, 0x0000e, 0x00014, 0x0000a, 0x000a0, 0x00012, 0x0007d, 0x001a2, 0x0003b, 0x0025f, 0x000dd, 0x0027c, 0x00343, 0x00368, 
0x0036b, 0x0003e, 0x001fa, 0x00485, 0x001b3, 0x0007f, 0x001b1, 0x0019e, 0x004ba, 0x007ad, 0x00339, 0x00066, 0x007a4, 0x00793, 0x006c6, 0x0007e, 0x000f1, 0x00372, 0x009fb, 0x00d83, 0x00d8a, 0x00947, 0x009f4, 0x001d0, 0x01b09, 0x01b4b, 0x007ec, 0x003e1, 0x000ca, 0x003ec, 0x02539, 0x04fa9, 0x01b57, 0x03429, 0x03d2a, 0x00d97, 0x003a7, 0x00dc0, 0x00d96, 0x00dc1, 0x007eb, 0x03cba, 0x00c43, 0x00c41, 0x01b52, 0x007ef, 0x00323, 0x03cb9, 0x03c83, 0x007d0, 0x007ed, 0x06c7f, 0x09033, 0x03f6c, 0x36383, 0x1e95d, 0x06c78, 0x00747, 0x01b51, 0x00022, 0x00016, 0x00039, 0x00252, 0x00079, 0x00486, 0x00338, 0x00369, 0x00d88, 0x00026, 0x00d87, 0x00f4b, 0x00d82, 0x00027, 0x001e1, 0x01a15, 0x007c7, 0x012f0, 0x001e0, 0x006d0, 0x01a16, 0x01e44, 0x01e5f, 0x03690, 0x00d90, 0x00c42, 0x00daf, 0x00d92, 0x00f80, 0x00cfb, 0x0342f, 0x0487f, 0x01b46, 0x07968, 0x00d95, 0x00d91, 0x01b55, 0x03f68, 0x04bc6, 0x03cbd, 0x00f81, 0x00320, 0x00069, 0x000fe, 0x006d5, 0x0033f, 0x000de, 0x007c6, 0x01e40, 0x00d94, 0x00f88, 0x03c8e, 0x03694, 0x00dae, 0x00dad, 0x00267, 0x003a6, 0x00327, 0x0487e, 0x007ee, 0x00749, 0x004c7, 0x03692, 0x01b56, 0x00fd1, 0x07a56, 0x06c77, 0x09031, 0x00748, 0x06c7a, 0x0796d, 0x033ea, 0x06c76, 0x00fd0, 0x36382, 0x1e417, 0x00745, 0x04faf, 0x0d8e1, 0x03f6b, 0x1e95c, 0x04fad, 0x0009e, 0x004bd, 0x0067c, 0x01b08, 0x003eb, 0x01b45, 0x03691, 0x0d8e5, 0x07904, 0x00981, 0x007ea, 0x019f4, 0x06c7d, 0x04fab, 0x04fac, 0x06c7e, 0x01300, 0x06c7b, 0x0006f, 0x003f7, 0x03c85, 0x004c4, 0x0001e, 0x006e1, 0x03693, 0x01b44, 0x00241, 0x01e46, 0x0019d, 0x00266, 0x004bb, 0x02538, 0x007ac, 0x01b54, 0x00902, 0x04870, 0x00da7, 0x00900, 0x00185, 0x06c79, 0x006e3, 0x003e9, 0x01e94, 0x003ed, 0x003f2, 0x0342e, 0x0346b, 0x0251a, 0x004c5, 0x01881, 0x0481c, 0x01b59, 0x03c87, 0x04fae, 0x007e9, 0x03f6d, 0x0f20a, 0x09030, 0x04faa, 0x0d8e6, 0x03f6f, 0x0481a, 0x03f6e, 0x1e416, 0x0d8e7, }; static const uint8_t coef5_huffbits[435] = { 10, 4, 2, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 7, 8, 8, 
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 9, 10, 10, 10, 10, 10, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 10, 10, 11, 11, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 12, 12, 13, 13, 13, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 15, 14, 14, 14, 14, 14, 14, 13, 14, 14, 14, 14, 14, 14, 15, 14, 15, 14, 15, 15, 15, 15, 15, 15, 16, 15, 15, 14, 15, 16, 15, 14, 14, 15, 14, 14, 15, 14, 15, 15, 15, 16, 15, 17, 16, 15, 15, 15, 15, 16, 16, 16, 16, 17, 15, 16, 14, 16, 16, 17, 16, 16, 16, 16, 16, 15, 15, 15, 16, 16, 16, 16, 17, 15, 15, 15, 15, 16, 15, 15, 4, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 12, 13, 14, 14, 15, 15, 14, 14, 14, 14, 14, 14, 14, 15, 14, 14, 14, 15, 15, 15, 14, 14, 15, 15, 15, 16, 16, 18, 17, 15, 15, 15, 6, 9, 10, 10, 11, 11, 12, 12, 12, 13, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 14, 14, 15, 16, 15, 14, 14, 15, 7, 10, 11, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 14, 15, 16, 15, 15, 16, 15, 15, 15, 16, 15, 16, 18, 17, 15, 15, 16, 16, 17, 15, 8, 11, 13, 13, 14, 15, 14, 16, 15, 16, 15, 15, 15, 15, 15, 15, 17, 15, 9, 12, 14, 15, 10, 13, 14, 15, 10, 13, 11, 14, 11, 14, 11, 15, 12, 15, 12, 12, 13, 15, 13, 14, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15, 14, 15, 15, 16, 16, 16, 15, 16, 16, 15, 16, 17, 16, }; static const uint16_t levels0[60] = { 317, 92, 62, 60, 19, 17, 10, 7, 6, 5, 5, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const uint16_t levels1[40] = { 311, 91, 61, 28, 10, 6, 5, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const 
uint16_t levels2[340] = { 181,110, 78, 63, 61, 62, 60, 61, 33, 41, 41, 19, 17, 19, 12, 11, 9, 11, 10, 6, 8, 7, 6, 4, 5, 5, 4, 4, 3, 4, 3, 5, 3, 4, 3, 3, 3, 3, 3, 3, 2, 2, 4, 2, 3, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const uint16_t levels3[180] = { 351,122, 76, 61, 41, 42, 24, 30, 22, 19, 11, 9, 10, 8, 5, 5, 4, 5, 5, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 2, 2, 3, 3, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const uint16_t levels4[70] = { 113, 68, 49, 42, 40, 32, 27, 15, 10, 5, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const uint16_t levels5[40] = { 214, 72, 42, 40, 18, 4, 4, 2, 2, 2, 
2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const CoefVLCTable coef_vlcs[6] = { { sizeof(coef0_huffbits), sizeof(levels0)/2, coef0_huffcodes, coef0_huffbits, levels0, }, { sizeof(coef1_huffbits), sizeof(levels1)/2, coef1_huffcodes, coef1_huffbits, levels1, }, { sizeof(coef2_huffbits), sizeof(levels2)/2, coef2_huffcodes, coef2_huffbits, levels2, }, { sizeof(coef3_huffbits), sizeof(levels3)/2, coef3_huffcodes, coef3_huffbits, levels3, }, { sizeof(coef4_huffbits), sizeof(levels4)/2, coef4_huffcodes, coef4_huffbits, levels4, }, { sizeof(coef5_huffbits), sizeof(levels5)/2, coef5_huffcodes, coef5_huffbits, levels5, }, }; #endif /* AVCODEC_WMADATA_H */
123linslouis-android-video-cutter
jni/libavcodec/wmadata.h
C
asf20
69,219
/*
 * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include "avcodec.h"
#include "vp56dsp.h"

/* VP5 loop-filter delta adjustment.
 * Gives a very similar result to the vp6 version except in a few cases.
 *
 * Branchless: relies on `x >> 31` producing an all-ones mask for negative
 * x (arithmetic right shift — true on the compilers FFmpeg targets), and on
 * `(x ^ mask) - mask` computing |x| in two's complement. */
static int vp5_adjust(int v, int t)
{
    int s2, s1 = v >> 31;          /* s1 = sign mask of v */
    v ^= s1;
    v -= s1;                       /* v = |v| */
    v *= v < 2*t;                  /* zero the delta once |v| >= 2*t */
    v -= t;
    s2 = v >> 31;                  /* fold: compute t - | |v| - t | ... */
    v ^= s2;
    v -= s2;
    v = t - v;
    v += s1;                       /* ... then restore the original sign */
    v ^= s1;
    return v;
}

/* VP6 loop-filter delta adjustment: v is returned unchanged unless
 * t < |v| < 2*t, in which case |v| is reflected around t (V = 2*t - |v|)
 * with the original sign restored. */
static int vp6_adjust(int v, int t)
{
    int V = v, s = v >> 31;        /* s = sign mask of v */
    V ^= s;
    V -= s;                        /* V = |v| */
    /* single unsigned compare covers both bounds: if V <= t the
     * subtraction wraps to a huge unsigned value, if V >= 2*t it is
     * >= t-1; either way v is passed through untouched */
    if (V-t-1 >= (unsigned)(t-1))
        return v;
    V = 2*t - V;                   /* reflect |v| around t */
    V += s;
    V ^= s;                        /* restore sign */
    return V;
}

/* Instantiate a horizontal or vertical edge filter: walks 12 pixels along
 * the edge (stepping by line_inc), computes a 4-tap filtered delta across
 * the edge (pixel step pix_inc), adjusts it with the codec-specific
 * pfx##_adjust, and applies +/- delta (clipped to 8 bits) to the two
 * pixels adjacent to the edge. */
#define VP56_EDGE_FILTER(pfx, suf, pix_inc, line_inc) \
static void pfx##_edge_filter_##suf(uint8_t *yuv, int stride, int t) \
{ \
    int pix2_inc = 2 * pix_inc; \
    int i, v; \
\
    for (i=0; i<12; i++) { \
        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4)>>3;\
        v = pfx##_adjust(v, t); \
        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v); \
        yuv[0] = av_clip_uint8(yuv[0] - v); \
        yuv += line_inc; \
    } \
}

VP56_EDGE_FILTER(vp5, hor, 1, stride)
VP56_EDGE_FILTER(vp5, ver, stride, 1)
VP56_EDGE_FILTER(vp6, hor, 1, stride)
VP56_EDGE_FILTER(vp6, ver, stride, 1)

/* Install the VP5 or VP6 edge-filter pair depending on the codec; the ARM
 * init may then replace them with assembly versions. */
void ff_vp56dsp_init(VP56DSPContext *s, enum CodecID codec)
{
    if (codec == CODEC_ID_VP5) {
        s->edge_filter_hor = vp5_edge_filter_hor;
        s->edge_filter_ver = vp5_edge_filter_ver;
    } else {
        s->edge_filter_hor = vp6_edge_filter_hor;
        s->edge_filter_ver = vp6_edge_filter_ver;
    }

    if (ARCH_ARM)
        ff_vp56dsp_init_arm(s, codec);
}
123linslouis-android-video-cutter
jni/libavcodec/vp56dsp.c
C
asf20
2,928
/** * LPC utility code * Copyright (c) 2006 Justin Ruggles <justin.ruggles@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_LPC_H #define AVCODEC_LPC_H #include <stdint.h> #include "dsputil.h" #define ORDER_METHOD_EST 0 #define ORDER_METHOD_2LEVEL 1 #define ORDER_METHOD_4LEVEL 2 #define ORDER_METHOD_8LEVEL 3 #define ORDER_METHOD_SEARCH 4 #define ORDER_METHOD_LOG 5 #define MIN_LPC_ORDER 1 #define MAX_LPC_ORDER 32 /** * Calculate LPC coefficients for multiple orders */ int ff_lpc_calc_coefs(DSPContext *s, const int32_t *samples, int blocksize, int min_order, int max_order, int precision, int32_t coefs[][MAX_LPC_ORDER], int *shift, int use_lpc, int omethod, int max_shift, int zero_shift); void ff_lpc_compute_autocorr(const int32_t *data, int len, int lag, double *autoc); #ifdef LPC_USE_DOUBLE #define LPC_TYPE double #else #define LPC_TYPE float #endif /** * Levinson-Durbin recursion. * Produces LPC coefficients from autocorrelation data. 
*/ static inline int compute_lpc_coefs(const LPC_TYPE *autoc, int max_order, LPC_TYPE *lpc, int lpc_stride, int fail, int normalize) { int i, j; LPC_TYPE err; LPC_TYPE *lpc_last = lpc; if (normalize) err = *autoc++; if (fail && (autoc[max_order - 1] == 0 || err <= 0)) return -1; for(i=0; i<max_order; i++) { LPC_TYPE r = -autoc[i]; if (normalize) { for(j=0; j<i; j++) r -= lpc_last[j] * autoc[i-j-1]; r /= err; err *= 1.0 - (r * r); } lpc[i] = r; for(j=0; j < (i+1)>>1; j++) { LPC_TYPE f = lpc_last[ j]; LPC_TYPE b = lpc_last[i-1-j]; lpc[ j] = f + r * b; lpc[i-1-j] = b + r * f; } if (fail && err < 0) return -1; lpc_last = lpc; lpc += lpc_stride; } return 0; } #endif /* AVCODEC_LPC_H */
123linslouis-android-video-cutter
jni/libavcodec/lpc.h
C
asf20
2,860
/*
 * MJPEG A dump header bitstream filter
 * Copyright (c) 2006 Baptiste Coudurier
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MJPEG A dump header bitstream filter
 * modifies bitstream to be decoded by quicktime
 */

#include "avcodec.h"
#include "bytestream.h"
#include "mjpeg.h"

/**
 * Prepend a 44-byte QuickTime "mjpg" APP1 header to an MJPEG frame,
 * recording the offsets of the DQT/DHT/SOF0/SOS segments inside it.
 *
 * @return 1 with a newly allocated buffer in *poutbuf on success (or when
 *         the bitstream is already formatted), 0 on error with
 *         *poutbuf_size == 0 (and *poutbuf NULL on the failure paths that
 *         reach allocation).
 */
static int mjpega_dump_header(AVBitStreamFilterContext *bsfc,
                              AVCodecContext *avctx, const char *args,
                              uint8_t **poutbuf, int *poutbuf_size,
                              const uint8_t *buf, int buf_size,
                              int keyframe)
{
    uint8_t *poutbufp;
    unsigned dqt = 0, dht = 0, sof0 = 0;
    int i;

    if (avctx->codec_id != CODEC_ID_MJPEG) {
        av_log(avctx, AV_LOG_ERROR, "mjpega bitstream filter only applies to mjpeg codec\n");
        return 0;
    }

    *poutbuf_size = 0;
    *poutbuf = av_malloc(buf_size + 44 + FF_INPUT_BUFFER_PADDING_SIZE);
    /* FIX: the allocation result was previously used unchecked, crashing
     * on out-of-memory; fail cleanly instead (same 0-return convention as
     * the other error paths in this function). */
    if (!*poutbuf) {
        av_log(avctx, AV_LOG_ERROR, "could not allocate output buffer\n");
        return 0;
    }
    poutbufp = *poutbuf;

    /* write SOI followed by the 42-byte APP1 "mjpg" header; the offset
     * fields after "next ptr" are filled in when SOS is found below */
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, SOI);
    bytestream_put_byte(&poutbufp, 0xff);
    bytestream_put_byte(&poutbufp, APP1);
    bytestream_put_be16(&poutbufp, 42); /* size */
    bytestream_put_be32(&poutbufp, 0);
    bytestream_put_buffer(&poutbufp, "mjpg", 4);
    bytestream_put_be32(&poutbufp, buf_size + 44); /* field size */
    bytestream_put_be32(&poutbufp, buf_size + 44); /* pad field size */
    bytestream_put_be32(&poutbufp, 0); /* next ptr */

    /* scan the input for JPEG markers; offsets are biased by 46 =
     * 44-byte header + 2 (input SOI is skipped when copying) */
    for (i = 0; i < buf_size - 1; i++) {
        if (buf[i] == 0xff) {
            switch (buf[i + 1]) {
            case DQT:  dqt  = i + 46; break;
            case DHT:  dht  = i + 46; break;
            case SOF0: sof0 = i + 46; break;
            case SOS:
                bytestream_put_be32(&poutbufp, dqt);  /* quant off */
                bytestream_put_be32(&poutbufp, dht);  /* huff off */
                bytestream_put_be32(&poutbufp, sof0); /* image off */
                bytestream_put_be32(&poutbufp, i + 46); /* scan off */
                bytestream_put_be32(&poutbufp, i + 46 + AV_RB16(buf + i + 2)); /* data off */
                bytestream_put_buffer(&poutbufp, buf + 2, buf_size - 2); /* skip already written SOI */
                *poutbuf_size = poutbufp - *poutbuf;
                return 1;
            case APP1:
                if (i + 8 < buf_size && AV_RL32(buf + i + 8) == AV_RL32("mjpg")) {
                    av_log(avctx, AV_LOG_ERROR, "bitstream already formatted\n");
                    memcpy(*poutbuf, buf, buf_size);
                    *poutbuf_size = buf_size;
                    return 1;
                }
            }
        }
    }

    /* no SOS marker: release the buffer (av_freep also NULLs *poutbuf) */
    av_freep(poutbuf);
    av_log(avctx, AV_LOG_ERROR, "could not find SOS marker in bitstream\n");
    return 0;
}

AVBitStreamFilter mjpega_dump_header_bsf = {
    "mjpegadump",
    0,
    mjpega_dump_header,
};
123linslouis-android-video-cutter
jni/libavcodec/mjpega_dump_header_bsf.c
C
asf20
3,541
/* * IMC compatible decoder * Copyright (c) 2002-2004 Maxim Poliakovski * Copyright (c) 2006 Benjamin Larsson * Copyright (c) 2006 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * IMC - Intel Music Coder * A mdct based codec using a 256 points large transform * divied into 32 bands with some mix of scale factors. * Only mono is supported. 
* */ #include <math.h> #include <stddef.h> #include <stdio.h> #define ALT_BITSTREAM_READER #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "fft.h" #include "imcdata.h" #define IMC_BLOCK_SIZE 64 #define IMC_FRAME_ID 0x21 #define BANDS 32 #define COEFFS 256 typedef struct { float old_floor[BANDS]; float flcoeffs1[BANDS]; float flcoeffs2[BANDS]; float flcoeffs3[BANDS]; float flcoeffs4[BANDS]; float flcoeffs5[BANDS]; float flcoeffs6[BANDS]; float CWdecoded[COEFFS]; /** MDCT tables */ //@{ float mdct_sine_window[COEFFS]; float post_cos[COEFFS]; float post_sin[COEFFS]; float pre_coef1[COEFFS]; float pre_coef2[COEFFS]; float last_fft_im[COEFFS]; //@} int bandWidthT[BANDS]; ///< codewords per band int bitsBandT[BANDS]; ///< how many bits per codeword in band int CWlengthT[COEFFS]; ///< how many bits in each codeword int levlCoeffBuf[BANDS]; int bandFlagsBuf[BANDS]; ///< flags for each band int sumLenArr[BANDS]; ///< bits for all coeffs in band int skipFlagRaw[BANDS]; ///< skip flags are stored in raw form or not int skipFlagBits[BANDS]; ///< bits used to code skip flags int skipFlagCount[BANDS]; ///< skipped coeffients per band int skipFlags[COEFFS]; ///< skip coefficient decoding or not int codewords[COEFFS]; ///< raw codewords read from bitstream float sqrt_tab[30]; GetBitContext gb; int decoder_reset; float one_div_log2; DSPContext dsp; FFTContext fft; DECLARE_ALIGNED(16, FFTComplex, samples)[COEFFS/2]; DECLARE_ALIGNED(16, float, out_samples)[COEFFS]; } IMCContext; static VLC huffman_vlc[4][4]; #define VLC_TABLES_SIZE 9512 static const int vlc_offsets[17] = { 0, 640, 1156, 1732, 2308, 2852, 3396, 3924, 4452, 5220, 5860, 6628, 7268, 7908, 8424, 8936, VLC_TABLES_SIZE}; static VLC_TYPE vlc_tables[VLC_TABLES_SIZE][2]; static av_cold int imc_decode_init(AVCodecContext * avctx) { int i, j; IMCContext *q = avctx->priv_data; double r1, r2; q->decoder_reset = 1; for(i = 0; i < BANDS; i++) q->old_floor[i] = 1.0; /* Build mdct window, a simple sine window 
normalized with sqrt(2) */ ff_sine_window_init(q->mdct_sine_window, COEFFS); for(i = 0; i < COEFFS; i++) q->mdct_sine_window[i] *= sqrt(2.0); for(i = 0; i < COEFFS/2; i++){ q->post_cos[i] = cos(i / 256.0 * M_PI); q->post_sin[i] = sin(i / 256.0 * M_PI); r1 = sin((i * 4.0 + 1.0) / 1024.0 * M_PI); r2 = cos((i * 4.0 + 1.0) / 1024.0 * M_PI); if (i & 0x1) { q->pre_coef1[i] = (r1 + r2) * sqrt(2.0); q->pre_coef2[i] = -(r1 - r2) * sqrt(2.0); } else { q->pre_coef1[i] = -(r1 + r2) * sqrt(2.0); q->pre_coef2[i] = (r1 - r2) * sqrt(2.0); } q->last_fft_im[i] = 0; } /* Generate a square root table */ for(i = 0; i < 30; i++) { q->sqrt_tab[i] = sqrt(i); } /* initialize the VLC tables */ for(i = 0; i < 4 ; i++) { for(j = 0; j < 4; j++) { huffman_vlc[i][j].table = &vlc_tables[vlc_offsets[i * 4 + j]]; huffman_vlc[i][j].table_allocated = vlc_offsets[i * 4 + j + 1] - vlc_offsets[i * 4 + j]; init_vlc(&huffman_vlc[i][j], 9, imc_huffman_sizes[i], imc_huffman_lens[i][j], 1, 1, imc_huffman_bits[i][j], 2, 2, INIT_VLC_USE_NEW_STATIC); } } q->one_div_log2 = 1/log(2); ff_fft_init(&q->fft, 7, 1); dsputil_init(&q->dsp, avctx); avctx->sample_fmt = SAMPLE_FMT_S16; avctx->channel_layout = (avctx->channels==2) ? 
CH_LAYOUT_STEREO : CH_LAYOUT_MONO; return 0; } static void imc_calculate_coeffs(IMCContext* q, float* flcoeffs1, float* flcoeffs2, int* bandWidthT, float* flcoeffs3, float* flcoeffs5) { float workT1[BANDS]; float workT2[BANDS]; float workT3[BANDS]; float snr_limit = 1.e-30; float accum = 0.0; int i, cnt2; for(i = 0; i < BANDS; i++) { flcoeffs5[i] = workT2[i] = 0.0; if (bandWidthT[i]){ workT1[i] = flcoeffs1[i] * flcoeffs1[i]; flcoeffs3[i] = 2.0 * flcoeffs2[i]; } else { workT1[i] = 0.0; flcoeffs3[i] = -30000.0; } workT3[i] = bandWidthT[i] * workT1[i] * 0.01; if (workT3[i] <= snr_limit) workT3[i] = 0.0; } for(i = 0; i < BANDS; i++) { for(cnt2 = i; cnt2 < cyclTab[i]; cnt2++) flcoeffs5[cnt2] = flcoeffs5[cnt2] + workT3[i]; workT2[cnt2-1] = workT2[cnt2-1] + workT3[i]; } for(i = 1; i < BANDS; i++) { accum = (workT2[i-1] + accum) * imc_weights1[i-1]; flcoeffs5[i] += accum; } for(i = 0; i < BANDS; i++) workT2[i] = 0.0; for(i = 0; i < BANDS; i++) { for(cnt2 = i-1; cnt2 > cyclTab2[i]; cnt2--) flcoeffs5[cnt2] += workT3[i]; workT2[cnt2+1] += workT3[i]; } accum = 0.0; for(i = BANDS-2; i >= 0; i--) { accum = (workT2[i+1] + accum) * imc_weights2[i]; flcoeffs5[i] += accum; //there is missing code here, but it seems to never be triggered } } static void imc_read_level_coeffs(IMCContext* q, int stream_format_code, int* levlCoeffs) { int i; VLC *hufftab[4]; int start = 0; const uint8_t *cb_sel; int s; s = stream_format_code >> 1; hufftab[0] = &huffman_vlc[s][0]; hufftab[1] = &huffman_vlc[s][1]; hufftab[2] = &huffman_vlc[s][2]; hufftab[3] = &huffman_vlc[s][3]; cb_sel = imc_cb_select[s]; if(stream_format_code & 4) start = 1; if(start) levlCoeffs[0] = get_bits(&q->gb, 7); for(i = start; i < BANDS; i++){ levlCoeffs[i] = get_vlc2(&q->gb, hufftab[cb_sel[i]]->table, hufftab[cb_sel[i]]->bits, 2); if(levlCoeffs[i] == 17) levlCoeffs[i] += get_bits(&q->gb, 4); } } static void imc_decode_level_coefficients(IMCContext* q, int* levlCoeffBuf, float* flcoeffs1, float* flcoeffs2) { int i, level; float 
tmp, tmp2; //maybe some frequency division thingy flcoeffs1[0] = 20000.0 / pow (2, levlCoeffBuf[0] * 0.18945); // 0.18945 = log2(10) * 0.05703125 flcoeffs2[0] = log(flcoeffs1[0])/log(2); tmp = flcoeffs1[0]; tmp2 = flcoeffs2[0]; for(i = 1; i < BANDS; i++) { level = levlCoeffBuf[i]; if (level == 16) { flcoeffs1[i] = 1.0; flcoeffs2[i] = 0.0; } else { if (level < 17) level -=7; else if (level <= 24) level -=32; else level -=16; tmp *= imc_exp_tab[15 + level]; tmp2 += 0.83048 * level; // 0.83048 = log2(10) * 0.25 flcoeffs1[i] = tmp; flcoeffs2[i] = tmp2; } } } static void imc_decode_level_coefficients2(IMCContext* q, int* levlCoeffBuf, float* old_floor, float* flcoeffs1, float* flcoeffs2) { int i; //FIXME maybe flag_buf = noise coding and flcoeffs1 = new scale factors // and flcoeffs2 old scale factors // might be incomplete due to a missing table that is in the binary code for(i = 0; i < BANDS; i++) { flcoeffs1[i] = 0; if(levlCoeffBuf[i] < 16) { flcoeffs1[i] = imc_exp_tab2[levlCoeffBuf[i]] * old_floor[i]; flcoeffs2[i] = (levlCoeffBuf[i]-7) * 0.83048 + flcoeffs2[i]; // 0.83048 = log2(10) * 0.25 } else { flcoeffs1[i] = old_floor[i]; } } } /** * Perform bit allocation depending on bits available */ static int bit_allocation (IMCContext* q, int stream_format_code, int freebits, int flag) { int i, j; const float limit = -1.e20; float highest = 0.0; int indx; int t1 = 0; int t2 = 1; float summa = 0.0; int iacc = 0; int summer = 0; int rres, cwlen; float lowest = 1.e10; int low_indx = 0; float workT[32]; int flg; int found_indx = 0; for(i = 0; i < BANDS; i++) highest = FFMAX(highest, q->flcoeffs1[i]); for(i = 0; i < BANDS-1; i++) { q->flcoeffs4[i] = q->flcoeffs3[i] - log(q->flcoeffs5[i])/log(2); } q->flcoeffs4[BANDS - 1] = limit; highest = highest * 0.25; for(i = 0; i < BANDS; i++) { indx = -1; if ((band_tab[i+1] - band_tab[i]) == q->bandWidthT[i]) indx = 0; if ((band_tab[i+1] - band_tab[i]) > q->bandWidthT[i]) indx = 1; if (((band_tab[i+1] - band_tab[i])/2) >= 
q->bandWidthT[i]) indx = 2; if (indx == -1) return -1; q->flcoeffs4[i] = q->flcoeffs4[i] + xTab[(indx*2 + (q->flcoeffs1[i] < highest)) * 2 + flag]; } if (stream_format_code & 0x2) { q->flcoeffs4[0] = limit; q->flcoeffs4[1] = limit; q->flcoeffs4[2] = limit; q->flcoeffs4[3] = limit; } for(i = (stream_format_code & 0x2)?4:0; i < BANDS-1; i++) { iacc += q->bandWidthT[i]; summa += q->bandWidthT[i] * q->flcoeffs4[i]; } q->bandWidthT[BANDS-1] = 0; summa = (summa * 0.5 - freebits) / iacc; for(i = 0; i < BANDS/2; i++) { rres = summer - freebits; if((rres >= -8) && (rres <= 8)) break; summer = 0; iacc = 0; for(j = (stream_format_code & 0x2)?4:0; j < BANDS; j++) { cwlen = av_clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6); q->bitsBandT[j] = cwlen; summer += q->bandWidthT[j] * cwlen; if (cwlen > 0) iacc += q->bandWidthT[j]; } flg = t2; t2 = 1; if (freebits < summer) t2 = -1; if (i == 0) flg = t2; if(flg != t2) t1++; summa = (float)(summer - freebits) / ((t1 + 1) * iacc) + summa; } for(i = (stream_format_code & 0x2)?4:0; i < BANDS; i++) { for(j = band_tab[i]; j < band_tab[i+1]; j++) q->CWlengthT[j] = q->bitsBandT[i]; } if (freebits > summer) { for(i = 0; i < BANDS; i++) { workT[i] = (q->bitsBandT[i] == 6) ? -1.e20 : (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415); } highest = 0.0; do{ if (highest <= -1.e20) break; found_indx = 0; highest = -1.e20; for(i = 0; i < BANDS; i++) { if (workT[i] > highest) { highest = workT[i]; found_indx = i; } } if (highest > -1.e20) { workT[found_indx] -= 2.0; if (++(q->bitsBandT[found_indx]) == 6) workT[found_indx] = -1.e20; for(j = band_tab[found_indx]; j < band_tab[found_indx+1] && (freebits > summer); j++){ q->CWlengthT[j]++; summer++; } } }while (freebits > summer); } if (freebits < summer) { for(i = 0; i < BANDS; i++) { workT[i] = q->bitsBandT[i] ? 
(q->bitsBandT[i] * -2 + q->flcoeffs4[i] + 1.585) : 1.e20; } if (stream_format_code & 0x2) { workT[0] = 1.e20; workT[1] = 1.e20; workT[2] = 1.e20; workT[3] = 1.e20; } while (freebits < summer){ lowest = 1.e10; low_indx = 0; for(i = 0; i < BANDS; i++) { if (workT[i] < lowest) { lowest = workT[i]; low_indx = i; } } //if(lowest >= 1.e10) break; workT[low_indx] = lowest + 2.0; if (!(--q->bitsBandT[low_indx])) workT[low_indx] = 1.e20; for(j = band_tab[low_indx]; j < band_tab[low_indx+1] && (freebits < summer); j++){ if(q->CWlengthT[j] > 0){ q->CWlengthT[j]--; summer--; } } } } return 0; } static void imc_get_skip_coeff(IMCContext* q) { int i, j; memset(q->skipFlagBits, 0, sizeof(q->skipFlagBits)); memset(q->skipFlagCount, 0, sizeof(q->skipFlagCount)); for(i = 0; i < BANDS; i++) { if (!q->bandFlagsBuf[i] || !q->bandWidthT[i]) continue; if (!q->skipFlagRaw[i]) { q->skipFlagBits[i] = band_tab[i+1] - band_tab[i]; for(j = band_tab[i]; j < band_tab[i+1]; j++) { if ((q->skipFlags[j] = get_bits1(&q->gb))) q->skipFlagCount[i]++; } } else { for(j = band_tab[i]; j < (band_tab[i+1]-1); j += 2) { if(!get_bits1(&q->gb)){//0 q->skipFlagBits[i]++; q->skipFlags[j]=1; q->skipFlags[j+1]=1; q->skipFlagCount[i] += 2; }else{ if(get_bits1(&q->gb)){//11 q->skipFlagBits[i] +=2; q->skipFlags[j]=0; q->skipFlags[j+1]=1; q->skipFlagCount[i]++; }else{ q->skipFlagBits[i] +=3; q->skipFlags[j+1]=0; if(!get_bits1(&q->gb)){//100 q->skipFlags[j]=1; q->skipFlagCount[i]++; }else{//101 q->skipFlags[j]=0; } } } } if (j < band_tab[i+1]) { q->skipFlagBits[i]++; if ((q->skipFlags[j] = get_bits1(&q->gb))) q->skipFlagCount[i]++; } } } } /** * Increase highest' band coefficient sizes as some bits won't be used */ static void imc_adjust_bit_allocation (IMCContext* q, int summer) { float workT[32]; int corrected = 0; int i, j; float highest = 0; int found_indx=0; for(i = 0; i < BANDS; i++) { workT[i] = (q->bitsBandT[i] == 6) ? 
-1.e20 : (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415); } while (corrected < summer) { if(highest <= -1.e20) break; highest = -1.e20; for(i = 0; i < BANDS; i++) { if (workT[i] > highest) { highest = workT[i]; found_indx = i; } } if (highest > -1.e20) { workT[found_indx] -= 2.0; if (++(q->bitsBandT[found_indx]) == 6) workT[found_indx] = -1.e20; for(j = band_tab[found_indx]; j < band_tab[found_indx+1] && (corrected < summer); j++) { if (!q->skipFlags[j] && (q->CWlengthT[j] < 6)) { q->CWlengthT[j]++; corrected++; } } } } } static void imc_imdct256(IMCContext *q) { int i; float re, im; /* prerotation */ for(i=0; i < COEFFS/2; i++){ q->samples[i].re = -(q->pre_coef1[i] * q->CWdecoded[COEFFS-1-i*2]) - (q->pre_coef2[i] * q->CWdecoded[i*2]); q->samples[i].im = (q->pre_coef2[i] * q->CWdecoded[COEFFS-1-i*2]) - (q->pre_coef1[i] * q->CWdecoded[i*2]); } /* FFT */ ff_fft_permute(&q->fft, q->samples); ff_fft_calc (&q->fft, q->samples); /* postrotation, window and reorder */ for(i = 0; i < COEFFS/2; i++){ re = (q->samples[i].re * q->post_cos[i]) + (-q->samples[i].im * q->post_sin[i]); im = (-q->samples[i].im * q->post_cos[i]) - (q->samples[i].re * q->post_sin[i]); q->out_samples[i*2] = (q->mdct_sine_window[COEFFS-1-i*2] * q->last_fft_im[i]) + (q->mdct_sine_window[i*2] * re); q->out_samples[COEFFS-1-i*2] = (q->mdct_sine_window[i*2] * q->last_fft_im[i]) - (q->mdct_sine_window[COEFFS-1-i*2] * re); q->last_fft_im[i] = im; } } static int inverse_quant_coeff (IMCContext* q, int stream_format_code) { int i, j; int middle_value, cw_len, max_size; const float* quantizer; for(i = 0; i < BANDS; i++) { for(j = band_tab[i]; j < band_tab[i+1]; j++) { q->CWdecoded[j] = 0; cw_len = q->CWlengthT[j]; if (cw_len <= 0 || q->skipFlags[j]) continue; max_size = 1 << cw_len; middle_value = max_size >> 1; if (q->codewords[j] >= max_size || q->codewords[j] < 0) return -1; if (cw_len >= 4){ quantizer = imc_quantizer2[(stream_format_code & 2) >> 1]; if (q->codewords[j] >= middle_value) q->CWdecoded[j] = 
quantizer[q->codewords[j] - 8] * q->flcoeffs6[i]; else q->CWdecoded[j] = -quantizer[max_size - q->codewords[j] - 8 - 1] * q->flcoeffs6[i]; }else{ quantizer = imc_quantizer1[((stream_format_code & 2) >> 1) | (q->bandFlagsBuf[i] << 1)]; if (q->codewords[j] >= middle_value) q->CWdecoded[j] = quantizer[q->codewords[j] - 1] * q->flcoeffs6[i]; else q->CWdecoded[j] = -quantizer[max_size - 2 - q->codewords[j]] * q->flcoeffs6[i]; } } } return 0; } static int imc_get_coeffs (IMCContext* q) { int i, j, cw_len, cw; for(i = 0; i < BANDS; i++) { if(!q->sumLenArr[i]) continue; if (q->bandFlagsBuf[i] || q->bandWidthT[i]) { for(j = band_tab[i]; j < band_tab[i+1]; j++) { cw_len = q->CWlengthT[j]; cw = 0; if (get_bits_count(&q->gb) + cw_len > 512){ //av_log(NULL,0,"Band %i coeff %i cw_len %i\n",i,j,cw_len); return -1; } if(cw_len && (!q->bandFlagsBuf[i] || !q->skipFlags[j])) cw = get_bits(&q->gb, cw_len); q->codewords[j] = cw; } } } return 0; } static int imc_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; IMCContext *q = avctx->priv_data; int stream_format_code; int imc_hdr, i, j; int flag; int bits, summer; int counter, bitscount; uint16_t buf16[IMC_BLOCK_SIZE / 2]; if (buf_size < IMC_BLOCK_SIZE) { av_log(avctx, AV_LOG_ERROR, "imc frame too small!\n"); return -1; } for(i = 0; i < IMC_BLOCK_SIZE / 2; i++) buf16[i] = bswap_16(((const uint16_t*)buf)[i]); init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8); /* Check the frame header */ imc_hdr = get_bits(&q->gb, 9); if (imc_hdr != IMC_FRAME_ID) { av_log(avctx, AV_LOG_ERROR, "imc frame header check failed!\n"); av_log(avctx, AV_LOG_ERROR, "got %x instead of 0x21.\n", imc_hdr); return -1; } stream_format_code = get_bits(&q->gb, 3); if(stream_format_code & 1){ av_log(avctx, AV_LOG_ERROR, "Stream code format %X is not supported\n", stream_format_code); return -1; } // av_log(avctx, AV_LOG_DEBUG, "stream_format_code = %d\n", 
stream_format_code); if (stream_format_code & 0x04) q->decoder_reset = 1; if(q->decoder_reset) { memset(q->out_samples, 0, sizeof(q->out_samples)); for(i = 0; i < BANDS; i++)q->old_floor[i] = 1.0; for(i = 0; i < COEFFS; i++)q->CWdecoded[i] = 0; q->decoder_reset = 0; } flag = get_bits1(&q->gb); imc_read_level_coeffs(q, stream_format_code, q->levlCoeffBuf); if (stream_format_code & 0x4) imc_decode_level_coefficients(q, q->levlCoeffBuf, q->flcoeffs1, q->flcoeffs2); else imc_decode_level_coefficients2(q, q->levlCoeffBuf, q->old_floor, q->flcoeffs1, q->flcoeffs2); memcpy(q->old_floor, q->flcoeffs1, 32 * sizeof(float)); counter = 0; for (i=0 ; i<BANDS ; i++) { if (q->levlCoeffBuf[i] == 16) { q->bandWidthT[i] = 0; counter++; } else q->bandWidthT[i] = band_tab[i+1] - band_tab[i]; } memset(q->bandFlagsBuf, 0, BANDS * sizeof(int)); for(i = 0; i < BANDS-1; i++) { if (q->bandWidthT[i]) q->bandFlagsBuf[i] = get_bits1(&q->gb); } imc_calculate_coeffs(q, q->flcoeffs1, q->flcoeffs2, q->bandWidthT, q->flcoeffs3, q->flcoeffs5); bitscount = 0; /* first 4 bands will be assigned 5 bits per coefficient */ if (stream_format_code & 0x2) { bitscount += 15; q->bitsBandT[0] = 5; q->CWlengthT[0] = 5; q->CWlengthT[1] = 5; q->CWlengthT[2] = 5; for(i = 1; i < 4; i++){ bits = (q->levlCoeffBuf[i] == 16) ? 
0 : 5; q->bitsBandT[i] = bits; for(j = band_tab[i]; j < band_tab[i+1]; j++) { q->CWlengthT[j] = bits; bitscount += bits; } } } if(bit_allocation (q, stream_format_code, 512 - bitscount - get_bits_count(&q->gb), flag) < 0) { av_log(avctx, AV_LOG_ERROR, "Bit allocations failed\n"); q->decoder_reset = 1; return -1; } for(i = 0; i < BANDS; i++) { q->sumLenArr[i] = 0; q->skipFlagRaw[i] = 0; for(j = band_tab[i]; j < band_tab[i+1]; j++) q->sumLenArr[i] += q->CWlengthT[j]; if (q->bandFlagsBuf[i]) if( (((band_tab[i+1] - band_tab[i]) * 1.5) > q->sumLenArr[i]) && (q->sumLenArr[i] > 0)) q->skipFlagRaw[i] = 1; } imc_get_skip_coeff(q); for(i = 0; i < BANDS; i++) { q->flcoeffs6[i] = q->flcoeffs1[i]; /* band has flag set and at least one coded coefficient */ if (q->bandFlagsBuf[i] && (band_tab[i+1] - band_tab[i]) != q->skipFlagCount[i]){ q->flcoeffs6[i] *= q->sqrt_tab[band_tab[i+1] - band_tab[i]] / q->sqrt_tab[(band_tab[i+1] - band_tab[i] - q->skipFlagCount[i])]; } } /* calculate bits left, bits needed and adjust bit allocation */ bits = summer = 0; for(i = 0; i < BANDS; i++) { if (q->bandFlagsBuf[i]) { for(j = band_tab[i]; j < band_tab[i+1]; j++) { if(q->skipFlags[j]) { summer += q->CWlengthT[j]; q->CWlengthT[j] = 0; } } bits += q->skipFlagBits[i]; summer -= q->skipFlagBits[i]; } } imc_adjust_bit_allocation(q, summer); for(i = 0; i < BANDS; i++) { q->sumLenArr[i] = 0; for(j = band_tab[i]; j < band_tab[i+1]; j++) if (!q->skipFlags[j]) q->sumLenArr[i] += q->CWlengthT[j]; } memset(q->codewords, 0, sizeof(q->codewords)); if(imc_get_coeffs(q) < 0) { av_log(avctx, AV_LOG_ERROR, "Read coefficients failed\n"); q->decoder_reset = 1; return 0; } if(inverse_quant_coeff(q, stream_format_code) < 0) { av_log(avctx, AV_LOG_ERROR, "Inverse quantization of coefficients failed\n"); q->decoder_reset = 1; return 0; } memset(q->skipFlags, 0, sizeof(q->skipFlags)); imc_imdct256(q); q->dsp.float_to_int16(data, q->out_samples, COEFFS); *data_size = COEFFS * sizeof(int16_t); return IMC_BLOCK_SIZE; } 
static av_cold int imc_decode_close(AVCodecContext * avctx) { IMCContext *q = avctx->priv_data; ff_fft_end(&q->fft); return 0; } AVCodec imc_decoder = { .name = "imc", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_IMC, .priv_data_size = sizeof(IMCContext), .init = imc_decode_init, .close = imc_decode_close, .decode = imc_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"), };
123linslouis-android-video-cutter
jni/libavcodec/imc.c
C
asf20
25,274
/* * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Context Adaptive Binary Arithmetic Coder. */ #include <string.h> #include "libavutil/common.h" #include "get_bits.h" #include "cabac.h" static const uint8_t lps_range[64][4]= { {128,176,208,240}, {128,167,197,227}, {128,158,187,216}, {123,150,178,205}, {116,142,169,195}, {111,135,160,185}, {105,128,152,175}, {100,122,144,166}, { 95,116,137,158}, { 90,110,130,150}, { 85,104,123,142}, { 81, 99,117,135}, { 77, 94,111,128}, { 73, 89,105,122}, { 69, 85,100,116}, { 66, 80, 95,110}, { 62, 76, 90,104}, { 59, 72, 86, 99}, { 56, 69, 81, 94}, { 53, 65, 77, 89}, { 51, 62, 73, 85}, { 48, 59, 69, 80}, { 46, 56, 66, 76}, { 43, 53, 63, 72}, { 41, 50, 59, 69}, { 39, 48, 56, 65}, { 37, 45, 54, 62}, { 35, 43, 51, 59}, { 33, 41, 48, 56}, { 32, 39, 46, 53}, { 30, 37, 43, 50}, { 29, 35, 41, 48}, { 27, 33, 39, 45}, { 26, 31, 37, 43}, { 24, 30, 35, 41}, { 23, 28, 33, 39}, { 22, 27, 32, 37}, { 21, 26, 30, 35}, { 20, 24, 29, 33}, { 19, 23, 27, 31}, { 18, 22, 26, 30}, { 17, 21, 25, 28}, { 16, 20, 23, 27}, { 15, 19, 22, 25}, { 14, 18, 21, 24}, { 14, 17, 20, 23}, { 13, 16, 19, 22}, { 12, 15, 18, 21}, { 12, 14, 17, 20}, { 11, 14, 16, 19}, 
{ 11, 13, 15, 18}, { 10, 12, 15, 17}, { 10, 12, 14, 16}, { 9, 11, 13, 15}, { 9, 11, 12, 14}, { 8, 10, 12, 14}, { 8, 9, 11, 13}, { 7, 9, 11, 12}, { 7, 9, 10, 12}, { 7, 8, 10, 11}, { 6, 8, 9, 11}, { 6, 7, 9, 10}, { 6, 7, 8, 9}, { 2, 2, 2, 2}, }; uint8_t ff_h264_mlps_state[4*64]; uint8_t ff_h264_lps_range[4*2*64]; uint8_t ff_h264_lps_state[2*64]; uint8_t ff_h264_mps_state[2*64]; static const uint8_t mps_state[64]= { 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16, 17,18,19,20,21,22,23,24, 25,26,27,28,29,30,31,32, 33,34,35,36,37,38,39,40, 41,42,43,44,45,46,47,48, 49,50,51,52,53,54,55,56, 57,58,59,60,61,62,62,63, }; static const uint8_t lps_state[64]= { 0, 0, 1, 2, 2, 4, 4, 5, 6, 7, 8, 9, 9,11,11,12, 13,13,15,15,16,16,18,18, 19,19,21,21,22,22,23,24, 24,25,26,26,27,27,28,29, 29,30,30,30,31,32,32,33, 33,33,34,34,35,35,35,36, 36,36,37,37,37,38,38,63, }; #if 0 const uint8_t ff_h264_norm_shift_old[128]= { 7,6,5,5,4,4,4,4,3,3,3,3,3,3,3,3, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, }; #endif const uint8_t ff_h264_norm_shift[512]= { 9,8,7,7,6,6,6,6,5,5,5,5,5,5,5,5, 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, }; /** * * @param buf_size size of buf in bits */ void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size){ init_put_bits(&c->pb, buf, buf_size); c->low= 0; c->range= 0x1FE; c->outstanding_count= 0; #ifdef STRICT_LIMITS c->sym_count =0; #endif c->pb.bit_left++; //avoids firstBitFlag } /** * * @param buf_size size of buf in bits */ void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){ c->bytestream_start= c->bytestream= buf; c->bytestream_end= buf + buf_size; #if CABAC_BITS == 16 c->low = (*c->bytestream++)<<18; c->low+= (*c->bytestream++)<<10; #else c->low = (*c->bytestream++)<<10; #endif c->low+= ((*c->bytestream++)<<2) + 2; c->range= 0x1FE; } void ff_init_cabac_states(CABACContext *c){ int i, j; for(i=0; i<64; i++){ for(j=0; j<4; j++){ //FIXME check if this is worth the 1 shift we save ff_h264_lps_range[j*2*64+2*i+0]= ff_h264_lps_range[j*2*64+2*i+1]= lps_range[i][j]; } ff_h264_mlps_state[128+2*i+0]= ff_h264_mps_state[2*i+0]= 2*mps_state[i]+0; ff_h264_mlps_state[128+2*i+1]= ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1; if( i ){ #ifdef BRANCHLESS_CABAC_DECODER ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0; ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1; }else{ ff_h264_mlps_state[128-2*i-1]= 1; ff_h264_mlps_state[128-2*i-2]= 0; #else ff_h264_lps_state[2*i+0]= 2*lps_state[i]+0; ff_h264_lps_state[2*i+1]= 2*lps_state[i]+1; }else{ ff_h264_lps_state[2*i+0]= 1; ff_h264_lps_state[2*i+1]= 0; #endif } } } #ifdef TEST #define SIZE 10240 #include "libavutil/lfg.h" #include "avcodec.h" #include "cabac.h" int main(void){ CABACContext c; uint8_t b[9*SIZE]; uint8_t r[9*SIZE]; int i; uint8_t state[10]= {0}; AVLFG prng; 
av_lfg_init(&prng, 1); ff_init_cabac_encoder(&c, b, SIZE); ff_init_cabac_states(&c); for(i=0; i<SIZE; i++){ r[i] = av_lfg_get(&prng) % 7; } for(i=0; i<SIZE; i++){ START_TIMER put_cabac_bypass(&c, r[i]&1); STOP_TIMER("put_cabac_bypass") } for(i=0; i<SIZE; i++){ START_TIMER put_cabac(&c, state, r[i]&1); STOP_TIMER("put_cabac") } for(i=0; i<SIZE; i++){ START_TIMER put_cabac_u(&c, state, r[i], 6, 3, i&1); STOP_TIMER("put_cabac_u") } for(i=0; i<SIZE; i++){ START_TIMER put_cabac_ueg(&c, state, r[i], 3, 0, 1, 2); STOP_TIMER("put_cabac_ueg") } put_cabac_terminate(&c, 1); ff_init_cabac_decoder(&c, b, SIZE); memset(state, 0, sizeof(state)); for(i=0; i<SIZE; i++){ START_TIMER if( (r[i]&1) != get_cabac_bypass(&c) ) av_log(NULL, AV_LOG_ERROR, "CABAC bypass failure at %d\n", i); STOP_TIMER("get_cabac_bypass") } for(i=0; i<SIZE; i++){ START_TIMER if( (r[i]&1) != get_cabac(&c, state) ) av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i); STOP_TIMER("get_cabac") } #if 0 for(i=0; i<SIZE; i++){ START_TIMER if( r[i] != get_cabac_u(&c, state, (i&1) ? 6 : 7, 3, i&1) ) av_log(NULL, AV_LOG_ERROR, "CABAC unary (truncated) binarization failure at %d\n", i); STOP_TIMER("get_cabac_u") } for(i=0; i<SIZE; i++){ START_TIMER if( r[i] != get_cabac_ueg(&c, state, 3, 0, 1, 2)) av_log(NULL, AV_LOG_ERROR, "CABAC unary (truncated) binarization failure at %d\n", i); STOP_TIMER("get_cabac_ueg") } #endif if(!get_cabac_terminate(&c)) av_log(NULL, AV_LOG_ERROR, "where's the Terminator?\n"); return 0; } #endif /* TEST */
123linslouis-android-video-cutter
jni/libavcodec/cabac.c
C
asf20
8,106
/*
 * ADX ADPCM codecs
 * Copyright (c) 2001,2003 BERO
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "adx.h"

/**
 * @file
 * SEGA CRI adx codecs.
 *
 * Reference documents:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html
 * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
 */

/* 18 bytes <-> 32 samples */

/**
 * Encode one block of 32 16-bit samples into 18 bytes of ADX ADPCM.
 *
 * @param adx  output buffer, must hold at least 18 bytes
 * @param wav  input, exactly 32 signed 16-bit samples
 * @param prev predictor state (two previous samples), updated in place
 */
static void adx_encode(unsigned char *adx, const short *wav, PREV *prev)
{
    int scale;
    int i;
    int s0, s1, s2, d;
    int max = 0;
    int min = 0;
    int data[32];

    s1 = prev->s1;
    s2 = prev->s2;
    /* second-order linear prediction; d is the residual to be quantized */
    for (i = 0; i < 32; i++) {
        s0 = wav[i];
        d = ((s0 << 14) - SCALE1 * s1 + SCALE2 * s2) / BASEVOL;
        data[i] = d;
        if (max < d)
            max = d;
        if (min > d)
            min = d;
        s2 = s1;
        s1 = s0;
    }
    prev->s1 = s1;
    prev->s2 = s2;

    /* quantized nibbles must fit in -8..+7 */

    if (max == 0 && min == 0) {
        /* silent block: all-zero payload, scale of 0 */
        memset(adx, 0, 18);
        return;
    }

    /* pick the smallest scale that keeps every residual inside a nibble */
    if (max / 7 > -min / 8)
        scale = max / 7;
    else
        scale = -min / 8;

    if (scale == 0)
        scale = 1;

    AV_WB16(adx, scale);

    /* pack two 4-bit codes per byte, high nibble first */
    for (i = 0; i < 16; i++) {
        adx[i + 2] = ((data[i * 2] / scale) << 4) | ((data[i * 2 + 1] / scale) & 0xf);
    }
}

/**
 * Write the fixed 0x24-byte ADX stream header.
 *
 * @param avctx   codec context (channels, sample_rate are read)
 * @param buf     output buffer
 * @param bufsize number of bytes available in buf
 * @return number of header bytes written (0x24), or -1 if bufsize is too small
 */
static int adx_encode_header(AVCodecContext *avctx, unsigned char *buf, size_t bufsize)
{
#if 0
    struct {
        uint32_t offset; /* 0x80000000 + sample start - 4 */
        unsigned char unknown1[3]; /* 03 12 04 */
        unsigned char channel; /* 1 or 2 */
        uint32_t freq;
        uint32_t size;
        uint32_t unknown2; /* 01 f4 03 00 */
        uint32_t unknown3; /* 00 00 00 00 */
        uint32_t unknown4; /* 00 00 00 00 */

    /* if loop
        unknown3 00 15 00 01
        unknown4 00 00 00 01
        long loop_start_sample;
        long loop_start_byte;
        long loop_end_sample;
        long loop_end_byte;
        long
    */
    } adxhdr; /* big endian */
    /* offset-6 "(c)CRI" */
#endif
    /* the original ignored bufsize entirely; refuse to overrun small buffers */
    if (bufsize < 0x24)
        return -1;
    AV_WB32(buf + 0x00, 0x80000000 | 0x20);
    AV_WB32(buf + 0x04, 0x03120400 | avctx->channels);
    AV_WB32(buf + 0x08, avctx->sample_rate);
    AV_WB32(buf + 0x0c, 0); /* FIXME: set after */
    AV_WB32(buf + 0x10, 0x01040300);
    AV_WB32(buf + 0x14, 0x00000000);
    AV_WB32(buf + 0x18, 0x00000000);
    memcpy(buf + 0x1c, "\0\0(c)CRI", 8);
    return 0x20 + 4;
}

/** Initialize the ADX encoder: mono/stereo only, 32 samples per frame. */
static av_cold int adx_encode_init(AVCodecContext *avctx)
{
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    avctx->frame_size = 32;

    avctx->coded_frame = avcodec_alloc_frame();
    /* the original dereferenced the result unconditionally; fail cleanly on OOM */
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    avctx->coded_frame->key_frame = 1;

//    avctx->bit_rate = avctx->sample_rate*avctx->channels*18*8/32;

    av_log(avctx, AV_LOG_DEBUG, "adx encode init\n");

    return 0;
}

/** Free the coded_frame allocated in init. */
static av_cold int adx_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}

/**
 * Encode one frame of PCM into ADX blocks, emitting the stream header first.
 *
 * @param avctx    codec context
 * @param frame    output buffer
 * @param buf_size size of the output buffer in bytes
 * @param data     input: frame_size * channels interleaved int16 samples
 * @return number of bytes written, or -1 on error
 */
static int adx_encode_frame(AVCodecContext *avctx,
                uint8_t *frame, int buf_size, void *data)
{
    ADXContext *c = avctx->priv_data;
    const short *samples = data;
    unsigned char *dst = frame;
    int rest = avctx->frame_size;

/*
    input data size =
    ffmpeg.c: do_audio_out()
    frame_bytes = enc->frame_size * 2 * enc->channels;
*/

//    printf("sz=%d ",buf_size); fflush(stdout);
    if (!c->header_parsed) {
        int hdrsize = adx_encode_header(avctx, dst, buf_size);
        if (hdrsize < 0)
            return -1; /* output buffer too small for the header */
        dst += hdrsize;
        c->header_parsed = 1;
    }

    if (avctx->channels == 1) {
        while (rest >= 32) {
            adx_encode(dst, samples, c->prev);
            dst += 18;
            samples += 32;
            rest -= 32;
        }
    } else {
        while (rest >= 32 * 2) {
            short tmpbuf[32 * 2];
            int i;

            /* de-interleave into left block then right block */
            for (i = 0; i < 32; i++) {
                tmpbuf[i]      = samples[i * 2];
                tmpbuf[i + 32] = samples[i * 2 + 1];
            }

            adx_encode(dst, tmpbuf, c->prev);
            adx_encode(dst + 18, tmpbuf + 32, c->prev + 1);
            dst += 18 * 2;
            samples += 32 * 2;
            rest -= 32 * 2;
        }
    }
    return dst - frame;
}

AVCodec adpcm_adx_encoder = {
    "adpcm_adx",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_ADPCM_ADX,
    sizeof(ADXContext),
    adx_encode_init,
    adx_encode_frame,
    adx_encode_close,
    NULL,
    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
};
123linslouis-android-video-cutter
jni/libavcodec/adxenc.c
C
asf20
4,969
/* * DSP Group TrueSpeech compatible decoder * copyright (c) 2005 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_TRUESPEECH_DATA_H #define AVCODEC_TRUESPEECH_DATA_H #include <stdint.h> /* codebooks fo expanding input filter */ static const int16_t ts_cb_0[32] = { 0x8240, 0x8364, 0x84CE, 0x865D, 0x8805, 0x89DE, 0x8BD7, 0x8DF4, 0x9051, 0x92E2, 0x95DE, 0x990F, 0x9C81, 0xA079, 0xA54C, 0xAAD2, 0xB18A, 0xB90A, 0xC124, 0xC9CC, 0xD339, 0xDDD3, 0xE9D6, 0xF893, 0x096F, 0x1ACA, 0x29EC, 0x381F, 0x45F9, 0x546A, 0x63C3, 0x73B5, }; static const int16_t ts_cb_1[32] = { 0x9F65, 0xB56B, 0xC583, 0xD371, 0xE018, 0xEBB4, 0xF61C, 0xFF59, 0x085B, 0x1106, 0x1952, 0x214A, 0x28C9, 0x2FF8, 0x36E6, 0x3D92, 0x43DF, 0x49BB, 0x4F46, 0x5467, 0x5930, 0x5DA3, 0x61EC, 0x65F9, 0x69D4, 0x6D5A, 0x709E, 0x73AD, 0x766B, 0x78F0, 0x7B5A, 0x7DA5, }; static const int16_t ts_cb_2[16] = { 0x96F8, 0xA3B4, 0xAF45, 0xBA53, 0xC4B1, 0xCECC, 0xD86F, 0xE21E, 0xEBF3, 0xF640, 0x00F7, 0x0C20, 0x1881, 0x269A, 0x376B, 0x4D60, }; static const int16_t ts_cb_3[16] = { 0xC654, 0xDEF2, 0xEFAA, 0xFD94, 0x096A, 0x143F, 0x1E7B, 0x282C, 0x3176, 0x3A89, 0x439F, 0x4CA2, 0x557F, 0x5E50, 0x6718, 0x6F8D, }; static const int16_t ts_cb_4[16] = { 0xABE7, 0xBBA8, 0xC81C, 0xD326, 0xDD0E, 0xE5D4, 0xEE22, 0xF618, 0xFE28, 
0x064F, 0x0EB7, 0x17B8, 0x21AA, 0x2D8B, 0x3BA2, 0x4DF9, }; static const int16_t ts_cb_5[8] = { 0xD51B, 0xF12E, 0x042E, 0x13C7, 0x2260, 0x311B, 0x40DE, 0x5385, }; static const int16_t ts_cb_6[8] = { 0xB550, 0xC825, 0xD980, 0xE997, 0xF883, 0x0752, 0x1811, 0x2E18, }; static const int16_t ts_cb_7[8] = { 0xCEF0, 0xE4F9, 0xF6BB, 0x0646, 0x14F5, 0x23FF, 0x356F, 0x4A8D, }; static const int16_t * const ts_codebook[8] = { ts_cb_0, ts_cb_1, ts_cb_2, ts_cb_3, ts_cb_4, ts_cb_5, ts_cb_6, ts_cb_7 }; /* table used for decoding pulse positions */ static const int16_t ts_140[120] = { 0x0E46, 0x0CCC, 0x0B6D, 0x0A28, 0x08FC, 0x07E8, 0x06EB, 0x0604, 0x0532, 0x0474, 0x03C9, 0x0330, 0x02A8, 0x0230, 0x01C7, 0x016C, 0x011E, 0x00DC, 0x00A5, 0x0078, 0x0054, 0x0038, 0x0023, 0x0014, 0x000A, 0x0004, 0x0001, 0x0000, 0x0000, 0x0000, 0x0196, 0x017A, 0x015F, 0x0145, 0x012C, 0x0114, 0x00FD, 0x00E7, 0x00D2, 0x00BE, 0x00AB, 0x0099, 0x0088, 0x0078, 0x0069, 0x005B, 0x004E, 0x0042, 0x0037, 0x002D, 0x0024, 0x001C, 0x0015, 0x000F, 0x000A, 0x0006, 0x0003, 0x0001, 0x0000, 0x0000, 0x001D, 0x001C, 0x001B, 0x001A, 0x0019, 0x0018, 0x0017, 0x0016, 0x0015, 0x0014, 0x0013, 0x0012, 0x0011, 0x0010, 0x000F, 0x000E, 0x000D, 0x000C, 0x000B, 0x000A, 0x0009, 0x0008, 0x0007, 0x0006, 0x0005, 0x0004, 0x0003, 0x0002, 0x0001, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001 }; /* filter for correlated input filter */ static const int16_t ts_230[8] = { 0x7F3B, 0x7E78, 0x7DB6, 0x7CF5, 0x7C35, 0x7B76, 0x7AB8, 0x79FC }; /* two-point filters table */ static const int16_t ts_240[25 * 2] = { 0xED2F, 0x5239, 0x54F1, 0xE4A9, 0x2620, 0xEE3E, 0x09D6, 0x2C40, 0xEFB5, 0x2BE0, 0x3FE1, 0x3339, 0x442F, 0xE6FE, 0x4458, 0xF9DF, 0xF231, 0x43DB, 0x3DB0, 0xF705, 0x4F7B, 0xFEFB, 0x26AD, 0x0CDC, 0x33C2, 0x0739, 0x12BE, 0x43A2, 0x1BDF, 0x1F3E, 0x0211, 
0x0796, 0x2AEB, 0x163F, 0x050D, 0x3A38, 0x0D1E, 0x0D78, 0x150F, 0x3346, 0x38A4, 0x0B7D, 0x2D5D, 0x1FDF, 0x19B7, 0x2822, 0x0D99, 0x1F12, 0x194C, 0x0CE6 }; /* possible pulse values */ static const int16_t ts_562[64] = { 0x0002, 0x0006, 0xFFFE, 0xFFFA, 0x0004, 0x000C, 0xFFFC, 0xFFF4, 0x0006, 0x0012, 0xFFFA, 0xFFEE, 0x000A, 0x001E, 0xFFF6, 0xFFE2, 0x0010, 0x0030, 0xFFF0, 0xFFD0, 0x0019, 0x004B, 0xFFE7, 0xFFB5, 0x0028, 0x0078, 0xFFD8, 0xFF88, 0x0040, 0x00C0, 0xFFC0, 0xFF40, 0x0065, 0x012F, 0xFF9B, 0xFED1, 0x00A1, 0x01E3, 0xFF5F, 0xFE1D, 0x0100, 0x0300, 0xFF00, 0xFD00, 0x0196, 0x04C2, 0xFE6A, 0xFB3E, 0x0285, 0x078F, 0xFD7B, 0xF871, 0x0400, 0x0C00, 0xFC00, 0xF400, 0x0659, 0x130B, 0xF9A7, 0xECF5, 0x0A14, 0x1E3C, 0xF5EC, 0xE1C4 }; /* filters used in final output calculations */ static const int16_t ts_5E2[8] = { 0x4666, 0x26B8, 0x154C, 0x0BB6, 0x0671, 0x038B, 0x01F3, 0x0112 }; static const int16_t ts_5F2[8] = { 0x6000, 0x4800, 0x3600, 0x2880, 0x1E60, 0x16C8, 0x1116, 0x0CD1 }; #endif /* AVCODEC_TRUESPEECH_DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/truespeech_data.h
C
asf20
5,370
/* * Generate a header file for hardcoded PCM tables * * Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #include "pcm_tablegen.h" #include "tableprint.h" int main(void) { pcm_alaw_tableinit(); pcm_ulaw_tableinit(); write_fileheader(); printf("static const uint8_t linear_to_alaw[1 << 14] = {\n"); write_uint8_array(linear_to_alaw, 1 << 14); printf("};\n"); printf("static const uint8_t linear_to_ulaw[1 << 14] = {\n"); write_uint8_array(linear_to_ulaw, 1 << 14); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/pcm_tablegen.c
C
asf20
1,375
/* * Faad decoder * Copyright (c) 2003 Zdenek Kabelac * Copyright (c) 2004 Thomas Raivio * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAC decoder. * * still a bit unfinished - but it plays something */ #include "avcodec.h" #include "faad.h" #ifndef FAADAPI #define FAADAPI #endif /* * when CONFIG_LIBFAADBIN is true libfaad will be opened at runtime */ //#undef CONFIG_LIBFAADBIN //#define CONFIG_LIBFAADBIN 0 //#define CONFIG_LIBFAADBIN 1 #if CONFIG_LIBFAADBIN #include <dlfcn.h> static const char* const libfaadname = "libfaad.so"; #else #define dlopen(a) #define dlclose(a) #endif typedef struct { void* handle; /* dlopen handle */ void* faac_handle; /* FAAD library handle */ int sample_size; int init; /* faad calls */ faacDecHandle FAADAPI (*faacDecOpen)(void); faacDecConfigurationPtr FAADAPI (*faacDecGetCurrentConfiguration)(faacDecHandle hDecoder); #ifndef FAAD2_VERSION int FAADAPI (*faacDecSetConfiguration)(faacDecHandle hDecoder, faacDecConfigurationPtr config); int FAADAPI (*faacDecInit)(faacDecHandle hDecoder, unsigned char *buffer, unsigned long *samplerate, unsigned long *channels); int FAADAPI (*faacDecInit2)(faacDecHandle hDecoder, unsigned char *pBuffer, unsigned long SizeOfDecoderSpecificInfo, unsigned long *samplerate, unsigned long *channels); int FAADAPI 
(*faacDecDecode)(faacDecHandle hDecoder, unsigned char *buffer, unsigned long *bytesconsumed, short *sample_buffer, unsigned long *samples); #else unsigned char FAADAPI (*faacDecSetConfiguration)(faacDecHandle hDecoder, faacDecConfigurationPtr config); long FAADAPI (*faacDecInit)(faacDecHandle hDecoder, unsigned char *buffer, unsigned long buffer_size, unsigned long *samplerate, unsigned char *channels); char FAADAPI (*faacDecInit2)(faacDecHandle hDecoder, unsigned char *pBuffer, unsigned long SizeOfDecoderSpecificInfo, unsigned long *samplerate, unsigned char *channels); void *FAADAPI (*faacDecDecode)(faacDecHandle hDecoder, faacDecFrameInfo *hInfo, unsigned char *buffer, unsigned long buffer_size); char* FAADAPI (*faacDecGetErrorMessage)(unsigned char errcode); #endif void FAADAPI (*faacDecClose)(faacDecHandle hDecoder); } FAACContext; static const unsigned long faac_srates[] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000 }; static void channel_setup(AVCodecContext *avctx) { #ifdef FAAD2_VERSION FAACContext *s = avctx->priv_data; if (avctx->request_channels > 0 && avctx->request_channels == 2 && avctx->request_channels < avctx->channels) { faacDecConfigurationPtr faac_cfg; avctx->channels = 2; faac_cfg = s->faacDecGetCurrentConfiguration(s->faac_handle); faac_cfg->downMatrix = 1; s->faacDecSetConfiguration(s->faac_handle, faac_cfg); } #endif } static av_cold int faac_init_mp4(AVCodecContext *avctx) { FAACContext *s = avctx->priv_data; unsigned long samplerate; #ifndef FAAD2_VERSION unsigned long channels; #else unsigned char channels; #endif int r = 0; if (avctx->extradata){ r = s->faacDecInit2(s->faac_handle, (uint8_t*) avctx->extradata, avctx->extradata_size, &samplerate, &channels); if (r < 0){ av_log(avctx, AV_LOG_ERROR, "faacDecInit2 failed r:%d sr:%ld ch:%ld s:%d\n", r, samplerate, (long)channels, avctx->extradata_size); } else { avctx->sample_rate = samplerate; avctx->channels = channels; channel_setup(avctx); s->init 
= 1; } } return r; } /* NOTE(review): continuation/tail of a function whose start is outside this chunk — left untouched. */

/**
 * Decode one AAC packet with the dynamically loaded libfaad library.
 *
 * The FAAD1 path (#ifndef FAAD2_VERSION) decodes straight into the caller's
 * buffer; the FAAD2 path lazily initializes the decoder from the first
 * packet, then copies libfaad's internal output buffer into @p data.
 *
 * @param avctx     codec context (sample_rate/channels are filled in lazily)
 * @param data      output sample buffer provided by the caller
 * @param data_size on return, number of bytes written to @p data
 * @param avpkt     input packet with raw AAC data
 * @return number of input bytes consumed, or -1 on decode error
 */
static int faac_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FAACContext *s = avctx->priv_data;
#ifndef FAAD2_VERSION
    unsigned long bytesconsumed;
    short *sample_buffer = NULL;
    unsigned long samples;
    int out;
#else
    faacDecFrameInfo frame_info;
    void *out;
#endif
    if(buf_size == 0)
        return 0;
#ifndef FAAD2_VERSION
    /* FAAD1: decodes directly into the caller-supplied buffer. */
    out = s->faacDecDecode(s->faac_handle,
                           (unsigned char*)buf,
                           &bytesconsumed,
                           data,
                           &samples);
    samples *= s->sample_size;
    if (data_size)
        *data_size = samples;
    /* Never report more consumed bytes than the packet actually held. */
    return (buf_size < (int)bytesconsumed)
        ? buf_size : (int)bytesconsumed;
#else
    if(!s->init){
        /* Lazy init: probe sample rate / channel count from the first packet
         * when no extradata-based init has happened yet. */
        unsigned long srate;
        unsigned char channels;
        int r = s->faacDecInit(s->faac_handle, buf, buf_size, &srate, &channels);
        if(r < 0){
            av_log(avctx, AV_LOG_ERROR, "libfaad: codec init failed.\n");
            return -1;
        }
        avctx->sample_rate = srate;
        avctx->channels = channels;
        channel_setup(avctx);
        s->init = 1;
    }
    out = s->faacDecDecode(s->faac_handle, &frame_info,
                           (unsigned char*)buf, (unsigned long)buf_size);

    if (frame_info.error > 0) {
        av_log(avctx, AV_LOG_ERROR, "libfaad: frame decoding failed: %s\n",
               s->faacDecGetErrorMessage(frame_info.error));
        return -1;
    }
    if (!avctx->frame_size)
        avctx->frame_size = frame_info.samples/avctx->channels;
    frame_info.samples *= s->sample_size;
    memcpy(data, out, frame_info.samples); // CHECKME - can we cheat this one
    if (data_size)
        *data_size = frame_info.samples;

    return (buf_size < (int)frame_info.bytesconsumed)
        ? buf_size : (int)frame_info.bytesconsumed;
#endif
}

/**
 * Close the libfaad decoder handle and unload the shared library.
 */
static av_cold int faac_decode_end(AVCodecContext *avctx)
{
    FAACContext *s = avctx->priv_data;

    s->faacDecClose(s->faac_handle);

    dlclose(s->handle);
    return 0;
}

/**
 * Open libfaad (via dlopen when CONFIG_LIBFAADBIN), resolve all needed entry
 * points, create a decoder handle and configure output format / sample rate.
 * @return 0 on success, -1 if the library or any symbol cannot be loaded or
 *         the decoder handle cannot be created
 */
static av_cold int faac_decode_init(AVCodecContext *avctx)
{
    FAACContext *s = avctx->priv_data;
    faacDecConfigurationPtr faac_cfg;

#if CONFIG_LIBFAADBIN
    const char* err = 0;

    s->handle = dlopen(libfaadname, RTLD_LAZY);
    if (!s->handle)
    {
        av_log(avctx, AV_LOG_ERROR, "FAAD library: %s could not be opened! \n%s\n",
               libfaadname, dlerror());
        return -1;
    }

    /* Resolve faacDec<a> via dlsym; remember the first missing symbol name. */
#define dfaac(a) do {                                                   \
        const char* n = AV_STRINGIFY(faacDec ## a);                     \
        if (!err && !(s->faacDec ## a = dlsym(s->handle, n))) {         \
            err = n;                                                    \
        }                                                               \
    } while(0)
#else  /* !CONFIG_LIBFAADBIN */
    /* Statically linked: bind directly to the library symbols. */
#define dfaac(a) s->faacDec ## a = faacDec ## a
#endif /* CONFIG_LIBFAADBIN */

    // resolve all needed function calls
    dfaac(Open);
    dfaac(Close);
    dfaac(GetCurrentConfiguration);
    dfaac(SetConfiguration);
    dfaac(Init);
    dfaac(Init2);
    dfaac(Decode);
#ifdef FAAD2_VERSION
    dfaac(GetErrorMessage);
#endif

#undef dfaac

#if CONFIG_LIBFAADBIN
    if (err) {
        dlclose(s->handle);
        av_log(avctx, AV_LOG_ERROR, "FAAD library: cannot resolve %s in %s!\n",
               err, libfaadname);
        return -1;
    }
#endif

    s->faac_handle = s->faacDecOpen();
    if (!s->faac_handle) {
        av_log(avctx, AV_LOG_ERROR, "FAAD library: cannot create handler!\n");
        faac_decode_end(avctx);
        return -1;
    }

    faac_cfg = s->faacDecGetCurrentConfiguration(s->faac_handle);

    if (faac_cfg) {
        /* Map requested bits-per-sample to a FAAD output format;
         * 16 bit is the default (and the fallthrough for unknown values). */
        switch (avctx->bits_per_coded_sample) {
        case 8: av_log(avctx, AV_LOG_ERROR, "FAADlib unsupported bps %d\n", avctx->bits_per_coded_sample); break;
        default:
        case 16:
#ifdef FAAD2_VERSION
            faac_cfg->outputFormat = FAAD_FMT_16BIT;
#endif
            s->sample_size = 2;
            break;
        case 24:
#ifdef FAAD2_VERSION
            faac_cfg->outputFormat = FAAD_FMT_24BIT;
#endif
            s->sample_size = 3;
            break;
        case 32:
#ifdef FAAD2_VERSION
            faac_cfg->outputFormat = FAAD_FMT_32BIT;
#endif
            s->sample_size = 4;
            break;
        }

        faac_cfg->defSampleRate = (!avctx->sample_rate) ? 44100 : avctx->sample_rate;
        faac_cfg->defObjectType = LC;
    }

    s->faacDecSetConfiguration(s->faac_handle, faac_cfg);

    faac_init_mp4(avctx);

    if(!s->init && avctx->channels > 0)
        channel_setup(avctx);

    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

AVCodec libfaad_decoder = {
    "libfaad",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_AAC,
    sizeof(FAACContext),
    faac_decode_init,
    NULL,
    faac_decode_end,
    faac_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("libfaad AAC (Advanced Audio Codec)"),
};
123linslouis-android-video-cutter
jni/libavcodec/libfaad.c
C
asf20
10,236
/*
 * NellyMoser audio decoder
 * Copyright (c) 2007 a840bda5870ba11f19698ff6eb9581dfb0f95fa5,
 *                    539459aeb7d425140b62a3ec7dbf6dc8e408a306, and
 *                    520e17cd55896441042b14df2566a6eb610ed444
 * Copyright (c) 2007 Loic Minier <lool at dooz.org>
 *                    Benjamin Larsson
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * The 3 alphanumeric copyright notices are md5summed they are from the original
 * implementors. The original code is available from http://code.google.com/p/nelly2pcm/
 */

#include "nellymoser.h"
#include "libavutil/lfg.h"
#include "libavutil/random_seed.h"
#include "avcodec.h"
#include "dsputil.h"
#include "fft.h"

#define ALT_BITSTREAM_READER_LE
#include "get_bits.h"

/** Per-stream decoder state. */
typedef struct NellyMoserDecodeContext {
    AVCodecContext* avctx;
    DECLARE_ALIGNED(16, float,float_buf)[NELLY_SAMPLES];   ///< decoded floats for one tag
    float           state[128];                            ///< overlap buffer from the previous half-frame
    AVLFG           random_state;                          ///< PRNG for dithering zero-bit coefficients
    GetBitContext   gb;
    int             add_bias;                              ///< DC bias for the float-to-int16 fast path
    float           scale_bias;                            ///< output scaling matched to float_to_int16 variant
    DSPContext      dsp;
    FFTContext      imdct_ctx;
    DECLARE_ALIGNED(16, float,imdct_out)[NELLY_BUF_LEN * 2];
} NellyMoserDecodeContext;

/**
 * Overlap-add the IMDCT output with the saved state using the shared
 * 128-point sine window, then stash the second half for the next call.
 */
static void overlap_and_window(NellyMoserDecodeContext *s, float *state, float *audio, float *a_in)
{
    int bot, top;

    bot = 0;
    top = NELLY_BUF_LEN-1;

    while (bot < NELLY_BUF_LEN) {
        audio[bot] = a_in [bot]*ff_sine_128[bot]
                    +state[bot]*ff_sine_128[top] + s->add_bias;

        bot++;
        top--;
    }
    memcpy(state, a_in + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
}

/**
 * Decode one NELLY_BLOCK_LEN-byte block into NELLY_SAMPLES floats:
 * read per-band gains, quantize bit allocation, dequantize/dither the
 * spectral coefficients, run the IMDCT and window the result.
 */
static void nelly_decode_block(NellyMoserDecodeContext *s,
                               const unsigned char block[NELLY_BLOCK_LEN],
                               float audio[NELLY_SAMPLES])
{
    int i,j;
    float buf[NELLY_FILL_LEN], pows[NELLY_FILL_LEN];
    float *aptr, *bptr, *pptr, val, pval;
    int bits[NELLY_BUF_LEN];
    unsigned char v;

    init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);

    /* Expand the band gains (6-bit initial value, 5-bit deltas) into
     * per-coefficient scale factors. */
    bptr = buf;
    pptr = pows;
    val = ff_nelly_init_table[get_bits(&s->gb, 6)];
    for (i=0 ; i<NELLY_BANDS ; i++) {
        if (i > 0)
            val += ff_nelly_delta_table[get_bits(&s->gb, 5)];
        pval = -pow(2, val/2048) * s->scale_bias;
        for (j = 0; j < ff_nelly_band_sizes_table[i]; j++) {
            *bptr++ = val;
            *pptr++ = pval;
        }
    }

    ff_nelly_get_sample_bits(buf, bits);

    /* Two half-frames per block, each with its own detail bits. */
    for (i = 0; i < 2; i++) {
        aptr = audio + i * NELLY_BUF_LEN;

        init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);
        skip_bits_long(&s->gb, NELLY_HEADER_BITS + i*NELLY_DETAIL_BITS);

        for (j = 0; j < NELLY_FILL_LEN; j++) {
            if (bits[j] <= 0) {
                /* No bits allocated: fill with randomly-signed noise. */
                aptr[j] = M_SQRT1_2*pows[j];
                if (av_lfg_get(&s->random_state) & 1)
                    aptr[j] *= -1.0;
            } else {
                v = get_bits(&s->gb, bits[j]);
                aptr[j] = ff_nelly_dequantization_table[(1<<bits[j])-1+v]*pows[j];
            }
        }
        memset(&aptr[NELLY_FILL_LEN], 0,
               (NELLY_BUF_LEN - NELLY_FILL_LEN) * sizeof(float));

        ff_imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
        /* XXX: overlapping and windowing should be part of a more
           generic imdct function */
        overlap_and_window(s, s->state, aptr, s->imdct_out);
    }
}

/**
 * Initialize PRNG, IMDCT, DSP helpers and the sine window; choose the
 * bias/scale pair matching the active float_to_int16 implementation.
 */
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

    dsputil_init(&s->dsp, avctx);

    if(s->dsp.float_to_int16 == ff_float_to_int16_c) {
        /* C conversion clips around a bias, so pre-bias the samples. */
        s->add_bias = 385;
        s->scale_bias = 1.0/(8*32768);
    } else {
        s->add_bias = 0;
        s->scale_bias = 1.0/(1*8);
    }

    /* Generate overlap window */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->sample_fmt = SAMPLE_FMT_S16;
    avctx->channel_layout = CH_LAYOUT_MONO;
    return 0;
}

/**
 * Decode one FLV audio tag. The tag size implies the number of blocks
 * (and hence the sample rate the stream was encoded at).
 */
static int decode_tag(AVCodecContext * avctx,
                      void *data, int *data_size,
                      AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    NellyMoserDecodeContext *s = avctx->priv_data;
    int blocks, i;
    int16_t* samples;
    *data_size = 0;
    samples = (int16_t*)data;

    if (buf_size < avctx->block_align)
        return buf_size;

    switch (buf_size) {
        case 64:    // 8000Hz
            blocks = 1; break;
        case 128:   // 11025Hz
            blocks = 2; break;
        case 192:   // 16000Hz
            blocks = 3; break;
        case 256:   // 22050Hz
            blocks = 4; break;
        case 512:   // 44100Hz
            blocks = 8; break;
        default:
            av_log(avctx, AV_LOG_DEBUG, "Tag size %d.\n", buf_size);
            return buf_size;
    }

    for (i=0 ; i<blocks ; i++) {
        nelly_decode_block(s, &buf[i*NELLY_BLOCK_LEN], s->float_buf);
        s->dsp.float_to_int16(&samples[i*NELLY_SAMPLES], s->float_buf, NELLY_SAMPLES);
        *data_size += NELLY_SAMPLES*sizeof(int16_t);
    }

    return buf_size;
}

/** Free the IMDCT context. */
static av_cold int decode_end(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;
    ff_mdct_end(&s->imdct_ctx);

    return 0;
}

AVCodec nellymoser_decoder = {
    "nellymoser",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_NELLYMOSER,
    sizeof(NellyMoserDecodeContext),
    decode_init,
    NULL,
    decode_end,
    decode_tag,
    .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
};
123linslouis-android-video-cutter
jni/libavcodec/nellymoserdec.c
C
asf20
6,611
/*
 * AAC decoder data
 * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
 * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AAC decoder data
 * @author Oded Shimon  ( ods15 ods15 dyndns org )
 * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
 */

#ifndef AVCODEC_AACDECTAB_H
#define AVCODEC_AACDECTAB_H

#include "aac.h"

#include <stdint.h>

/* @name tns_tmp2_map
 * Tables of the tmp2[] arrays of LPC coefficients used for TNS.
 * The suffix _M_N[] indicate the values of coef_compress and coef_res
 * respectively.
 * @{
 */
static const float tns_tmp2_map_1_3[4] = {
     0.00000000, -0.43388373,  0.64278758,  0.34202015,
};

static const float tns_tmp2_map_0_3[8] = {
     0.00000000, -0.43388373, -0.78183150, -0.97492790,
     0.98480773,  0.86602539,  0.64278758,  0.34202015,
};

static const float tns_tmp2_map_1_4[8] = {
     0.00000000, -0.20791170, -0.40673664, -0.58778524,
     0.67369562,  0.52643216,  0.36124167,  0.18374951,
};

static const float tns_tmp2_map_0_4[16] = {
     0.00000000, -0.20791170, -0.40673664, -0.58778524,
    -0.74314481, -0.86602539, -0.95105654, -0.99452192,
     0.99573416,  0.96182561,  0.89516330,  0.79801720,
     0.67369562,  0.52643216,  0.36124167,  0.18374951,
};

/* Indexed by (coef_compress << 1) | (coef_res - 3). */
static const float * const tns_tmp2_map[4] = {
    tns_tmp2_map_0_3,
    tns_tmp2_map_0_4,
    tns_tmp2_map_1_3,
    tns_tmp2_map_1_4
};
// @}

/* Number of channel-element tags per channelConfiguration value (1..7);
 * 0 marks configurations that must be described by a PCE. */
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0 };

/* Default (element type, instance tag) layout for channelConfigurations 1..7. */
static const uint8_t aac_channel_layout_map[7][5][2] = {
    { { TYPE_SCE, 0 }, },
    { { TYPE_CPE, 0 }, },
    { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, },
    { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_SCE, 1 }, },
    { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_CPE, 1 }, },
    { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 1 }, },
    { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 2 }, { TYPE_CPE, 1 }, },
};

/* libavutil channel-layout constants matching the table above. */
static const int64_t aac_channel_layout[8] = {
    CH_LAYOUT_MONO,
    CH_LAYOUT_STEREO,
    CH_LAYOUT_SURROUND,
    CH_LAYOUT_4POINT0,
    CH_LAYOUT_5POINT0_BACK,
    CH_LAYOUT_5POINT1_BACK,
    CH_LAYOUT_7POINT1_WIDE,
    0,
};

#endif /* AVCODEC_AACDECTAB_H */
123linslouis-android-video-cutter
jni/libavcodec/aacdectab.h
C
asf20
2,992
/*
 * RealVideo 4 decoder
 * copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * miscellaneous RV30/40 tables
 */

#ifndef AVCODEC_RV34DATA_H
#define AVCODEC_RV34DATA_H

#include <stdint.h>

/**
 * number of ones in nibble minus one
 */
static const uint8_t rv34_count_ones[16] = {
    0, 0, 0, 1, 0, 1, 1, 2, 0, 1, 1, 2, 1, 2, 2, 3
};

/**
 * values used to reconstruct coded block pattern
 */
static const uint8_t rv34_cbp_code[16] = {
    0x00, 0x20, 0x10, 0x30, 0x02, 0x22, 0x12, 0x32,
    0x01, 0x21, 0x11, 0x31, 0x03, 0x23, 0x13, 0x33
};

/**
 * precalculated results of division by three and modulo three for values 0-107
 *
 * A lot of four-tuples in RV40 are represented as c0*27+c1*9+c2*3+c3.
 * This table allows conversion from a value back to a vector.
 */
static const uint8_t modulo_three_table[108][4] = {
    { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 0, 0, 0, 2 },
    { 0, 0, 1, 0 }, { 0, 0, 1, 1 }, { 0, 0, 1, 2 },
    { 0, 0, 2, 0 }, { 0, 0, 2, 1 }, { 0, 0, 2, 2 },
    { 0, 1, 0, 0 }, { 0, 1, 0, 1 }, { 0, 1, 0, 2 },
    { 0, 1, 1, 0 }, { 0, 1, 1, 1 }, { 0, 1, 1, 2 },
    { 0, 1, 2, 0 }, { 0, 1, 2, 1 }, { 0, 1, 2, 2 },
    { 0, 2, 0, 0 }, { 0, 2, 0, 1 }, { 0, 2, 0, 2 },
    { 0, 2, 1, 0 }, { 0, 2, 1, 1 }, { 0, 2, 1, 2 },
    { 0, 2, 2, 0 }, { 0, 2, 2, 1 }, { 0, 2, 2, 2 },
    { 1, 0, 0, 0 }, { 1, 0, 0, 1 }, { 1, 0, 0, 2 },
    { 1, 0, 1, 0 }, { 1, 0, 1, 1 }, { 1, 0, 1, 2 },
    { 1, 0, 2, 0 }, { 1, 0, 2, 1 }, { 1, 0, 2, 2 },
    { 1, 1, 0, 0 }, { 1, 1, 0, 1 }, { 1, 1, 0, 2 },
    { 1, 1, 1, 0 }, { 1, 1, 1, 1 }, { 1, 1, 1, 2 },
    { 1, 1, 2, 0 }, { 1, 1, 2, 1 }, { 1, 1, 2, 2 },
    { 1, 2, 0, 0 }, { 1, 2, 0, 1 }, { 1, 2, 0, 2 },
    { 1, 2, 1, 0 }, { 1, 2, 1, 1 }, { 1, 2, 1, 2 },
    { 1, 2, 2, 0 }, { 1, 2, 2, 1 }, { 1, 2, 2, 2 },
    { 2, 0, 0, 0 }, { 2, 0, 0, 1 }, { 2, 0, 0, 2 },
    { 2, 0, 1, 0 }, { 2, 0, 1, 1 }, { 2, 0, 1, 2 },
    { 2, 0, 2, 0 }, { 2, 0, 2, 1 }, { 2, 0, 2, 2 },
    { 2, 1, 0, 0 }, { 2, 1, 0, 1 }, { 2, 1, 0, 2 },
    { 2, 1, 1, 0 }, { 2, 1, 1, 1 }, { 2, 1, 1, 2 },
    { 2, 1, 2, 0 }, { 2, 1, 2, 1 }, { 2, 1, 2, 2 },
    { 2, 2, 0, 0 }, { 2, 2, 0, 1 }, { 2, 2, 0, 2 },
    { 2, 2, 1, 0 }, { 2, 2, 1, 1 }, { 2, 2, 1, 2 },
    { 2, 2, 2, 0 }, { 2, 2, 2, 1 }, { 2, 2, 2, 2 },
    { 3, 0, 0, 0 }, { 3, 0, 0, 1 }, { 3, 0, 0, 2 },
    { 3, 0, 1, 0 }, { 3, 0, 1, 1 }, { 3, 0, 1, 2 },
    { 3, 0, 2, 0 }, { 3, 0, 2, 1 }, { 3, 0, 2, 2 },
    { 3, 1, 0, 0 }, { 3, 1, 0, 1 }, { 3, 1, 0, 2 },
    { 3, 1, 1, 0 }, { 3, 1, 1, 1 }, { 3, 1, 1, 2 },
    { 3, 1, 2, 0 }, { 3, 1, 2, 1 }, { 3, 1, 2, 2 },
    { 3, 2, 0, 0 }, { 3, 2, 0, 1 }, { 3, 2, 0, 2 },
    { 3, 2, 1, 0 }, { 3, 2, 1, 1 }, { 3, 2, 1, 2 },
    { 3, 2, 2, 0 }, { 3, 2, 2, 1 }, { 3, 2, 2, 2 },
};

/**
 * quantizer values used for AC and DC coefficients in chroma blocks
 */
static const uint8_t rv34_chroma_quant[2][32] = {
    {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
      16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 23, 24, 24, 25, 25 },
    {  0,  0,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
      14, 15, 15, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23 }
};

/**
 * This table is used for dequantizing.
 */
static const uint16_t rv34_qscale_tab[32] = {
  60,   67,   76,   85,   96,  108,  121,  136,
 152,  171,  192,  216,  242,  272,  305,  341,
 383,  432,  481,  544,  606,  683,  767,  854,
 963, 1074, 1212, 1392, 1566, 1708, 1978, 2211
};

/**
 * 4x4 dezigzag pattern
 */
static const uint8_t rv34_dezigzag[16] = {
   0,  1,  8, 16,
   9,  2,  3, 10,
  17, 24, 25, 18,
  11, 19, 26, 27
};

/**
 * tables used to translate a quantizer value into a VLC set for decoding
 * The first table is used for intraframes.
 */
static const uint8_t rv34_quant_to_vlc_set[2][31] = {
 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
   2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0 },
 { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3,
   3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6 },
};

/**
 * table for obtaining the quantizer difference
 * @todo Use with modified_quant_tab from h263data.h.
 */
static const uint8_t rv34_dquant_tab[2][32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
{
    0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
},{
    0, 2, 3, 4, 5, 6, 7, 8, 9,10,11,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,31,31,26
}
};

/**
 * maximum number of macroblocks for each of the possible slice offset sizes
 * @todo This is the same as ff_mba_max, maybe use it instead.
 */
static const uint16_t rv34_mb_max_sizes[6] = { 0x2F, 0x62, 0x18B, 0x62F, 0x18BF, 0x23FF };
/**
 * bits needed to code the slice offset for the given size
 * @todo This is the same as ff_mba_length, maybe use it instead.
 */
static const uint8_t rv34_mb_bits_sizes[6] = { 6, 7, 9, 11, 13, 14 };

#endif /* AVCODEC_RV34DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/rv34data.h
C
asf20
5,511
/*
 * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
 * Copyright (C) 2009 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Dirac Decoder
 * @author Marco Gerards <marco@gnu.org>
 */

#include "dirac.h"
#include "avcodec.h"
#include "golomb.h"
#include "mpeg12data.h"

// defaults for source parameters, indexed by the stream's video_format field
static const dirac_source_params dirac_source_parameters_defaults[] = {
    { 640,  480,  2, 0, 0, 1,  1, 640,  480,  0, 0, 1, 0 },
    { 176,  120,  2, 0, 0, 9,  2, 176,  120,  0, 0, 1, 1 },
    { 176,  144,  2, 0, 1, 10, 3, 176,  144,  0, 0, 1, 2 },
    { 352,  240,  2, 0, 0, 9,  2, 352,  240,  0, 0, 1, 1 },
    { 352,  288,  2, 0, 1, 10, 3, 352,  288,  0, 0, 1, 2 },
    { 704,  480,  2, 0, 0, 9,  2, 704,  480,  0, 0, 1, 1 },
    { 704,  576,  2, 0, 1, 10, 3, 704,  576,  0, 0, 1, 2 },
    { 720,  480,  1, 1, 0, 4,  2, 704,  480,  8, 0, 3, 1 },
    { 720,  576,  1, 1, 1, 3,  3, 704,  576,  8, 0, 3, 2 },

    { 1280, 720,  1, 0, 1, 7,  1, 1280, 720,  0, 0, 3, 3 },
    { 1280, 720,  1, 0, 1, 6,  1, 1280, 720,  0, 0, 3, 3 },
    { 1920, 1080, 1, 1, 1, 4,  1, 1920, 1080, 0, 0, 3, 3 },
    { 1920, 1080, 1, 1, 1, 3,  1, 1920, 1080, 0, 0, 3, 3 },
    { 1920, 1080, 1, 0, 1, 7,  1, 1920, 1080, 0, 0, 3, 3 },
    { 1920, 1080, 1, 0, 1, 6,  1, 1920, 1080, 0, 0, 3, 3 },
    { 2048, 1080, 0, 0, 1, 2,  1, 2048, 1080, 0, 0, 4, 4 },
    { 4096, 2160, 0, 0, 1, 2,  1, 4096, 2160, 0, 0, 4, 4 },

    { 3840, 2160, 1, 0, 1, 7,  1, 3840, 2160, 0, 0, 3, 3 },
    { 3840, 2160, 1, 0, 1, 6,  1, 3840, 2160, 0, 0, 3, 3 },
    { 7680, 4320, 1, 0, 1, 7,  1, 3840, 2160, 0, 0, 3, 3 },
    { 7680, 4320, 1, 0, 1, 6,  1, 3840, 2160, 0, 0, 3, 3 },
};

static const AVRational dirac_preset_aspect_ratios[] = {
    {1, 1}, {10, 11}, {12, 11}, {40, 33}, {16, 11}, {4, 3},
};

/* frame-rate presets beyond the shared MPEG table (indexes 9 and 10) */
static const AVRational dirac_frame_rate[] = {
    {15000, 1001},
    {25, 2},
};

static const struct {
    uint8_t             bitdepth;
    enum AVColorRange   color_range;
} pixel_range_presets[] = {
    {8,  AVCOL_RANGE_JPEG},
    {8,  AVCOL_RANGE_MPEG},
    {10, AVCOL_RANGE_MPEG},
    {12, AVCOL_RANGE_MPEG},
};

static const enum AVColorPrimaries dirac_primaries[] = {
    AVCOL_PRI_BT709,
    AVCOL_PRI_SMPTE170M,
    AVCOL_PRI_BT470BG,
};

static const struct {
    enum AVColorPrimaries color_primaries;
    enum AVColorSpace colorspace;
    enum AVColorTransferCharacteristic color_trc;
} dirac_color_presets[] = {
    { AVCOL_PRI_BT709,     AVCOL_SPC_BT709,   AVCOL_TRC_BT709 },
    { AVCOL_PRI_SMPTE170M, AVCOL_SPC_BT470BG, AVCOL_TRC_BT709 },
    { AVCOL_PRI_BT470BG,   AVCOL_SPC_BT470BG, AVCOL_TRC_BT709 },
    { AVCOL_PRI_BT709,     AVCOL_SPC_BT709,   AVCOL_TRC_BT709 },
    { AVCOL_PRI_BT709,     AVCOL_SPC_BT709,   AVCOL_TRC_UNSPECIFIED /* DCinema */ },
};

/* [full-range?][chroma_format] -> pixel format */
static const enum PixelFormat dirac_pix_fmt[2][3] = {
    { PIX_FMT_YUV444P,  PIX_FMT_YUV422P,  PIX_FMT_YUV420P  },
    { PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P },
};

/**
 * Parse the source-parameter section of a Dirac sequence header,
 * overriding the per-video_format defaults already copied into @p source.
 * Each sub-section is preceded by a 1-bit "custom value follows" flag.
 *
 * @return 0 on success, -1 on any out-of-range index
 */
static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source)
{
    AVRational frame_rate = (AVRational){0,0};
    unsigned luma_depth = 8, luma_offset = 16;
    int idx;

    if (get_bits1(gb)) {
        source->width  = svq3_get_ue_golomb(gb);
        source->height = svq3_get_ue_golomb(gb);
    }

    // chroma subsampling
    if (get_bits1(gb))
        source->chroma_format = svq3_get_ue_golomb(gb);
    if (source->chroma_format > 2) {
        av_log(avctx, AV_LOG_ERROR, "Unknown chroma format %d\n",
               source->chroma_format);
        return -1;
    }

    if (get_bits1(gb))
        source->interlaced = svq3_get_ue_golomb(gb);
    if (source->interlaced > 1)
        return -1;

    // frame rate
    if (get_bits1(gb)) {
        source->frame_rate_index = svq3_get_ue_golomb(gb);

        if (source->frame_rate_index > 10)
            return -1;

        if (!source->frame_rate_index) {
            /* index 0 == fully custom numerator/denominator */
            frame_rate.num = svq3_get_ue_golomb(gb);
            frame_rate.den = svq3_get_ue_golomb(gb);
        }
    }
    if (source->frame_rate_index > 0) {
        if (source->frame_rate_index <= 8)
            frame_rate = ff_frame_rate_tab[source->frame_rate_index];
        else
            /* indexes 9 and 10 are Dirac-specific presets */
            frame_rate = dirac_frame_rate[source->frame_rate_index-9];
    }
    av_reduce(&avctx->time_base.num, &avctx->time_base.den,
              frame_rate.den, frame_rate.num, 1<<30);

    // aspect ratio
    if (get_bits1(gb)) {
        source->aspect_ratio_index = svq3_get_ue_golomb(gb);

        if (source->aspect_ratio_index > 6)
            return -1;

        if (!source->aspect_ratio_index) {
            avctx->sample_aspect_ratio.num = svq3_get_ue_golomb(gb);
            avctx->sample_aspect_ratio.den = svq3_get_ue_golomb(gb);
        }
    }
    if (source->aspect_ratio_index > 0)
        avctx->sample_aspect_ratio =
                dirac_preset_aspect_ratios[source->aspect_ratio_index-1];

    if (get_bits1(gb)) {
        source->clean_width        = svq3_get_ue_golomb(gb);
        source->clean_height       = svq3_get_ue_golomb(gb);
        source->clean_left_offset  = svq3_get_ue_golomb(gb);
        source->clean_right_offset = svq3_get_ue_golomb(gb);
    }

    // Override signal range.
    if (get_bits1(gb)) {
        source->pixel_range_index = svq3_get_ue_golomb(gb);

        if (source->pixel_range_index > 4)
            return -1;

        // This assumes either fullrange or MPEG levels only
        if (!source->pixel_range_index) {
            luma_offset = svq3_get_ue_golomb(gb);
            luma_depth  = av_log2(svq3_get_ue_golomb(gb))+1;
            svq3_get_ue_golomb(gb); // chroma offset
            svq3_get_ue_golomb(gb); // chroma excursion

            avctx->color_range = luma_offset ? AVCOL_RANGE_MPEG
                                             : AVCOL_RANGE_JPEG;
        }
    }
    if (source->pixel_range_index > 0) {
        idx = source->pixel_range_index-1;
        luma_depth = pixel_range_presets[idx].bitdepth;
        avctx->color_range = pixel_range_presets[idx].color_range;
    }

    if (luma_depth > 8)
        av_log(avctx, AV_LOG_WARNING, "Bitdepth greater than 8");

    /* luma_offset == 0 implies full range, selecting the YUVJ variants */
    avctx->pix_fmt = dirac_pix_fmt[!luma_offset][source->chroma_format];

    // color spec
    if (get_bits1(gb)) {
        idx = source->color_spec_index = svq3_get_ue_golomb(gb);

        if (source->color_spec_index > 4)
            return -1;

        avctx->color_primaries = dirac_color_presets[idx].color_primaries;
        avctx->colorspace      = dirac_color_presets[idx].colorspace;
        avctx->color_trc       = dirac_color_presets[idx].color_trc;

        if (!source->color_spec_index) {
            /* index 0: each of primaries / matrix / transfer may be custom */
            if (get_bits1(gb)) {
                idx = svq3_get_ue_golomb(gb);
                if (idx < 3)
                    avctx->color_primaries = dirac_primaries[idx];
            }

            if (get_bits1(gb)) {
                idx = svq3_get_ue_golomb(gb);
                if (!idx)
                    avctx->colorspace = AVCOL_SPC_BT709;
                else if (idx == 1)
                    avctx->colorspace = AVCOL_SPC_BT470BG;
            }

            if (get_bits1(gb) && !svq3_get_ue_golomb(gb))
                avctx->color_trc = AVCOL_TRC_BT709;
        }
    } else {
        idx = source->color_spec_index;
        avctx->color_primaries = dirac_color_presets[idx].color_primaries;
        avctx->colorspace      = dirac_color_presets[idx].colorspace;
        avctx->color_trc       = dirac_color_presets[idx].color_trc;
    }

    return 0;
}

/**
 * Parse a complete Dirac sequence header: version/profile/level,
 * video_format (selects a defaults preset), custom source parameters,
 * and the picture coding mode (only progressive, mode 0, is supported).
 *
 * @return 0 on success, -1 on invalid or unsupported stream parameters
 */
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source)
{
    unsigned version_major;
    unsigned video_format, picture_coding_mode;

    version_major  = svq3_get_ue_golomb(gb);
    svq3_get_ue_golomb(gb); /* version_minor */
    avctx->profile = svq3_get_ue_golomb(gb);
    avctx->level   = svq3_get_ue_golomb(gb);
    video_format   = svq3_get_ue_golomb(gb);

    if (version_major < 2)
        av_log(avctx, AV_LOG_WARNING, "Stream is old and may not work\n");
    else if (version_major > 2)
        av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n");

    if (video_format > 20)
        return -1;

    // Fill in defaults for the source parameters.
    *source = dirac_source_parameters_defaults[video_format];

    // Override the defaults.
    if (parse_source_parameters(avctx, gb, source))
        return -1;

    if (avcodec_check_dimensions(avctx, source->width, source->height))
        return -1;
    avcodec_set_dimensions(avctx, source->width, source->height);

    // currently only used to signal field coding
    picture_coding_mode = svq3_get_ue_golomb(gb);
    if (picture_coding_mode != 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported picture coding mode %d",
               picture_coding_mode);
        return -1;
    }
    return 0;
}
123linslouis-android-video-cutter
jni/libavcodec/dirac.c
C
asf20
9,656
/* GNU ld symbol version script (not Verilog, despite the .v extension):
 * exports every symbol under the LIBAVCODEC_$MAJOR version node;
 * $MAJOR is substituted with the library major version by the build system. */
LIBAVCODEC_$MAJOR { global: *; };
123linslouis-android-video-cutter
jni/libavcodec/libavcodec.v
Verilog
asf20
42
/*
 * Interface to libmp3lame for mp3 encoding
 * Copyright (c) 2002 Lennert Buytenhek <buytenh@gnu.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Interface to libmp3lame for mp3 encoding.
 */

#include "avcodec.h"
#include "mpegaudio.h"
#include <lame/lame.h>

/* Worst-case lame output for one frame plus internal buffering headroom. */
#define BUFFER_SIZE (7200 + 2*MPA_FRAME_SIZE + MPA_FRAME_SIZE/4)
typedef struct Mp3AudioContext {
    lame_global_flags *gfp;          ///< lame encoder handle
    int stereo;                      ///< 1 if input is 2-channel interleaved
    uint8_t buffer[BUFFER_SIZE];     ///< staging buffer for encoded bytes
    int buffer_index;                ///< number of valid bytes in buffer
} Mp3AudioContext;

/**
 * Create and configure a lame encoder from the AVCodecContext settings
 * (sample rate, channels, bitrate or VBR quality).
 * @return 0 on success, -1 on unsupported channel count or lame failure
 */
static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
{
    Mp3AudioContext *s = avctx->priv_data;

    if (avctx->channels > 2)
        return -1;

    s->stereo = avctx->channels > 1 ? 1 : 0;

    if ((s->gfp = lame_init()) == NULL)
        goto err;
    lame_set_in_samplerate(s->gfp, avctx->sample_rate);
    lame_set_out_samplerate(s->gfp, avctx->sample_rate);
    lame_set_num_channels(s->gfp, avctx->channels);
    if(avctx->compression_level == FF_COMPRESSION_DEFAULT) {
        lame_set_quality(s->gfp, 5);
    } else {
        lame_set_quality(s->gfp, avctx->compression_level);
    }
    /* lame 3.91 doesn't work in mono */
    lame_set_mode(s->gfp, JOINT_STEREO);
    lame_set_brate(s->gfp, avctx->bit_rate/1000);
    if(avctx->flags & CODEC_FLAG_QSCALE) {
        /* VBR requested: brate 0, quality derived from global_quality */
        lame_set_brate(s->gfp, 0);
        lame_set_VBR(s->gfp, vbr_default);
        lame_set_VBR_q(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
    }
    lame_set_bWriteVbrTag(s->gfp,0);
    lame_set_disable_reservoir(s->gfp, avctx->flags2 & CODEC_FLAG2_BIT_RESERVOIR ? 0 : 1);
    if (lame_init_params(s->gfp) < 0)
        goto err_close;

    avctx->frame_size = lame_get_framesize(s->gfp);

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;

err_close:
    lame_close(s->gfp);
err:
    return -1;
}

static const int sSampleRates[] = {
    44100, 48000,  32000, 22050, 24000, 16000, 11025, 12000, 8000, 0
};

/* MPEG-1/MPEG-2 bitrate tables indexed by [mpeg_id^1][layer][bitRateID];
 * values in kbit/s, index 0 is the "free format" slot. */
static const int sBitRates[2][3][15] = {
    {   {  0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
        {  0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
        {  0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
    },
    {   {  0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
        {  0,  8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
        {  0,  8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
    },
};

static const int sSamplesPerFrame[2][3] =
{
    {  384,     1152,    1152 },
    {  384,     1152,     576 }
};

static const int sBitsPerSlot[3] = {
    32,
    8,
    8
};

/**
 * Parse an MP3 frame header at @p data and compute the frame length in bytes.
 * Optionally returns samples-per-frame and sample rate through the pointers.
 * @return frame length in bytes, or -1 if the header is invalid
 */
static int mp3len(void *data, int *samplesPerFrame, int *sampleRate)
{
    uint32_t header = AV_RB32(data);
    int layerID = 3 - ((header >> 17) & 0x03);
    int bitRateID = ((header >> 12) & 0x0f);
    int sampleRateID = ((header >> 10) & 0x03);
    int bitsPerSlot = sBitsPerSlot[layerID];
    int isPadded = ((header >> 9) & 0x01);
    static int const mode_tab[4]= {2,3,1,0};
    int mode= mode_tab[(header >> 19) & 0x03];
    int mpeg_id= mode>0;
    int temp0, temp1, bitRate;

    /* reject: bad sync, reserved MPEG version, reserved layer or rate */
    if ( (( header >> 21 ) & 0x7ff) != 0x7ff || mode == 3 || layerID==3 || sampleRateID==3) {
        return -1;
    }

    if(!samplesPerFrame) samplesPerFrame= &temp0;
    if(!sampleRate     ) sampleRate     = &temp1;

//    *isMono = ((header >>  6) & 0x03) == 0x03;

    *sampleRate = sSampleRates[sampleRateID]>>mode;
    bitRate = sBitRates[mpeg_id][layerID][bitRateID] * 1000;
    *samplesPerFrame = sSamplesPerFrame[mpeg_id][layerID];
//av_log(NULL, AV_LOG_DEBUG, "sr:%d br:%d spf:%d l:%d m:%d\n", *sampleRate, bitRate, *samplesPerFrame, layerID, mode);

    return *samplesPerFrame * bitRate / (bitsPerSlot * *sampleRate) + isPadded;
}

/**
 * Encode one frame of PCM into @p frame. Data is accumulated in the
 * context buffer and emitted one complete mp3 frame at a time; a NULL
 * @p data flushes lame's internal state.
 * @return bytes written to @p frame (0 when no full frame is ready), -1 on error
 */
static int MP3lame_encode_frame(AVCodecContext *avctx,
                                unsigned char *frame, int buf_size, void *data)
{
    Mp3AudioContext *s = avctx->priv_data;
    int len;
    int lame_result;

    /* lame 3.91 dies on '1-channel interleaved' data */

    if(data){
        if (s->stereo) {
            lame_result = lame_encode_buffer_interleaved(
                s->gfp,
                data,
                avctx->frame_size,
                s->buffer + s->buffer_index,
                BUFFER_SIZE - s->buffer_index
                );
        } else {
            lame_result = lame_encode_buffer(
                s->gfp,
                data,
                data,
                avctx->frame_size,
                s->buffer + s->buffer_index,
                BUFFER_SIZE - s->buffer_index
                );
        }
    }else{
        lame_result= lame_encode_flush(
                s->gfp,
                s->buffer + s->buffer_index,
                BUFFER_SIZE - s->buffer_index
                );
    }

    if(lame_result < 0){
        if(lame_result==-1) {
            /* output buffer too small */
            av_log(avctx, AV_LOG_ERROR, "lame: output buffer too small (buffer index: %d, free bytes: %d)\n", s->buffer_index, BUFFER_SIZE - s->buffer_index);
        }
        return -1;
    }

    s->buffer_index += lame_result;

    if(s->buffer_index<4)
        return 0;

    len= mp3len(s->buffer, NULL, NULL);
//av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len, s->buffer_index);
    if(len <= s->buffer_index){
        memcpy(frame, s->buffer, len);
        s->buffer_index -= len;

        memmove(s->buffer, s->buffer+len, s->buffer_index);
            //FIXME fix the audio codec API, so we do not need the memcpy()
/*for(i=0; i<len; i++){
    av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]);
}*/
        return len;
    }else
        return 0;
}

/** Free the coded frame and close the lame handle. */
static av_cold int MP3lame_encode_close(AVCodecContext *avctx)
{
    Mp3AudioContext *s = avctx->priv_data;

    av_freep(&avctx->coded_frame);

    lame_close(s->gfp);
    return 0;
}


AVCodec libmp3lame_encoder = {
    "libmp3lame",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_MP3,
    sizeof(Mp3AudioContext),
    MP3lame_encode_init,
    MP3lame_encode_frame,
    MP3lame_encode_close,
    .capabilities= CODEC_CAP_DELAY,
    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
    .supported_samplerates= sSampleRates,
    .long_name= NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
};
123linslouis-android-video-cutter
jni/libavcodec/libmp3lame.c
C
asf20
7,008
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * simple arithmetic expression evaluator
 */

#ifndef AVCODEC_EVAL_H
#define AVCODEC_EVAL_H

typedef struct AVExpr AVExpr;

/**
 * Parses and evaluates an expression.
 * Note, this is significantly slower than ff_eval_expr()
 * when evaluating the same expression repeatedly.
 *
 * @param s expression as a zero terminated string for example "1+2^3+5*5+sin(2/3)"
 * @param func1 NULL terminated array of function pointers for functions which take 1 argument
 * @param func2 NULL terminated array of function pointers for functions which take 2 arguments
 * @param const_name NULL terminated array of zero terminated strings of constant identifiers for example {"PI", "E", 0}
 * @param func1_name NULL terminated array of zero terminated strings of func1 identifiers
 * @param func2_name NULL terminated array of zero terminated strings of func2 identifiers
 * @param error pointer to a char* which is set to an error message if something goes wrong
 * @param const_value a zero terminated array of values for the identifiers from const_name
 * @param opaque a pointer which will be passed to all functions from func1 and func2
 * @return the value of the expression
 */
double ff_parse_and_eval_expr(const char *s, const double *const_value, const char * const *const_name,
               double (* const *func1)(void *, double), const char * const *func1_name,
               double (* const *func2)(void *, double, double), const char * const *func2_name,
               void *opaque, const char **error);

/**
 * Parses an expression.
 *
 * @param s expression as a zero terminated string for example "1+2^3+5*5+sin(2/3)"
 * @param func1 NULL terminated array of function pointers for functions which take 1 argument
 * @param func2 NULL terminated array of function pointers for functions which take 2 arguments
 * @param const_name NULL terminated array of zero terminated strings of constant identifiers for example {"PI", "E", 0}
 * @param func1_name NULL terminated array of zero terminated strings of func1 identifiers
 * @param func2_name NULL terminated array of zero terminated strings of func2 identifiers
 * @param error pointer to a char* which is set to an error message if something goes wrong
 * @return AVExpr which must be freed with ff_free_expr() by the user when it is not needed anymore
 *         NULL if anything went wrong
 */
AVExpr *ff_parse_expr(const char *s, const char * const *const_name,
               double (* const *func1)(void *, double), const char * const *func1_name,
               double (* const *func2)(void *, double, double), const char * const *func2_name,
               const char **error);

/**
 * Evaluates a previously parsed expression.
 *
 * @param const_value a zero terminated array of values for the identifiers from ff_parse_expr() const_name
 * @param opaque a pointer which will be passed to all functions from func1 and func2
 * @return the value of the expression
 */
double ff_eval_expr(AVExpr * e, const double *const_value, void *opaque);

/**
 * Frees a parsed expression previously created with ff_parse_expr().
 */
void ff_free_expr(AVExpr *e);

/**
 * Parses the string in numstr and returns its value as a double. If
 * the string is empty, contains only whitespaces, or does not contain
 * an initial substring that has the expected syntax for a
 * floating-point number, no conversion is performed. In this case,
 * returns a value of zero and the value returned in tail is the value
 * of numstr.
 *
 * @param numstr a string representing a number, may contain one of
 * the International System number postfixes, for example 'K', 'M',
 * 'G'. If 'i' is appended after the postfix, powers of 2 are used
 * instead of powers of 10. The 'B' postfix multiplies the value for
 * 8, and can be appended after another postfix or used alone. This
 * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
 * @param tail if non-NULL puts here the pointer to the char next
 * after the last parsed character
 */
double av_strtod(const char *numstr, char **tail);

#endif /* AVCODEC_EVAL_H */
123linslouis-android-video-cutter
jni/libavcodec/eval.h
C
asf20
4,838
/* * Header file for hardcoded mpegaudiodec tables * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef MPEGAUDIO_TABLEGEN_H #define MPEGAUDIO_TABLEGEN_H #include <stdint.h> // do not use libavutil/mathematics.h since this is compiled both // for the host and the target and config.h is only valid for the target #include <math.h> #define TABLE_4_3_SIZE (8191 + 16)*4 #if CONFIG_HARDCODED_TABLES #define mpegaudio_tableinit() #include "libavcodec/mpegaudio_tables.h" #else static int8_t table_4_3_exp[TABLE_4_3_SIZE]; static uint32_t table_4_3_value[TABLE_4_3_SIZE]; static uint32_t exp_table[512]; static uint32_t expval_table[512][16]; static void mpegaudio_tableinit(void) { int i, value, exponent; for (i = 1; i < TABLE_4_3_SIZE; i++) { double value = i / 4; double f, fm; int e, m; f = value * cbrtf(value) * pow(2, (i & 3) * 0.25); fm = frexp(f, &e); m = (uint32_t)(fm * (1LL << 31) + 0.5); e += FRAC_BITS - 31 + 5 - 100; /* normalized to FRAC_BITS */ table_4_3_value[i] = m; table_4_3_exp[i] = -e; } for (exponent = 0; exponent < 512; exponent++) { for (value = 0; value < 16; value++) { double f = (double)value * cbrtf(value) * pow(2, (exponent - 400) * 0.25 + FRAC_BITS + 5); expval_table[exponent][value] = llrint(f); } 
exp_table[exponent] = expval_table[exponent][1]; } } #endif /* CONFIG_HARDCODED_TABLES */ #endif /* MPEGAUDIO_TABLEGEN_H */
123linslouis-android-video-cutter
jni/libavcodec/mpegaudio_tablegen.h
C
asf20
2,305
/* * Flash Screen Video encoder * Copyright (C) 2004 Alex Beregszaszi * Copyright (C) 2006 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* Encoding development sponsored by http://fh-campuswien.ac.at */ /** * @file * Flash Screen Video encoder * @author Alex Beregszaszi * @author Benjamin Larsson */ /* Bitstream description * The picture is divided into blocks that are zlib-compressed. * * The decoder is fed complete frames, the frameheader contains: * 4bits of block width * 12bits of frame width * 4bits of block height * 12bits of frame height * * Directly after the header are the compressed blocks. The blocks * have their compressed size represented with 16bits in the beginig. * If the size = 0 then the block is unchanged from the previous frame. * All blocks are decompressed until the buffer is consumed. * * Encoding ideas, a basic encoder would just use a fixed block size. * Block sizes can be multipels of 16, from 16 to 256. The blocks don't * have to be quadratic. A brute force search with a set of different * block sizes should give a better result than to just use a fixed size. */ /* TODO: * Don't reencode the frame in brute force mode if the frame is a dupe. Speed up. * Make the difference check faster. 
*/ #include <stdio.h> #include <stdlib.h> #include <zlib.h> #include "avcodec.h" #include "put_bits.h" #include "bytestream.h" typedef struct FlashSVContext { AVCodecContext *avctx; uint8_t *previous_frame; AVFrame frame; int image_width, image_height; int block_width, block_height; uint8_t* tmpblock; uint8_t* encbuffer; int block_size; z_stream zstream; int last_key_frame; } FlashSVContext; static int copy_region_enc(uint8_t *sptr, uint8_t *dptr, int dx, int dy, int h, int w, int stride, uint8_t *pfptr) { int i,j; uint8_t *nsptr; uint8_t *npfptr; int diff = 0; for (i = dx+h; i > dx; i--) { nsptr = sptr+(i*stride)+dy*3; npfptr = pfptr+(i*stride)+dy*3; for (j=0 ; j<w*3 ; j++) { diff |=npfptr[j]^nsptr[j]; dptr[j] = nsptr[j]; } dptr += w*3; } if (diff) return 1; return 0; } static av_cold int flashsv_encode_init(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; s->avctx = avctx; if ((avctx->width > 4095) || (avctx->height > 4095)) { av_log(avctx, AV_LOG_ERROR, "Input dimensions too large, input must be max 4096x4096 !\n"); return -1; } // Needed if zlib unused or init aborted before deflateInit memset(&(s->zstream), 0, sizeof(z_stream)); s->last_key_frame=0; s->image_width = avctx->width; s->image_height = avctx->height; s->tmpblock = av_mallocz(3*256*256); s->encbuffer = av_mallocz(s->image_width*s->image_height*3); if (!s->tmpblock || !s->encbuffer) { av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n"); return -1; } return 0; } static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf, int buf_size, int block_width, int block_height, uint8_t *previous_frame, int* I_frame) { PutBitContext pb; int h_blocks, v_blocks, h_part, v_part, i, j; int buf_pos, res; int pred_blocks = 0; init_put_bits(&pb, buf, buf_size*8); put_bits(&pb, 4, (block_width/16)-1); put_bits(&pb, 12, s->image_width); put_bits(&pb, 4, (block_height/16)-1); put_bits(&pb, 12, s->image_height); flush_put_bits(&pb); buf_pos=4; h_blocks = s->image_width / block_width; 
h_part = s->image_width % block_width; v_blocks = s->image_height / block_height; v_part = s->image_height % block_height; /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part?1:0); j++) { int hp = j*block_height; // horiz position in frame int hs = (j<v_blocks)?block_height:v_part; // size of block /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part?1:0); i++) { int wp = i*block_width; // vert position in frame int ws = (i<h_blocks)?block_width:h_part; // size of block int ret=Z_OK; uint8_t *ptr; ptr = buf+buf_pos; //copy the block to the temp buffer before compression (if it differs from the previous frame's block) res = copy_region_enc(p->data[0], s->tmpblock, s->image_height-(hp+hs+1), wp, hs, ws, p->linesize[0], previous_frame); if (res || *I_frame) { unsigned long zsize; zsize = 3*block_width*block_height; ret = compress2(ptr+2, &zsize, s->tmpblock, 3*ws*hs, 9); //ret = deflateReset(&(s->zstream)); if (ret != Z_OK) av_log(s->avctx, AV_LOG_ERROR, "error while compressing block %dx%d\n", i, j); bytestream_put_be16(&ptr,(unsigned int)zsize); buf_pos += zsize+2; //av_log(avctx, AV_LOG_ERROR, "buf_pos = %d\n", buf_pos); } else { pred_blocks++; bytestream_put_be16(&ptr,0); buf_pos += 2; } } } if (pred_blocks) *I_frame = 0; else *I_frame = 1; return buf_pos; } static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data) { FlashSVContext * const s = avctx->priv_data; AVFrame *pict = data; AVFrame * const p = &s->frame; uint8_t *pfptr; int res; int I_frame = 0; int opt_w, opt_h; *p = *pict; /* First frame needs to be a keyframe */ if (avctx->frame_number == 0) { s->previous_frame = av_mallocz(FFABS(p->linesize[0])*s->image_height); if (!s->previous_frame) { av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n"); return -1; } I_frame = 1; } if (p->linesize[0] < 0) pfptr = s->previous_frame - ((s->image_height-1) * p->linesize[0]); else pfptr = s->previous_frame; /* Check the placement of keyframes */ 
if (avctx->gop_size > 0) { if (avctx->frame_number >= s->last_key_frame + avctx->gop_size) { I_frame = 1; } } opt_w=4; opt_h=4; if (buf_size < s->image_width*s->image_height*3) { //Conservative upper bound check for compressed data av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->image_width*s->image_height*3); return -1; } res = encode_bitstream(s, p, buf, buf_size, opt_w*16, opt_h*16, pfptr, &I_frame); //save the current frame if(p->linesize[0] > 0) memcpy(s->previous_frame, p->data[0], s->image_height*p->linesize[0]); else memcpy(s->previous_frame, p->data[0] + p->linesize[0] * (s->image_height-1), s->image_height*FFABS(p->linesize[0])); //mark the frame type so the muxer can mux it correctly if (I_frame) { p->pict_type = FF_I_TYPE; p->key_frame = 1; s->last_key_frame = avctx->frame_number; av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n",avctx->frame_number); } else { p->pict_type = FF_P_TYPE; p->key_frame = 0; } avctx->coded_frame = p; return res; } static av_cold int flashsv_encode_end(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; deflateEnd(&(s->zstream)); av_free(s->encbuffer); av_free(s->previous_frame); av_free(s->tmpblock); return 0; } AVCodec flashsv_encoder = { "flashsv", AVMEDIA_TYPE_VIDEO, CODEC_ID_FLASHSV, sizeof(FlashSVContext), flashsv_encode_init, flashsv_encode_frame, flashsv_encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"), };
123linslouis-android-video-cutter
jni/libavcodec/flashsvenc.c
C
asf20
8,630
/* * FLAC common code * Copyright (c) 2009 Justin Ruggles * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "flac.h" int ff_flac_get_max_frame_size(int blocksize, int ch, int bps) { /* Technically, there is no limit to FLAC frame size, but an encoder should not write a frame that is larger than if verbatim encoding mode were to be used. */ int count; count = 16; /* frame header */ count += ch * ((7+bps+7)/8); /* subframe headers */ if (ch == 2) { /* for stereo, need to account for using decorrelation */ count += (( 2*bps+1) * blocksize + 7) / 8; } else { count += ( ch*bps * blocksize + 7) / 8; } count += 2; /* frame footer */ return count; }
123linslouis-android-video-cutter
jni/libavcodec/flac.c
C
asf20
1,473
/* * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ //#define DEBUG // Ported by Vlad Stelmahovsky #include "avcodec.h" #define INCL_DOS #define INCL_DOSERRORS #define INCL_DOSDEVIOCTL #include <os2.h> typedef struct ThreadContext{ AVCodecContext *avctx; int thread; HEV work_sem; HEV done_sem; int (*func)(AVCodecContext *c, void *arg); void *arg; int ret; }ThreadContext; static void attribute_align_arg thread_func(void *v){ ThreadContext *c= v; for(;;){ //printf("thread_func %X enter wait\n", (int)v); fflush(stdout); DosWaitEventSem(c->work_sem, SEM_INDEFINITE_WAIT); // WaitForSingleObject(c->work_sem, INFINITE); //printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout); if(c->func) c->ret= c->func(c->avctx, c->arg); else return; //printf("thread_func %X signal complete\n", (int)v); fflush(stdout); DosPostEventSem(c->done_sem); // ReleaseSemaphore(c->done_sem, 1, 0); } return; } /** * free what has been allocated by avcodec_thread_init(). 
* must be called after decoding has finished, especially do not call while avcodec_thread_execute() is running */ void avcodec_thread_free(AVCodecContext *s){ ThreadContext *c= s->thread_opaque; int i; for(i=0; i<s->thread_count; i++){ c[i].func= NULL; DosPostEventSem(c[i].work_sem); // ReleaseSemaphore(c[i].work_sem, 1, 0); DosWaitThread((PTID)&c[i].thread,DCWW_WAIT); // WaitForSingleObject(c[i].thread, INFINITE); if(c[i].work_sem) DosCloseEventSem(c[i].work_sem);//CloseHandle(c[i].work_sem); if(c[i].done_sem) DosCloseEventSem(c[i].done_sem);//CloseHandle(c[i].done_sem); } av_freep(&s->thread_opaque); } static int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){ ThreadContext *c= s->thread_opaque; int i; assert(s == c->avctx); assert(count <= s->thread_count); /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */ for(i=0; i<count; i++){ c[i].arg= (char*)arg + i*size; c[i].func= func; c[i].ret= 12345; DosPostEventSem(c[i].work_sem); // ReleaseSemaphore(c[i].work_sem, 1, 0); } for(i=0; i<count; i++){ DosWaitEventSem(c[i].done_sem,SEM_INDEFINITE_WAIT); // WaitForSingleObject(c[i].done_sem, INFINITE); c[i].func= NULL; if(ret) ret[i]= c[i].ret; } return 0; } int avcodec_thread_init(AVCodecContext *s, int thread_count){ int i; ThreadContext *c; uint32_t threadid; s->thread_count= thread_count; if (thread_count <= 1) return 0; assert(!s->thread_opaque); c= av_mallocz(sizeof(ThreadContext)*thread_count); s->thread_opaque= c; for(i=0; i<thread_count; i++){ //printf("init semaphors %d\n", i); fflush(stdout); c[i].avctx= s; if (DosCreateEventSem(NULL,&c[i].work_sem,DC_SEM_SHARED,0)) goto fail; if (DosCreateEventSem(NULL,&c[i].done_sem,DC_SEM_SHARED,0)) goto fail; //printf("create thread %d\n", i); fflush(stdout); // c[i].thread = (HANDLE)_beginthreadex(NULL, 0, thread_func, &c[i], 0, &threadid ); c[i].thread = 
_beginthread(thread_func, NULL, 0x10000, &c[i]); if( c[i].thread <= 0 ) goto fail; } //printf("init done\n"); fflush(stdout); s->execute= avcodec_thread_execute; return 0; fail: avcodec_thread_free(s); return -1; }
123linslouis-android-video-cutter
jni/libavcodec/os2thread.c
C
asf20
4,440
/* * Electronic Arts TGQ Video Decoder * Copyright (c) 2007-2008 Peter Ross <pross@xvid.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Electronic Arts TGQ Video Decoder * @author Peter Ross <pross@xvid.org> * * Technical details here: * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_TGQ */ #include "avcodec.h" #define ALT_BITSTREAM_READER_LE #include "get_bits.h" #include "bytestream.h" #include "dsputil.h" #include "aandcttab.h" typedef struct TgqContext { AVCodecContext *avctx; DSPContext dsp; AVFrame frame; int width,height; ScanTable scantable; int qtable[64]; DECLARE_ALIGNED(16, DCTELEM, block)[6][64]; } TgqContext; static av_cold int tgq_decode_init(AVCodecContext *avctx){ TgqContext *s = avctx->priv_data; s->avctx = avctx; if(avctx->idct_algo==FF_IDCT_AUTO) avctx->idct_algo=FF_IDCT_EA; dsputil_init(&s->dsp, avctx); ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct); avctx->time_base = (AVRational){1, 15}; avctx->pix_fmt = PIX_FMT_YUV420P; return 0; } static void tgq_decode_block(TgqContext *s, DCTELEM block[64], GetBitContext *gb){ uint8_t *perm = s->scantable.permutated; int i,j,value; block[0] = get_sbits(gb,8) * s->qtable[0]; for(i=1; i<64; ) { switch(show_bits(gb,3)) { case 4: block[perm[i++]] = 0; case 0: 
block[perm[i++]] = 0; skip_bits(gb,3); break; case 5: case 1: skip_bits(gb,2); value = get_bits(gb,6); for(j=0; j<value; j++) block[perm[i++]] = 0; break; case 6: skip_bits(gb,3); block[perm[i]] = -s->qtable[perm[i]]; i++; break; case 2: skip_bits(gb,3); block[perm[i]] = s->qtable[perm[i]]; i++; break; case 7: // 111b case 3: // 011b skip_bits(gb,2); if (show_bits(gb,6)==0x3F) { skip_bits(gb, 6); block[perm[i]] = get_sbits(gb,8)*s->qtable[perm[i]]; }else{ block[perm[i]] = get_sbits(gb,6)*s->qtable[perm[i]]; } i++; break; } } block[0] += 128<<4; } static void tgq_idct_put_mb(TgqContext *s, DCTELEM (*block)[64], int mb_x, int mb_y){ int linesize= s->frame.linesize[0]; uint8_t *dest_y = s->frame.data[0] + (mb_y * 16* linesize ) + mb_x * 16; uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8; uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8; s->dsp.idct_put(dest_y , linesize, block[0]); s->dsp.idct_put(dest_y + 8, linesize, block[1]); s->dsp.idct_put(dest_y + 8*linesize , linesize, block[2]); s->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]); if(!(s->avctx->flags&CODEC_FLAG_GRAY)){ s->dsp.idct_put(dest_cb, s->frame.linesize[1], block[4]); s->dsp.idct_put(dest_cr, s->frame.linesize[2], block[5]); } } static inline void tgq_dconly(TgqContext *s, unsigned char *dst, int dst_stride, int dc){ int level = av_clip_uint8((dc*s->qtable[0] + 2056)>>4); int j; for(j=0;j<8;j++) memset(dst+j*dst_stride, level, 8); } static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc) { int linesize= s->frame.linesize[0]; uint8_t *dest_y = s->frame.data[0] + (mb_y * 16* linesize ) + mb_x * 16; uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8; uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8; tgq_dconly(s,dest_y , linesize, dc[0]); tgq_dconly(s,dest_y + 8, linesize, dc[1]); tgq_dconly(s,dest_y + 8*linesize , linesize, 
dc[2]); tgq_dconly(s,dest_y + 8*linesize + 8, linesize, dc[3]); if(!(s->avctx->flags&CODEC_FLAG_GRAY)) { tgq_dconly(s,dest_cb, s->frame.linesize[1], dc[4]); tgq_dconly(s,dest_cr, s->frame.linesize[2], dc[5]); } } static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x, const uint8_t **bs, const uint8_t *buf_end){ int mode; int i; int8_t dc[6]; mode = bytestream_get_byte(bs); if (mode>buf_end-*bs) { av_log(s->avctx, AV_LOG_ERROR, "truncated macroblock\n"); return; } if (mode>12) { GetBitContext gb; init_get_bits(&gb, *bs, mode*8); for(i=0; i<6; i++) tgq_decode_block(s, s->block[i], &gb); tgq_idct_put_mb(s, s->block, mb_x, mb_y); }else{ if (mode==3) { memset(dc, (*bs)[0], 4); dc[4] = (*bs)[1]; dc[5] = (*bs)[2]; }else if (mode==6) { memcpy(dc, *bs, 6); }else if (mode==12) { for(i=0; i<6; i++) dc[i] = (*bs)[i*2]; }else{ av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode); } tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc); } *bs += mode; } static void tgq_calculate_qtable(TgqContext *s, int quant){ int i,j; const int a = (14*(100-quant))/100 + 1; const int b = (11*(100-quant))/100 + 4; for(j=0;j<8;j++) for(i=0;i<8;i++) if (s->avctx->idct_algo==FF_IDCT_EA) s->qtable[j*8+i] = ((a*(j+i)/(7+7) + b)*ff_inv_aanscales[j*8+i])>>(14-4); else s->qtable[j*8+i] = (a*(j+i)/(7+7) + b)<<3; } static int tgq_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){ const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_start = buf; const uint8_t *buf_end = buf + buf_size; TgqContext *s = avctx->priv_data; int x,y; int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF; buf += 8; if(8>buf_end-buf) { av_log(avctx, AV_LOG_WARNING, "truncated header\n"); return -1; } s->width = big_endian ? AV_RB16(&buf[0]) : AV_RL16(&buf[0]); s->height = big_endian ? 
AV_RB16(&buf[2]) : AV_RL16(&buf[2]); if (s->avctx->width!=s->width || s->avctx->height!=s->height) { avcodec_set_dimensions(s->avctx, s->width, s->height); if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); } tgq_calculate_qtable(s, buf[4]); buf += 8; if (!s->frame.data[0]) { s->frame.key_frame = 1; s->frame.pict_type = FF_I_TYPE; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID; if (avctx->get_buffer(avctx, &s->frame)) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } } for (y=0; y<(avctx->height+15)/16; y++) for (x=0; x<(avctx->width+15)/16; x++) tgq_decode_mb(s, y, x, &buf, buf_end); *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf-buf_start; } static av_cold int tgq_decode_end(AVCodecContext *avctx){ TgqContext *s = avctx->priv_data; if (s->frame.data[0]) s->avctx->release_buffer(avctx, &s->frame); return 0; } AVCodec eatgq_decoder = { "eatgq", AVMEDIA_TYPE_VIDEO, CODEC_ID_TGQ, sizeof(TgqContext), tgq_decode_init, NULL, tgq_decode_end, tgq_decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"), };
123linslouis-android-video-cutter
jni/libavcodec/eatgq.c
C
asf20
8,287
/* * DSP functions for Indeo Video Interactive codecs (Indeo4 and Indeo5) * * Copyright (c) 2009 Maxim Poliakovski * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * DSP functions (inverse transforms, motion compensations, wavelet recompostion) * for Indeo Video Interactive codecs. */ #ifndef AVCODEC_IVI_DSP_H #define AVCODEC_IVI_DSP_H #include "avcodec.h" #include "ivi_common.h" /** * 5/3 wavelet recomposition filter for Indeo5 * * @param plane [in] pointer to the descriptor of the plane being processed * @param dst [out] pointer to the destination buffer * @param dst_pitch [in] pitch of the destination buffer * @param num_bands [in] number of wavelet bands to be processed */ void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst, const int dst_pitch, const int num_bands); /** * two-dimensional inverse slant 8x8 transform * * @param in [in] pointer to the vector of transform coefficients * @param out [out] pointer to the output buffer (frame) * @param pitch [in] pitch to move to the next y line * @param flags [in] pointer to the array of column flags: * != 0 - non_empty column, 0 - empty one * (this array must be filled by caller) */ void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, uint32_t pitch, const uint8_t *flags); /** * two-dimensional 
inverse slant 4x4 transform * * @param in [in] pointer to the vector of transform coefficients * @param out [out] pointer to the output buffer (frame) * @param pitch [in] pitch to move to the next y line * @param flags [in] pointer to the array of column flags: * != 0 - non_empty column, 0 - empty one * (this array must be filled by caller) */ void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, uint32_t pitch, const uint8_t *flags); /** * DC-only two-dimensional inverse slant transform. * Performing the inverse slant transform in this case is equivalent to * spreading (DC_coeff + 1)/2 over the whole block. * It works much faster than performing the slant transform on a vector of zeroes. * * @param in [in] pointer to the dc coefficient * @param out [out] pointer to the output buffer (frame) * @param pitch [in] pitch to move to the next y line * @param blk_size [in] transform block size */ void ff_ivi_dc_slant_2d(const int32_t *in, int16_t *out, uint32_t pitch, int blk_size); /** * inverse 1D row slant transform * * @param in [in] pointer to the vector of transform coefficients * @param out [out] pointer to the output buffer (frame) * @param pitch [in] pitch to move to the next y line * @param flags [in] pointer to the array of column flags (unused here) */ void ff_ivi_row_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const uint8_t *flags); /** * inverse 1D column slant transform * * @param in [in] pointer to the vector of transform coefficients * @param out [out] pointer to the output buffer (frame) * @param pitch [in] pitch to move to the next y line * @param flags [in] pointer to the array of column flags: * != 0 - non_empty column, 0 - empty one * (this array must be filled by caller) */ void ff_ivi_col_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const uint8_t *flags); /** * DC-only inverse row slant transform */ void ff_ivi_dc_row_slant(const int32_t *in, int16_t *out, uint32_t pitch, int blk_size); /** * DC-only inverse column 
slant transform */ void ff_ivi_dc_col_slant(const int32_t *in, int16_t *out, uint32_t pitch, int blk_size); /** * Copies the pixels into the frame buffer. */ void ff_ivi_put_pixels_8x8(const int32_t *in, int16_t *out, uint32_t pitch, const uint8_t *flags); /** * Copies the DC coefficient into the first pixel of the block and * zeroes all others. */ void ff_ivi_put_dc_pixel_8x8(const int32_t *in, int16_t *out, uint32_t pitch, int blk_size); /** * 8x8 block motion compensation with adding delta * * @param buf [in,out] pointer to the block in the current frame buffer containing delta * @param ref_buf [in] pointer to the corresponding block in the reference frame * @param pitch [in] pitch for moving to the next y line * @param mc_type [in] interpolation type */ void ff_ivi_mc_8x8_delta(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); /** * 4x4 block motion compensation with adding delta * * @param buf [in,out] pointer to the block in the current frame buffer containing delta * @param ref_buf [in] pointer to the corresponding block in the reference frame * @param pitch [in] pitch for moving to the next y line * @param mc_type [in] interpolation type */ void ff_ivi_mc_4x4_delta(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); /** * motion compensation without adding delta * * @param buf [in,out] pointer to the block in the current frame receiving the result * @param ref_buf [in] pointer to the corresponding block in the reference frame * @param pitch [in] pitch for moving to the next y line * @param mc_type [in] interpolation type */ void ff_ivi_mc_8x8_no_delta(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); /** * 4x4 block motion compensation without adding delta * * @param buf [in,out] pointer to the block in the current frame receiving the result * @param ref_buf [in] pointer to the corresponding block in the reference frame * @param pitch [in] pitch for moving to the next y line * @param mc_type [in] 
interpolation type */ void ff_ivi_mc_4x4_no_delta(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); #endif /* AVCODEC_IVI_DSP_H */
123linslouis-android-video-cutter
jni/libavcodec/ivi_dsp.h
C
asf20
6,814
/* * AC-3 tables * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AC3TAB_H #define AVCODEC_AC3TAB_H #include "libavutil/common.h" extern const uint16_t ff_ac3_frame_size_tab[38][3]; extern const uint8_t ff_ac3_channels_tab[8]; extern const uint16_t ff_ac3_channel_layout_tab[8]; extern const uint8_t ff_ac3_enc_channel_map[8][2][6]; extern const uint8_t ff_ac3_dec_channel_map[8][2][6]; extern const uint16_t ff_ac3_sample_rate_tab[3]; extern const uint16_t ff_ac3_bitrate_tab[19]; extern const int16_t ff_ac3_window[256]; extern const uint8_t ff_ac3_log_add_tab[260]; extern const uint16_t ff_ac3_hearing_threshold_tab[50][3]; extern const uint8_t ff_ac3_bap_tab[64]; extern const uint8_t ff_ac3_slow_decay_tab[4]; extern const uint8_t ff_ac3_fast_decay_tab[4]; extern const uint16_t ff_ac3_slow_gain_tab[4]; extern const uint16_t ff_ac3_db_per_bit_tab[4]; extern const int16_t ff_ac3_floor_tab[8]; extern const uint16_t ff_ac3_fast_gain_tab[8]; extern const uint8_t ff_ac3_critical_band_size_tab[50]; extern const uint16_t ff_eac3_default_chmap[8]; /** Custom channel map locations bitmask * Other channels described in documentation: * Lc/Rc pair, Lrs/Rrs pair, Ts, Lsd/Rsd pair, * Lw/Rw pair, Lvh/Rvh pair, Cvh, Reserved, LFE2 */ enum 
CustomChannelMapLocation{ AC3_CHMAP_L= 1<<(15-0), AC3_CHMAP_C= 1<<(15-1), AC3_CHMAP_R= 1<<(15-2), AC3_CHMAP_L_SUR= 1<<(15-3), AC3_CHMAP_R_SUR = 1<<(15-4), AC3_CHMAP_C_SUR= 1<<(15-7), AC3_CHMAP_LFE = 1<<(15-15) }; #endif /* AVCODEC_AC3TAB_H */
123linslouis-android-video-cutter
jni/libavcodec/ac3tab.h
C
asf20
2,354
/* * copyright (c) 2002 Leon van Stuivenberg * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_PS2_MMI_H #define AVCODEC_PS2_MMI_H #define align16 __attribute__ ((aligned (16))) /* #define r0 $zero #define r1 $at //assembler! #define r2 $v0 //return #define r3 $v1 //return #define r4 $a0 //arg #define r5 $a1 //arg #define r6 $a2 //arg #define r7 $a3 //arg #define r8 $t0 //temp #define r9 $t1 //temp #define r10 $t2 //temp #define r11 $t3 //temp #define r12 $t4 //temp #define r13 $t5 //temp #define r14 $t6 //temp #define r15 $t7 //temp #define r16 $s0 //saved temp #define r17 $s1 //saved temp #define r18 $s2 //saved temp #define r19 $s3 //saved temp #define r20 $s4 //saved temp #define r21 $s5 //saved temp #define r22 $s6 //saved temp #define r23 $s7 //saved temp #define r24 $t8 //temp #define r25 $t9 //temp #define r26 $k0 //kernel #define r27 $k1 //kernel #define r28 $gp //global ptr #define r29 $sp //stack ptr #define r30 $fp //frame ptr #define r31 $ra //return addr */ #define lq(base, off, reg) \ __asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) ) #define lq2(mem, reg) \ __asm__ volatile ("lq " #reg ", %0" : : "r" (mem)) #define sq(reg, off, base) \ __asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) ) /* #define ld(base, off, reg) \ __asm__ 
volatile ("ld " #reg ", " #off "("#base ")") */ #define ld3(base, off, reg) \ __asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off))) #define ldr3(base, off, reg) \ __asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off))) #define ldl3(base, off, reg) \ __asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off))) /* #define sd(reg, off, base) \ __asm__ volatile ("sd " #reg ", " #off "("#base ")") */ //seems assembler has bug encoding mnemonic 'sd', so DIY #define sd3(reg, off, base) \ __asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off))) #define sw(reg, off, base) \ __asm__ volatile ("sw " #reg ", " #off "("#base ")") #define sq2(reg, mem) \ __asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem))) #define pinth(rs, rt, rd) \ __asm__ volatile ("pinth " #rd ", " #rs ", " #rt ) #define phmadh(rs, rt, rd) \ __asm__ volatile ("phmadh " #rd ", " #rs ", " #rt ) #define pcpyud(rs, rt, rd) \ __asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt ) #define pcpyld(rs, rt, rd) \ __asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt ) #define pcpyh(rt, rd) \ __asm__ volatile ("pcpyh " #rd ", " #rt ) #define paddw(rs, rt, rd) \ __asm__ volatile ("paddw " #rd ", " #rs ", " #rt ) #define pextlw(rs, rt, rd) \ __asm__ volatile ("pextlw " #rd ", " #rs ", " #rt ) #define pextuw(rs, rt, rd) \ __asm__ volatile ("pextuw " #rd ", " #rs ", " #rt ) #define pextlh(rs, rt, rd) \ __asm__ volatile ("pextlh " #rd ", " #rs ", " #rt ) #define pextuh(rs, rt, rd) \ __asm__ volatile ("pextuh " #rd ", " #rs ", " #rt ) #define psubw(rs, rt, rd) \ __asm__ volatile ("psubw " #rd ", " #rs ", " #rt ) #define psraw(rt, sa, rd) \ __asm__ volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) ) #define ppach(rs, rt, rd) \ __asm__ volatile ("ppach " #rd ", " #rs ", " #rt ) #define ppacb(rs, rt, rd) \ __asm__ volatile ("ppacb " #rd ", " #rs ", " #rt ) #define prevh(rt, rd) \ __asm__ volatile ("prevh " #rd ", " #rt ) 
#define pmulth(rs, rt, rd) \ __asm__ volatile ("pmulth " #rd ", " #rs ", " #rt ) #define pmaxh(rs, rt, rd) \ __asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt ) #define pminh(rs, rt, rd) \ __asm__ volatile ("pminh " #rd ", " #rs ", " #rt ) #define pinteh(rs, rt, rd) \ __asm__ volatile ("pinteh " #rd ", " #rs ", " #rt ) #define paddh(rs, rt, rd) \ __asm__ volatile ("paddh " #rd ", " #rs ", " #rt ) #define psubh(rs, rt, rd) \ __asm__ volatile ("psubh " #rd ", " #rs ", " #rt ) #define psrah(rt, sa, rd) \ __asm__ volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) ) #define pmfhl_uw(rd) \ __asm__ volatile ("pmfhl.uw " #rd) #define pextlb(rs, rt, rd) \ __asm__ volatile ("pextlb " #rd ", " #rs ", " #rt ) #endif /* AVCODEC_PS2_MMI_H */
123linslouis-android-video-cutter
jni/libavcodec/ps2/mmi.h
C
asf20
5,695
/* * Copyright (c) 2000,2001 Fabrice Bellard * * MMI optimization by Leon van Stuivenberg * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" #include "libavcodec/mpegvideo.h" static void dct_unquantize_h263_mmi(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int level=0, qmul, qadd; int nCoeffs; assert(s->block_last_index[n]>=0); qadd = (qscale - 1) | 1; qmul = qscale << 1; if (s->mb_intra) { if (!s->h263_aic) { if (n < 4) level = block[0] * s->y_dc_scale; else level = block[0] * s->c_dc_scale; }else { qadd = 0; level = block[0]; } nCoeffs= 63; //does not always use zigzag table } else { nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; } __asm__ volatile( "add $14, $0, %3 \n\t" "pcpyld $8, %0, %0 \n\t" "pcpyh $8, $8 \n\t" //r8 = qmul "pcpyld $9, %1, %1 \n\t" "pcpyh $9, $9 \n\t" //r9 = qadd ".p2align 2 \n\t" "1: \n\t" "lq $10, 0($14) \n\t" //r10 = level "addi $14, $14, 16 \n\t" //block+=8 "addi %2, %2, -8 \n\t" "pcgth $11, $0, $10 \n\t" //r11 = level < 0 ? -1 : 0 "pcgth $12, $10, $0 \n\t" //r12 = level > 0 ? -1 : 0 "por $12, $11, $12 \n\t" "pmulth $10, $10, $8 \n\t" "paddh $13, $9, $11 \n\t" "pxor $13, $13, $11 \n\t" //r13 = level < 0 ? 
-qadd : qadd "pmfhl.uw $11 \n\t" "pinteh $10, $11, $10 \n\t" //r10 = level * qmul "paddh $10, $10, $13 \n\t" "pand $10, $10, $12 \n\t" "sq $10, -16($14) \n\t" "bgez %2, 1b \n\t" :: "r"(qmul), "r" (qadd), "r" (nCoeffs), "r" (block) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "memory" ); if(s->mb_intra) block[0]= level; } void MPV_common_init_mmi(MpegEncContext *s) { s->dct_unquantize_h263_intra = s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi; }
123linslouis-android-video-cutter
jni/libavcodec/ps2/mpegvideo_mmi.c
C
asf20
2,990
/* * Originally provided by Intel at Application Note AP-922. * * Column code adapted from Peter Gubanov. * Copyright (c) 2000-2001 Peter Gubanov <peter@elecard.net.ru> * http://www.elecard.com/peter/idct.shtml * rounding trick copyright (c) 2000 Michel Lespinasse <walken@zoy.org> * * MMI port and (c) 2002 by Leon van Stuivenberg * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "libavcodec/dsputil.h" #include "mmi.h" #define BITS_INV_ACC 5 // 4 or 5 for IEEE #define SHIFT_INV_ROW (16 - BITS_INV_ACC) #define SHIFT_INV_COL (1 + BITS_INV_ACC) #define TG1 6518 #define TG2 13573 #define TG3 21895 #define CS4 23170 #define ROUNDER_0 0 #define ROUNDER_1 16 #define TAB_i_04 (32+0) #define TAB_i_17 (32+64) #define TAB_i_26 (32+128) #define TAB_i_35 (32+192) #define TG_1_16 (32+256+0) #define TG_2_16 (32+256+16) #define TG_3_16 (32+256+32) #define COS_4_16 (32+256+48) #define CLIPMAX (32+256+64+0) static short consttable[] align16 = { /* rounder 0*/ // assume SHIFT_INV_ROW == 11 0x3ff, 1, 0x3ff, 1, 0x3ff, 1, 0x3ff, 1, /* rounder 1*/ 0x3ff, 0, 0x3ff, 0, 0x3ff, 0, 0x3ff, 0, /* row 0/4*/ 16384, 21407, -16384, -21407, 22725, 19266, -22725, -12873, 8867, 16384, 8867, 16384, 4520, 12873, -4520, 19266, 16384, -8867, 16384, -8867, 12873, -22725, 19266, -22725, 
21407, -16384, -21407, 16384, 19266, 4520, -12873, 4520, /* row 1/7*/ 22725, 29692, -22725, -29692, 31521, 26722, -31521, -17855, 12299, 22725, 12299, 22725, 6270, 17855, -6270, 26722, 22725, -12299, 22725, -12299, 17855, -31521, 26722, -31521, 29692, -22725, -29692, 22725, 26722, 6270, -17855, 6270, /* row 2/6*/ 21407, 27969, -21407, -27969, 29692, 25172, -29692, -16819, 11585, 21407, 11585, 21407, 5906, 16819, -5906, 25172, 21407, -11585, 21407, -11585, 16819, -29692, 25172, -29692, 27969, -21407, -27969, 21407, 25172, 5906, -16819, 5906, /*row 3/5*/ 19266, 25172, -19266, -25172, 26722, 22654, -26722, -15137, 10426, 19266, 10426, 19266, 5315, 15137, -5315, 22654, 19266, -10426, 19266, -10426, 15137, -26722, 22654, -26722, 25172, -19266, -25172, 19266, 22654, 5315, -15137, 5315, /*column constants*/ TG1, TG1, TG1, TG1, TG1, TG1, TG1, TG1, TG2, TG2, TG2, TG2, TG2, TG2, TG2, TG2, TG3, TG3, TG3, TG3, TG3, TG3, TG3, TG3, CS4, CS4, CS4, CS4, CS4, CS4, CS4, CS4, /* clamp */ 255, 255, 255, 255, 255, 255, 255, 255 }; #define DCT_8_INV_ROW1(blk, rowoff, taboff, rnd, outreg) { \ lq(blk, rowoff, $16); /* r16 = x7 x5 x3 x1 x6 x4 x2 x0 */ \ /*slot*/ \ lq($24, 0+taboff, $17); /* r17 = w */ \ /*delay slot $16*/ \ lq($24, 16+taboff, $18);/* r18 = w */ \ prevh($16, $2); /* r2 = x1 x3 x5 x7 x0 x2 x4 x6 */ \ lq($24, 32+taboff, $19);/* r19 = w */ \ phmadh($17, $16, $17); /* r17 = b1"b0'a1"a0' */ \ lq($24, 48+taboff, $20);/* r20 = w */ \ phmadh($18, $2, $18); /* r18 = b1'b0"a1'a0" */ \ phmadh($19, $16, $19); /* r19 = b3"b2'a3"a2' */ \ phmadh($20, $2, $20); /* r20 = b3'b2"a3'a2" */ \ paddw($17, $18, $17); /* r17 = (b1)(b0)(a1)(a0) */ \ paddw($19, $20, $19); /* r19 = (b3)(b2)(a3)(a2) */ \ pcpyld($19, $17, $18); /* r18 = (a3)(a2)(a1)(a0) */ \ pcpyud($17, $19, $20); /* r20 = (b3)(b2)(b1)(b0) */ \ paddw($18, rnd, $18); /* r18 = (a3)(a2)(a1)(a0) */\ paddw($18, $20, $17); /* r17 = ()()()(a0+b0) */ \ psubw($18, $20, $20); /* r20 = ()()()(a0-b0) */ \ psraw($17, SHIFT_INV_ROW, $17); /* r17 = 
(y3 y2 y1 y0) */ \ psraw($20, SHIFT_INV_ROW, $20); /* r20 = (y4 y5 y6 y7) */ \ ppach($20, $17, outreg);/* out = y4 y5 y6 y7 y3 y2 y1 y0 Note order */ \ \ prevh(outreg, $2); \ pcpyud($2, $2, $2); \ pcpyld($2, outreg, outreg); \ } #define DCT_8_INV_COL8() \ \ lq($24, TG_3_16, $2); /* r2 = tn3 */ \ \ pmulth($11, $2, $17); /* r17 = x3 * tn3 (6420) */ \ psraw($17, 15, $17); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $17, $17); /* r17 = x3 * tn3 */ \ psubh($17, $13, $17); /* r17 = tm35 */ \ \ pmulth($13, $2, $18); /* r18 = x5 * tn3 (6420) */ \ psraw($18, 15, $18); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $18, $18); /* r18 = x5 * tn3 */ \ paddh($18, $11, $18); /* r18 = tp35 */ \ \ lq($24, TG_1_16, $2); /* r2 = tn1 */ \ \ pmulth($15, $2, $19); /* r19 = x7 * tn1 (6420) */ \ psraw($19, 15, $19); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $19, $19); /* r19 = x7 * tn1 */ \ paddh($19, $9, $19); /* r19 = tp17 */ \ \ pmulth($9, $2, $20); /* r20 = x1 * tn1 (6420) */ \ psraw($20, 15, $20); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $20, $20); /* r20 = x1 * tn1 */ \ psubh($20, $15, $20); /* r20 = tm17 */ \ \ psubh($19, $18, $3); /* r3 = t1 */ \ paddh($20, $17, $16); /* r16 = t2 */ \ psubh($20, $17, $23); /* r23 = b3 */ \ paddh($19, $18, $20); /* r20 = b0 */ \ \ lq($24, COS_4_16, $2); /* r2 = cs4 */ \ \ paddh($3, $16, $21); /* r21 = t1+t2 */ \ psubh($3, $16, $22); /* r22 = t1-t2 */ \ \ pmulth($21, $2, $21); /* r21 = cs4 * (t1+t2) 6420 */ \ psraw($21, 15, $21); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $21, $21); /* r21 = b1 */ \ \ pmulth($22, $2, $22); /* r22 = cs4 * (t1-t2) 6420 */ \ psraw($22, 15, $22); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $22, $22); /* r22 = b2 */ \ \ lq($24, TG_2_16, $2); /* r2 = tn2 */ \ \ pmulth($10, $2, $17); /* r17 = x2 * tn2 (6420) */ \ psraw($17, 15, $17); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ 
pinteh($3, $17, $17); /* r17 = x3 * tn3 */ \ psubh($17, $14, $17); /* r17 = tm26 */ \ \ pmulth($14, $2, $18); /* r18 = x6 * tn2 (6420) */ \ psraw($18, 15, $18); \ pmfhl_uw($3); /* r3 = 7531 */ \ psraw($3, 15, $3); \ pinteh($3, $18, $18); /* r18 = x6 * tn2 */ \ paddh($18, $10, $18); /* r18 = tp26 */ \ \ paddh($8, $12, $2); /* r2 = tp04 */ \ psubh($8, $12, $3); /* r3 = tm04 */ \ \ paddh($2, $18, $16); /* r16 = a0 */ \ psubh($2, $18, $19); /* r19 = a3 */ \ psubh($3, $17, $18); /* r18 = a2 */ \ paddh($3, $17, $17); /* r17 = a1 */ #define DCT_8_INV_COL8_STORE(blk) \ \ paddh($16, $20, $2); /* y0 a0+b0 */ \ psubh($16, $20, $16); /* y7 a0-b0 */ \ psrah($2, SHIFT_INV_COL, $2); \ psrah($16, SHIFT_INV_COL, $16); \ sq($2, 0, blk); \ sq($16, 112, blk); \ \ paddh($17, $21, $3); /* y1 a1+b1 */ \ psubh($17, $21, $17); /* y6 a1-b1 */ \ psrah($3, SHIFT_INV_COL, $3); \ psrah($17, SHIFT_INV_COL, $17); \ sq($3, 16, blk); \ sq($17, 96, blk); \ \ paddh($18, $22, $2); /* y2 a2+b2 */ \ psubh($18, $22, $18); /* y5 a2-b2 */ \ psrah($2, SHIFT_INV_COL, $2); \ psrah($18, SHIFT_INV_COL, $18); \ sq($2, 32, blk); \ sq($18, 80, blk); \ \ paddh($19, $23, $3); /* y3 a3+b3 */ \ psubh($19, $23, $19); /* y4 a3-b3 */ \ psrah($3, SHIFT_INV_COL, $3); \ psrah($19, SHIFT_INV_COL, $19); \ sq($3, 48, blk); \ sq($19, 64, blk); #define DCT_8_INV_COL8_PMS() \ paddh($16, $20, $2); /* y0 a0+b0 */ \ psubh($16, $20, $20); /* y7 a0-b0 */ \ psrah($2, SHIFT_INV_COL, $16); \ psrah($20, SHIFT_INV_COL, $20); \ \ paddh($17, $21, $3); /* y1 a1+b1 */ \ psubh($17, $21, $21); /* y6 a1-b1 */ \ psrah($3, SHIFT_INV_COL, $17); \ psrah($21, SHIFT_INV_COL, $21); \ \ paddh($18, $22, $2); /* y2 a2+b2 */ \ psubh($18, $22, $22); /* y5 a2-b2 */ \ psrah($2, SHIFT_INV_COL, $18); \ psrah($22, SHIFT_INV_COL, $22); \ \ paddh($19, $23, $3); /* y3 a3+b3 */ \ psubh($19, $23, $23); /* y4 a3-b3 */ \ psrah($3, SHIFT_INV_COL, $19); \ psrah($23, SHIFT_INV_COL, $23); #define PUT(rs) \ pminh(rs, $11, $2); \ pmaxh($2, $0, $2); \ ppacb($0, $2, $2); \ 
sd3(2, 0, 4); \ __asm__ volatile ("add $4, $5, $4"); #define DCT_8_INV_COL8_PUT() \ PUT($16); \ PUT($17); \ PUT($18); \ PUT($19); \ PUT($23); \ PUT($22); \ PUT($21); \ PUT($20); #define ADD(rs) \ ld3(4, 0, 2); \ pextlb($0, $2, $2); \ paddh($2, rs, $2); \ pminh($2, $11, $2); \ pmaxh($2, $0, $2); \ ppacb($0, $2, $2); \ sd3(2, 0, 4); \ __asm__ volatile ("add $4, $5, $4"); /*fixme: schedule*/ #define DCT_8_INV_COL8_ADD() \ ADD($16); \ ADD($17); \ ADD($18); \ ADD($19); \ ADD($23); \ ADD($22); \ ADD($21); \ ADD($20); void ff_mmi_idct(int16_t * block) { /* $4 = block */ __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8); DCT_8_INV_ROW1($4, 16, TAB_i_17, $7, $9); DCT_8_INV_ROW1($4, 32, TAB_i_26, $7, $10); DCT_8_INV_ROW1($4, 48, TAB_i_35, $7, $11); DCT_8_INV_ROW1($4, 64, TAB_i_04, $7, $12); DCT_8_INV_ROW1($4, 80, TAB_i_35, $7, $13); DCT_8_INV_ROW1($4, 96, TAB_i_26, $7, $14); DCT_8_INV_ROW1($4, 112, TAB_i_17, $7, $15); DCT_8_INV_COL8(); DCT_8_INV_COL8_STORE($4); //let savedtemp regs be saved __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); } void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block) { /* $4 = dest, $5 = line_size, $6 = block */ __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8); DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9); DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10); DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11); DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12); DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13); DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14); DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15); DCT_8_INV_COL8(); lq($24, CLIPMAX, $11); DCT_8_INV_COL8_PMS(); DCT_8_INV_COL8_PUT(); //let savedtemp regs be saved __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); } void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block) { /* $4 = 
dest, $5 = line_size, $6 = block */ __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8); DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9); DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10); DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11); DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12); DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13); DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14); DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15); DCT_8_INV_COL8(); lq($24, CLIPMAX, $11); DCT_8_INV_COL8_PMS(); DCT_8_INV_COL8_ADD(); //let savedtemp regs be saved __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); }
123linslouis-android-video-cutter
jni/libavcodec/ps2/idct_mmi.c
C
asf20
13,703
/* * MMI optimized DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard * * MMI optimization by Leon van Stuivenberg * clear_blocks_mmi() by BroadQ * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/dsputil.h" #include "mmi.h" void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block); void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block); void ff_mmi_idct(DCTELEM *block); static void clear_blocks_mmi(DCTELEM * blocks) { __asm__ volatile( ".set noreorder \n" "addiu $9, %0, 768 \n" "nop \n" "1: \n" "sq $0, 0(%0) \n" "move $8, %0 \n" "addi %0, %0, 64 \n" "sq $0, 16($8) \n" "slt $10, %0, $9 \n" "sq $0, 32($8) \n" "bnez $10, 1b \n" "sq $0, 48($8) \n" ".set reorder \n" : "+r" (blocks) :: "$8", "$9", "memory" ); } static void get_pixels_mmi(DCTELEM *block, const uint8_t *pixels, int line_size) { __asm__ volatile( ".set push \n\t" ".set mips3 \n\t" "ld $8, 0(%0) \n\t" "add %0, %0, %2 \n\t" "ld $9, 0(%0) \n\t" "add %0, %0, %2 \n\t" "ld $10, 0(%0) \n\t" "pextlb $8, $0, $8 \n\t" "sq $8, 0(%1) \n\t" "add %0, %0, %2 \n\t" "ld $8, 0(%0) \n\t" "pextlb $9, $0, $9 \n\t" "sq $9, 16(%1) \n\t" "add %0, %0, %2 \n\t" "ld $9, 0(%0) \n\t" "pextlb $10, $0, $10 \n\t" "sq $10, 32(%1) \n\t" "add %0, %0, %2 \n\t" "ld $10, 0(%0) \n\t" "pextlb $8, $0, $8 \n\t" "sq $8, 
48(%1) \n\t" "add %0, %0, %2 \n\t" "ld $8, 0(%0) \n\t" "pextlb $9, $0, $9 \n\t" "sq $9, 64(%1) \n\t" "add %0, %0, %2 \n\t" "ld $9, 0(%0) \n\t" "pextlb $10, $0, $10 \n\t" "sq $10, 80(%1) \n\t" "pextlb $8, $0, $8 \n\t" "sq $8, 96(%1) \n\t" "pextlb $9, $0, $9 \n\t" "sq $9, 112(%1) \n\t" ".set pop \n\t" : "+r" (pixels) : "r" (block), "r" (line_size) : "$8", "$9", "$10", "memory" ); } static void put_pixels8_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile( ".set push \n\t" ".set mips3 \n\t" "1: \n\t" "ldr $8, 0(%1) \n\t" "addiu %2, %2, -1 \n\t" "ldl $8, 7(%1) \n\t" "add %1, %1, %3 \n\t" "sd $8, 0(%0) \n\t" "add %0, %0, %3 \n\t" "bgtz %2, 1b \n\t" ".set pop \n\t" : "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size) : "$8", "memory" ); } static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h) { __asm__ volatile ( ".set push \n\t" ".set mips3 \n\t" "1: \n\t" "ldr $8, 0(%1) \n\t" "add $11, %1, %3 \n\t" "ldl $8, 7(%1) \n\t" "add $10, %0, %3 \n\t" "ldr $9, 8(%1) \n\t" "ldl $9, 15(%1) \n\t" "ldr $12, 0($11) \n\t" "add %1, $11, %3 \n\t" "ldl $12, 7($11) \n\t" "pcpyld $8, $9, $8 \n\t" "sq $8, 0(%0) \n\t" "ldr $13, 8($11) \n\t" "addiu %2, %2, -2 \n\t" "ldl $13, 15($11) \n\t" "add %0, $10, %3 \n\t" "pcpyld $12, $13, $12 \n\t" "sq $12, 0($10) \n\t" "bgtz %2, 1b \n\t" ".set pop \n\t" : "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size) : "$8", "$9", "$10", "$11", "$12", "$13", "memory" ); } void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx) { const int idct_algo= avctx->idct_algo; c->clear_blocks = clear_blocks_mmi; c->put_pixels_tab[1][0] = put_pixels8_mmi; c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmi; c->put_pixels_tab[0][0] = put_pixels16_mmi; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmi; c->get_pixels = get_pixels_mmi; if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_PS2){ c->idct_put= ff_mmi_idct_put; c->idct_add= ff_mmi_idct_add; c->idct = ff_mmi_idct; c->idct_permutation_type= 
FF_LIBMPEG2_IDCT_PERM; } }
123linslouis-android-video-cutter
jni/libavcodec/ps2/dsputil_mmi.c
C
asf20
5,510
/* * gain code, gain pitch and pitch delay decoding * * Copyright (c) 2008 Vladimir Voroshilov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "dsputil.h" #include "acelp_pitch_delay.h" #include "celp_math.h" int ff_acelp_decode_8bit_to_1st_delay3(int ac_index) { ac_index += 58; if(ac_index > 254) ac_index = 3 * ac_index - 510; return ac_index; } int ff_acelp_decode_4bit_to_2nd_delay3( int ac_index, int pitch_delay_min) { if(ac_index < 4) return 3 * (ac_index + pitch_delay_min); else if(ac_index < 12) return 3 * pitch_delay_min + ac_index + 6; else return 3 * (ac_index + pitch_delay_min) - 18; } int ff_acelp_decode_5_6_bit_to_2nd_delay3( int ac_index, int pitch_delay_min) { return 3 * pitch_delay_min + ac_index - 2; } int ff_acelp_decode_9bit_to_1st_delay6(int ac_index) { if(ac_index < 463) return ac_index + 105; else return 6 * (ac_index - 368); } int ff_acelp_decode_6bit_to_2nd_delay6( int ac_index, int pitch_delay_min) { return 6 * pitch_delay_min + ac_index - 3; } void ff_acelp_update_past_gain( int16_t* quant_energy, int gain_corr_factor, int log2_ma_pred_order, int erasure) { int i; int avg_gain=quant_energy[(1 << log2_ma_pred_order) - 1]; // (5.10) for(i=(1 << log2_ma_pred_order) - 1; i>0; i--) { avg_gain += quant_energy[i-1]; quant_energy[i] = 
quant_energy[i-1]; } if(erasure) quant_energy[0] = FFMAX(avg_gain >> log2_ma_pred_order, -10240) - 4096; // -10 and -4 in (5.10) else quant_energy[0] = (6165 * ((ff_log2(gain_corr_factor) >> 2) - (13 << 13))) >> 13; } int16_t ff_acelp_decode_gain_code( DSPContext *dsp, int gain_corr_factor, const int16_t* fc_v, int mr_energy, const int16_t* quant_energy, const int16_t* ma_prediction_coeff, int subframe_size, int ma_pred_order) { int i; mr_energy <<= 10; for(i=0; i<ma_pred_order; i++) mr_energy += quant_energy[i] * ma_prediction_coeff[i]; #ifdef G729_BITEXACT mr_energy += (((-6165LL * ff_log2(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size, 0))) >> 3) & ~0x3ff); mr_energy = (5439 * (mr_energy >> 15)) >> 8; // (0.15) = (0.15) * (7.23) return bidir_sal( ((ff_exp2(mr_energy & 0x7fff) + 16) >> 5) * (gain_corr_factor >> 1), (mr_energy >> 15) - 25 ); #else mr_energy = gain_corr_factor * exp(M_LN10 / (20 << 23) * mr_energy) / sqrt(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size, 0)); return mr_energy >> 12; #endif } float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy, float *prediction_error, float energy_mean, const float *pred_table) { // Equations 66-69: // ^g_c = ^gamma_gc * 100.05 (predicted dB + mean dB - dB of fixed vector) // Note 10^(0.05 * -10log(average x2)) = 1/sqrt((average x2)). 
float val = fixed_gain_factor * exp2f(M_LOG2_10 * 0.05 * (ff_dot_productf(pred_table, prediction_error, 4) + energy_mean)) / sqrtf(fixed_mean_energy); // update quantified prediction error energy history memmove(&prediction_error[0], &prediction_error[1], 3 * sizeof(prediction_error[0])); prediction_error[3] = 20.0 * log10f(fixed_gain_factor); return val; } void ff_decode_pitch_lag(int *lag_int, int *lag_frac, int pitch_index, const int prev_lag_int, const int subframe, int third_as_first, int resolution) { /* Note n * 10923 >> 15 is floor(x/3) for 0 <= n <= 32767 */ if (subframe == 0 || (subframe == 2 && third_as_first)) { if (pitch_index < 197) pitch_index += 59; else pitch_index = 3 * pitch_index - 335; } else { if (resolution == 4) { int search_range_min = av_clip(prev_lag_int - 5, PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9); // decoding with 4-bit resolution if (pitch_index < 4) { // integer only precision for [search_range_min, search_range_min+3] pitch_index = 3 * (pitch_index + search_range_min) + 1; } else if (pitch_index < 12) { // 1/3 fractional precision for [search_range_min+3 1/3, search_range_min+5 2/3] pitch_index += 3 * search_range_min + 7; } else { // integer only precision for [search_range_min+6, search_range_min+9] pitch_index = 3 * (pitch_index + search_range_min - 6) + 1; } } else { // decoding with 5 or 6 bit resolution, 1/3 fractional precision pitch_index--; if (resolution == 5) { pitch_index += 3 * av_clip(prev_lag_int - 10, PITCH_DELAY_MIN, PITCH_DELAY_MAX - 19); } else pitch_index += 3 * av_clip(prev_lag_int - 5, PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9); } } *lag_int = pitch_index * 10923 >> 15; *lag_frac = pitch_index - 3 * *lag_int - 1; }
123linslouis-android-video-cutter
jni/libavcodec/acelp_pitch_delay.c
C
asf20
6,091
/* * Quicktime Planar RGB (8BPS) Video Decoder * Copyright (C) 2003 Roberto Togni * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * QT 8BPS Video Decoder by Roberto Togni * For more information about the 8BPS format, visit: * http://www.pcisys.net/~melanson/codecs/ * * Supports: PAL8 (RGB 8bpp, paletted) * : BGR24 (RGB 24bpp) (can also output it as RGB32) * : RGB32 (RGB 32bpp, 4th plane is probably alpha and it's ignored) * */ #include <stdio.h> #include <stdlib.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE}; /* * Decoder context */ typedef struct EightBpsContext { AVCodecContext *avctx; AVFrame pic; unsigned char planes; unsigned char planemap[4]; } EightBpsContext; /* * * Decode a frame * */ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; EightBpsContext * const c = avctx->priv_data; const unsigned char *encoded = buf; unsigned char *pixptr, *pixptr_end; unsigned int height = avctx->height; // Real image height unsigned int dlen, p, row; const unsigned char *lp, *dp; unsigned char count; unsigned int px_inc; unsigned int planes = c->planes; unsigned char 
*planemap = c->planemap; if(c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); c->pic.reference = 0; c->pic.buffer_hints = FF_BUFFER_HINTS_VALID; if(avctx->get_buffer(avctx, &c->pic) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } /* Set data pointer after line lengths */ dp = encoded + planes * (height << 1); /* Ignore alpha plane, don't know what to do with it */ if (planes == 4) planes--; px_inc = planes + (avctx->pix_fmt == PIX_FMT_RGB32); for (p = 0; p < planes; p++) { /* Lines length pointer for this plane */ lp = encoded + p * (height << 1); /* Decode a plane */ for(row = 0; row < height; row++) { pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p]; pixptr_end = pixptr + c->pic.linesize[0]; dlen = be2me_16(*(const unsigned short *)(lp+row*2)); /* Decode a row of this plane */ while(dlen > 0) { if(dp + 1 >= buf+buf_size) return -1; if ((count = *dp++) <= 127) { count++; dlen -= count + 1; if (pixptr + count * px_inc > pixptr_end) break; if(dp + count > buf+buf_size) return -1; while(count--) { *pixptr = *dp++; pixptr += px_inc; } } else { count = 257 - count; if (pixptr + count * px_inc > pixptr_end) break; while(count--) { *pixptr = *dp; pixptr += px_inc; } dp++; dlen -= 2; } } } } if (avctx->palctrl) { memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE); if (avctx->palctrl->palette_changed) { c->pic.palette_has_changed = 1; avctx->palctrl->palette_changed = 0; } else c->pic.palette_has_changed = 0; } *data_size = sizeof(AVFrame); *(AVFrame*)data = c->pic; /* always report that the buffer was completely consumed */ return buf_size; } /* * * Init 8BPS decoder * */ static av_cold int decode_init(AVCodecContext *avctx) { EightBpsContext * const c = avctx->priv_data; c->avctx = avctx; c->pic.data[0] = NULL; switch (avctx->bits_per_coded_sample) { case 8: avctx->pix_fmt = PIX_FMT_PAL8; c->planes = 1; c->planemap[0] = 0; // 1st plane is palette indexes if (avctx->palctrl == NULL) { av_log(avctx, AV_LOG_ERROR, 
"Error: PAL8 format but no palette from demuxer.\n"); return -1; } break; case 24: avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24); c->planes = 3; c->planemap[0] = 2; // 1st plane is red c->planemap[1] = 1; // 2nd plane is green c->planemap[2] = 0; // 3rd plane is blue break; case 32: avctx->pix_fmt = PIX_FMT_RGB32; c->planes = 4; #if HAVE_BIGENDIAN c->planemap[0] = 1; // 1st plane is red c->planemap[1] = 2; // 2nd plane is green c->planemap[2] = 3; // 3rd plane is blue c->planemap[3] = 0; // 4th plane is alpha??? #else c->planemap[0] = 2; // 1st plane is red c->planemap[1] = 1; // 2nd plane is green c->planemap[2] = 0; // 3rd plane is blue c->planemap[3] = 3; // 4th plane is alpha??? #endif break; default: av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_coded_sample); return -1; } return 0; } /* * * Uninit 8BPS decoder * */ static av_cold int decode_end(AVCodecContext *avctx) { EightBpsContext * const c = avctx->priv_data; if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); return 0; } AVCodec eightbps_decoder = { "8bps", AVMEDIA_TYPE_VIDEO, CODEC_ID_8BPS, sizeof(EightBpsContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"), };
123linslouis-android-video-cutter
jni/libavcodec/8bps.c
C
asf20
7,822
/* * Windows Media Audio Voice decoder. * Copyright (c) 2009 Ronald S. Bultje * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Windows Media Audio Voice compatible decoder * @author Ronald S. Bultje <rsbultje@gmail.com> */ #include <math.h> #include "avcodec.h" #include "get_bits.h" #include "put_bits.h" #include "wmavoice_data.h" #include "celp_math.h" #include "celp_filters.h" #include "acelp_vectors.h" #include "acelp_filters.h" #include "lsp.h" #include "libavutil/lzo.h" #include "avfft.h" #include "fft.h" #define MAX_BLOCKS 8 ///< maximum number of blocks per frame #define MAX_LSPS 16 ///< maximum filter order #define MAX_LSPS_ALIGN16 16 ///< same as #MAX_LSPS; needs to be multiple ///< of 16 for ASM input buffer alignment #define MAX_FRAMES 3 ///< maximum number of frames per superframe #define MAX_FRAMESIZE 160 ///< maximum number of samples per frame #define MAX_SIGNAL_HISTORY 416 ///< maximum excitation signal history #define MAX_SFRAMESIZE (MAX_FRAMESIZE * MAX_FRAMES) ///< maximum number of samples per superframe #define SFRAME_CACHE_MAXSIZE 256 ///< maximum cache size for frame data that ///< was split over two packets #define VLC_NBITS 6 ///< number of bits to read per VLC iteration /** * Frame type VLC coding. 
*/ static VLC frame_type_vlc; /** * Adaptive codebook types. */ enum { ACB_TYPE_NONE = 0, ///< no adaptive codebook (only hardcoded fixed) ACB_TYPE_ASYMMETRIC = 1, ///< adaptive codebook with per-frame pitch, which ///< we interpolate to get a per-sample pitch. ///< Signal is generated using an asymmetric sinc ///< window function ///< @note see #wmavoice_ipol1_coeffs ACB_TYPE_HAMMING = 2 ///< Per-block pitch with signal generation using ///< a Hamming sinc window function ///< @note see #wmavoice_ipol2_coeffs }; /** * Fixed codebook types. */ enum { FCB_TYPE_SILENCE = 0, ///< comfort noise during silence ///< generated from a hardcoded (fixed) codebook ///< with per-frame (low) gain values FCB_TYPE_HARDCODED = 1, ///< hardcoded (fixed) codebook with per-block ///< gain values FCB_TYPE_AW_PULSES = 2, ///< Pitch-adaptive window (AW) pulse signals, ///< used in particular for low-bitrate streams FCB_TYPE_EXC_PULSES = 3, ///< Innovation (fixed) codebook pulse sets in ///< combinations of either single pulses or ///< pulse pairs }; /** * Description of frame types. 
 */
static const struct frame_type_desc {
    uint8_t n_blocks;     ///< amount of blocks per frame (each block
                          ///< contains 160/#n_blocks samples)
    uint8_t log_n_blocks; ///< log2(#n_blocks)
    uint8_t acb_type;     ///< Adaptive codebook type (ACB_TYPE_*)
    uint8_t fcb_type;     ///< Fixed codebook type (FCB_TYPE_*)
    uint8_t dbl_pulses;   ///< how many pulse vectors have pulse pairs
                          ///< (rather than just one single pulse)
                          ///< only if #fcb_type == #FCB_TYPE_EXC_PULSES
    uint16_t frame_size;  ///< the amount of bits that make up the block
                          ///< data (per frame)
} frame_descs[17] = {
    { 1, 0, ACB_TYPE_NONE,       FCB_TYPE_SILENCE,    0,   0 },
    { 2, 1, ACB_TYPE_NONE,       FCB_TYPE_HARDCODED,  0,  28 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_AW_PULSES,  0,  46 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_ASYMMETRIC, FCB_TYPE_EXC_PULSES, 5, 168 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0,  64 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2,  80 },
    { 2, 1, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 104 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 108 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 132 },
    { 4, 2, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 168 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 0, 176 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 2, 208 },
    { 8, 3, ACB_TYPE_HAMMING,    FCB_TYPE_EXC_PULSES, 5, 256 }
};

/**
 * WMA Voice decoding context.
 */
typedef struct {
    /**
     * @defgroup struct_global Global values
     * Global values, specified in the stream header / extradata or used
     * all over.
     * @{
     */
    GetBitContext gb;             ///< packet bitreader. During decoder init,
                                  ///< it contains the extradata from the
                                  ///< demuxer. During decoding, it contains
                                  ///< packet data.
    int8_t vbm_tree[25];          ///< converts VLC codes to frame type

    int spillover_bitsize;        ///< number of bits used to specify
                                  ///< #spillover_nbits in the packet header
                                  ///< = ceil(log2(ctx->block_align << 3))
    int history_nsamples;         ///< number of samples in history for signal
                                  ///< prediction (through ACB)

    /* postfilter specific values */
    int do_apf;                   ///< whether to apply the averaged
                                  ///< projection filter (APF)
    int denoise_strength;         ///< strength of denoising in Wiener filter
                                  ///< [0-11]
    int denoise_tilt_corr;        ///< Whether to apply tilt correction to the
                                  ///< Wiener filter coefficients (postfilter)
    int dc_level;                 ///< Predicted amount of DC noise, based
                                  ///< on which a DC removal filter is used

    int lsps;                     ///< number of LSPs per frame [10 or 16]
    int lsp_q_mode;               ///< defines quantizer defaults [0, 1]
    int lsp_def_mode;             ///< defines different sets of LSP defaults
                                  ///< [0, 1]
    int frame_lsp_bitsize;        ///< size (in bits) of LSPs, when encoded
                                  ///< per-frame (independent coding)
    int sframe_lsp_bitsize;       ///< size (in bits) of LSPs, when encoded
                                  ///< per superframe (residual coding)

    int min_pitch_val;            ///< base value for pitch parsing code
    int max_pitch_val;            ///< max value + 1 for pitch parsing
    int pitch_nbits;              ///< number of bits used to specify the
                                  ///< pitch value in the frame header
    int block_pitch_nbits;        ///< number of bits used to specify the
                                  ///< first block's pitch value
    int block_pitch_range;        ///< range of the block pitch
    int block_delta_pitch_nbits;  ///< number of bits used to specify the
                                  ///< delta pitch between this and the last
                                  ///< block's pitch value, used in all but
                                  ///< first block
    int block_delta_pitch_hrange; ///< 1/2 range of the delta (full range is
                                  ///< from -this to +this-1)
    uint16_t block_conv_table[4]; ///< boundaries for block pitch unit/scale
                                  ///< conversion

    /**
     * @}
     * @defgroup struct_packet Packet values
     * Packet values, specified in the packet header or related to a packet.
     * A packet is considered to be a single unit of data provided to this
     * decoder by the demuxer.
     * @{
     */
    int spillover_nbits;          ///< number of bits of the previous packet's
                                  ///< last superframe preceding this
                                  ///< packet's first full superframe (useful
                                  ///< for re-synchronization also)
    int has_residual_lsps;        ///< if set, superframes contain one set of
                                  ///< LSPs that cover all frames, encoded as
                                  ///< independent and residual LSPs; if not
                                  ///< set, each frame contains its own, fully
                                  ///< independent, LSPs
    int skip_bits_next;           ///< number of bits to skip at the next call
                                  ///< to #wmavoice_decode_packet() (since
                                  ///< they're part of the previous superframe)

    uint8_t sframe_cache[SFRAME_CACHE_MAXSIZE + FF_INPUT_BUFFER_PADDING_SIZE];
                                  ///< cache for superframe data split over
                                  ///< multiple packets
    int sframe_cache_size;        ///< set to >0 if we have data from an
                                  ///< (incomplete) superframe from a previous
                                  ///< packet that spilled over in the current
                                  ///< packet; specifies the amount of bits in
                                  ///< #sframe_cache
    PutBitContext pb;             ///< bitstream writer for #sframe_cache

    /**
     * @}
     * @defgroup struct_frame Frame and superframe values
     * Superframe and frame data - these can change from frame to frame,
     * although some of them do in that case serve as a cache / history for
     * the next frame or superframe.
     * @{
     */
    double prev_lsps[MAX_LSPS];   ///< LSPs of the last frame of the previous
                                  ///< superframe
    int last_pitch_val;           ///< pitch value of the previous frame
    int last_acb_type;            ///< frame type [0-2] of the previous frame
    int pitch_diff_sh16;          ///< ((cur_pitch_val - #last_pitch_val)
                                  ///< << 16) / #MAX_FRAMESIZE
    float silence_gain;           ///< set for use in blocks if #ACB_TYPE_NONE

    int aw_idx_is_ext;            ///< whether the AW index was encoded in
                                  ///< 8 bits (instead of 6)
    int aw_pulse_range;           ///< the range over which #aw_pulse_set1()
                                  ///< can apply the pulse, relative to the
                                  ///< value in aw_first_pulse_off. The exact
                                  ///< position of the first AW-pulse is within
                                  ///< [pulse_off, pulse_off + this], and
                                  ///< depends on bitstream values; [16 or 24]
    int aw_n_pulses[2];           ///< number of AW-pulses in each block; note
                                  ///< that this number can be negative (in
                                  ///< which case it basically means "zero")
    int aw_first_pulse_off[2];    ///< index of first sample to which to
                                  ///< apply AW-pulses, or -0xff if unset
    int aw_next_pulse_off_cache;  ///< the position (relative to start of the
                                  ///< second block) at which pulses should
                                  ///< start to be positioned, serves as a
                                  ///< cache for pitch-adaptive window pulses
                                  ///< between blocks

    int frame_cntr;               ///< current frame index [0 - 0xFFFE]; is
                                  ///< only used for comfort noise in #pRNG()
    float gain_pred_err[6];       ///< cache for gain prediction
    float excitation_history[MAX_SIGNAL_HISTORY];
                                  ///< cache of the signal of previous
                                  ///< superframes, used as a history for
                                  ///< signal generation
    float synth_history[MAX_LSPS]; ///< see #excitation_history
    /**
     * @}
     * @defgroup post_filter Postfilter values
     * Variables used for postfilter implementation, mostly history for
     * smoothing and so on, and context variables for FFT/iFFT.
     * @{
     */
    RDFTContext rdft, irdft;      ///< contexts for FFT-calculation in the
                                  ///< postfilter (for denoise filter)
    DCTContext dct, dst;          ///< contexts for phase shift (in Hilbert
                                  ///< transform, part of postfilter)
    float sin[511], cos[511];     ///< 8-bit cosine/sine windows over [-pi,pi]
                                  ///< range
    float postfilter_agc;         ///< gain control memory, used in
                                  ///< #adaptive_gain_control()
    float dcf_mem[2];             ///< DC filter history
    float zero_exc_pf[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE];
                                  ///< zero filter output (i.e.
excitation) ///< by postfilter float denoise_filter_cache[MAX_FRAMESIZE]; int denoise_filter_cache_size; ///< samples in #denoise_filter_cache DECLARE_ALIGNED(16, float, tilted_lpcs_pf)[0x80]; ///< aligned buffer for LPC tilting DECLARE_ALIGNED(16, float, denoise_coeffs_pf)[0x80]; ///< aligned buffer for denoise coefficients DECLARE_ALIGNED(16, float, synth_filter_out_buf)[0x80 + MAX_LSPS_ALIGN16]; ///< aligned buffer for postfilter speech ///< synthesis /** * @} */ } WMAVoiceContext; /** * Sets up the variable bit mode (VBM) tree from container extradata. * @param gb bit I/O context. * The bit context (s->gb) should be loaded with byte 23-46 of the * container extradata (i.e. the ones containing the VBM tree). * @param vbm_tree pointer to array to which the decoded VBM tree will be * written. * @return 0 on success, <0 on error. */ static av_cold int decode_vbmtree(GetBitContext *gb, int8_t vbm_tree[25]) { static const uint8_t bits[] = { 2, 2, 2, 4, 4, 4, 6, 6, 6, 8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14, 14, 14 }; static const uint16_t codes[] = { 0x0000, 0x0001, 0x0002, // 00/01/10 0x000c, 0x000d, 0x000e, // 11+00/01/10 0x003c, 0x003d, 0x003e, // 1111+00/01/10 0x00fc, 0x00fd, 0x00fe, // 111111+00/01/10 0x03fc, 0x03fd, 0x03fe, // 11111111+00/01/10 0x0ffc, 0x0ffd, 0x0ffe, // 1111111111+00/01/10 0x3ffc, 0x3ffd, 0x3ffe, 0x3fff // 111111111111+xx }; int cntr[8], n, res; memset(vbm_tree, 0xff, sizeof(vbm_tree)); memset(cntr, 0, sizeof(cntr)); for (n = 0; n < 17; n++) { res = get_bits(gb, 3); if (cntr[res] > 3) // should be >= 3 + (res == 7)) return -1; vbm_tree[res * 3 + cntr[res]++] = n; } INIT_VLC_STATIC(&frame_type_vlc, VLC_NBITS, sizeof(bits), bits, 1, 1, codes, 2, 2, 132); return 0; } /** * Set up decoder with parameters from demuxer (extradata etc.). 
 */
static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
{
    int n, flags, pitch_range, lsp16_flag;
    WMAVoiceContext *s = ctx->priv_data;

    /**
     * Extradata layout:
     * - byte  0-18: WMAPro-in-WMAVoice extradata (see wmaprodec.c),
     * - byte 19-22: flags field (annoyingly in LE; see below for known
     *               values),
     * - byte 23-46: variable bitmode tree (really just 17 * 3 bits,
     *               rest is 0).
     */
    if (ctx->extradata_size != 46) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid extradata size %d (should be 46)\n",
               ctx->extradata_size);
        return -1;
    }
    flags                = AV_RL32(ctx->extradata + 18);
    s->spillover_bitsize = 3 + av_ceil_log2(ctx->block_align);
    s->do_apf            = flags & 0x1;
    if (s->do_apf) {
        /* FFT/DCT contexts and the sin/cos windows are only needed when the
         * averaged projection (post-)filter is enabled. */
        ff_rdft_init(&s->rdft,  7, DFT_R2C);
        ff_rdft_init(&s->irdft, 7, IDFT_C2R);
        ff_dct_init(&s->dct,  6, DCT_I);
        ff_dct_init(&s->dst,  6, DST_I);

        /* build a 511-sample sine (and cosine) window by mirroring the
         * 256-sample half produced by ff_sine_window_init() */
        ff_sine_window_init(s->cos, 256);
        memcpy(&s->sin[255], s->cos, 256 * sizeof(s->cos[0]));
        for (n = 0; n < 255; n++) {
            s->sin[n]       = -s->sin[510 - n];
            s->cos[510 - n] =  s->cos[n];
        }
    }
    s->denoise_strength  = (flags >> 2) & 0xF;
    if (s->denoise_strength >= 12) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid denoise filter strength %d (max=11)\n",
               s->denoise_strength);
        return -1;
    }
    s->denoise_tilt_corr = !!(flags & 0x40);
    s->dc_level          = (flags >> 7) & 0xF;
    s->lsp_q_mode        = !!(flags & 0x2000);
    s->lsp_def_mode      = !!(flags & 0x4000);
    lsp16_flag           = flags & 0x1000;
    if (lsp16_flag) {
        s->lsps               = 16;
        s->frame_lsp_bitsize  = 34;
        s->sframe_lsp_bitsize = 60;
    } else {
        s->lsps               = 10;
        s->frame_lsp_bitsize  = 24;
        s->sframe_lsp_bitsize = 48;
    }
    /* default LSP history: evenly spread over [0, pi] */
    for (n = 0; n < s->lsps; n++)
        s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);

    init_get_bits(&s->gb, ctx->extradata + 22, (ctx->extradata_size - 22) << 3);
    if (decode_vbmtree(&s->gb, s->vbm_tree) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid VBM tree; broken extradata?\n");
        return -1;
    }

    /* pitch parsing limits derived from the sample rate (fixed-point, 8
     * fractional bits) */
    s->min_pitch_val    = ((ctx->sample_rate << 8)      /  400 + 50) >> 8;
    s->max_pitch_val    = ((ctx->sample_rate << 8) * 37 / 2000 + 50) >> 8;
    pitch_range         = s->max_pitch_val - s->min_pitch_val;
    s->pitch_nbits      = av_ceil_log2(pitch_range);
    s->last_pitch_val   = 40;
    s->last_acb_type    = ACB_TYPE_NONE;
    s->history_nsamples = s->max_pitch_val + 8;

    if (s->min_pitch_val < 1 || s->history_nsamples > MAX_SIGNAL_HISTORY) {
        int min_sr = ((((1 << 8) - 50) * 400) + 0xFF) >> 8,
            max_sr = ((((MAX_SIGNAL_HISTORY - 8) << 8) + 205) * 2000 / 37) >> 8;

        av_log(ctx, AV_LOG_ERROR,
               "Unsupported samplerate %d (min=%d, max=%d)\n",
               ctx->sample_rate, min_sr, max_sr); // 322-22097 Hz

        return -1;
    }

    s->block_conv_table[0]      = s->min_pitch_val;
    s->block_conv_table[1]      = (pitch_range * 25) >> 6;
    s->block_conv_table[2]      = (pitch_range * 44) >> 6;
    s->block_conv_table[3]      = s->max_pitch_val - 1;
    s->block_delta_pitch_hrange = (pitch_range >> 3) & ~0xF;
    s->block_delta_pitch_nbits  = 1 + av_ceil_log2(s->block_delta_pitch_hrange);
    s->block_pitch_range        = s->block_conv_table[2] +
                                  s->block_conv_table[3] + 1 +
                                  2 * (s->block_conv_table[1] - 2 * s->min_pitch_val);
    s->block_pitch_nbits        = av_ceil_log2(s->block_pitch_range);

    ctx->sample_fmt             = SAMPLE_FMT_FLT;

    return 0;
}

/**
 * @defgroup postfilter Postfilter functions
 * Postfilter functions (gain control, wiener denoise filter, DC filter,
 * kalman smoothening, plus surrounding code to wrap it)
 * @{
 */
/**
 * Adaptive gain control (as used in postfilter).
 *
 * Identical to #ff_adaptive_gain_control() in acelp_vectors.c, except
 * that the energy here is calculated using sum(abs(...)), whereas the
 * other codecs (e.g. AMR-NB, SIPRO) use sqrt(dotproduct(...)).
 *
 * @param out output buffer for filtered samples
 * @param in input buffer containing the samples as they are after the
 *           postfilter steps so far
 * @param speech_synth input buffer containing speech synth before postfilter
 * @param size input buffer size
 * @param alpha exponential filter factor
 * @param gain_mem pointer to filter memory (single float)
 */
static void adaptive_gain_control(float *out, const float *in,
                                  const float *speech_synth,
                                  int size, float alpha, float *gain_mem)
{
    int i;
    float speech_energy = 0.0, postfilter_energy = 0.0, gain_scale_factor;
    float mem = *gain_mem;

    for (i = 0; i < size; i++) {
        speech_energy     += fabsf(speech_synth[i]);
        postfilter_energy += fabsf(in[i]);
    }
    /* NOTE(review): no guard against postfilter_energy == 0 (an all-zero
     * input block would divide by zero) -- presumably never happens for
     * real streams; confirm against the reference implementation. */
    gain_scale_factor = (1.0 - alpha) * speech_energy / postfilter_energy;

    /* exponentially-smoothed per-sample gain */
    for (i = 0; i < size; i++) {
        mem = alpha * mem + gain_scale_factor;
        out[i] = in[i] * mem;
    }

    *gain_mem = mem;
}

/**
 * Kalman smoothing function.
 *
 * This function looks back pitch +/- 3 samples back into history to find
 * the best fitting curve (that one giving the optimal gain of the two
 * signals, i.e. the highest dot product between the two), and then
 * uses that signal history to smoothen the output of the speech synthesis
 * filter.
 *
 * @param s WMA Voice decoding context
 * @param pitch pitch of the speech signal
 * @param in input speech signal
 * @param out output pointer for smoothened signal
 * @param size input/output buffer size
 *
 * @returns -1 if no smoothening took place, e.g. because no optimal
 *          fit could be found, or 0 on success.
 */
static int kalman_smoothen(WMAVoiceContext *s, int pitch,
                           const float *in, float *out, int size)
{
    int n;
    float optimal_gain = 0, dot;
    const float *ptr = &in[-FFMAX(s->min_pitch_val, pitch - 3)],
                *end = &in[-FFMIN(s->max_pitch_val, pitch + 3)],
                *best_hist_ptr;

    /* find best fitting point in history */
    do {
        dot = ff_dot_productf(in, ptr, size);
        if (dot > optimal_gain) {
            optimal_gain  = dot;
            best_hist_ptr = ptr;
        }
    } while (--ptr >= end);

    if (optimal_gain <= 0)
        return -1;
    dot = ff_dot_productf(best_hist_ptr, best_hist_ptr, size);
    if (dot <= 0) // would be 1.0
        return -1;

    if (optimal_gain <= dot) {
        dot = dot / (dot + 0.6 * optimal_gain); // 0.625-1.000
    } else
        dot = 0.625;

    /* actual smoothing */
    for (n = 0; n < size; n++)
        out[n] = best_hist_ptr[n] + dot * (in[n] - best_hist_ptr[n]);

    return 0;
}

/**
 * Get the tilt factor of a formant filter from its transfer function
 * @see #tilt_factor() in amrnbdec.c, which does essentially the same,
 *      but somehow (??) it does a speech synthesis filter in the
 *      middle, which is missing here
 *
 * @param lpcs LPC coefficients
 * @param n_lpcs Size of LPC buffer
 * @returns the tilt factor
 */
static float tilt_factor(const float *lpcs, int n_lpcs)
{
    float rh0, rh1;

    rh0 = 1.0     + ff_dot_productf(lpcs,  lpcs,    n_lpcs);
    rh1 = lpcs[0] + ff_dot_productf(lpcs, &lpcs[1], n_lpcs - 1);

    return rh1 / rh0;
}

/**
 * Derive denoise filter coefficients (in real domain) from the LPCs.
 */
static void calc_input_response(WMAVoiceContext *s, float *lpcs,
                                int fcb_type, float *coeffs,
                                int remainder)
{
    float last_coeff, min = 15.0, max = -15.0;
    float irange, angle_mul, gain_mul, range, sq;
    int n, idx;

    /* Create frequency power spectrum of speech input (i.e. RDFT of LPCs) */
    ff_rdft_calc(&s->rdft, lpcs);
#define log_range(var, assign) do { \
        float tmp = log10f(assign);  var = tmp; \
        max       = FFMAX(max, tmp); min = FFMIN(min, tmp); \
    } while (0)
    log_range(last_coeff,  lpcs[1]         * lpcs[1]);
    for (n = 1; n < 64; n++)
        log_range(lpcs[n], lpcs[n * 2]     * lpcs[n * 2] +
                           lpcs[n * 2 + 1] * lpcs[n * 2 + 1]);
    log_range(lpcs[0],     lpcs[0]         * lpcs[0]);
#undef log_range
    range    = max - min;
    lpcs[64] = last_coeff;

    /* Now, use this spectrum to pick out these frequencies with higher
     * (relative) power/energy (which we then take to be "not noise"),
     * and set up a table (still in lpc[]) of (relative) gains per frequency.
     * These frequencies will be maintained, while others ("noise") will be
     * decreased in the filter output. */
    irange    = 64.0 / range; // so irange*(max-value) is in the range [0, 63]
    gain_mul  = range * (fcb_type == FCB_TYPE_HARDCODED ? (5.0 / 13.0) :
                                                          (5.0 / 14.7));
    angle_mul = gain_mul * (8.0 * M_LN10 / M_PI);
    for (n = 0; n <= 64; n++) {
        float pow;

        idx = FFMAX(0, lrint((max - lpcs[n]) * irange) - 1);
        pow = wmavoice_denoise_power_table[s->denoise_strength][idx];
        lpcs[n] = angle_mul * pow;

        /* 70.57 =~ 1/log10(1.0331663) */
        idx = (pow * gain_mul - 0.0295) * 70.570526123;
        if (idx > 127) { // fallback if index falls outside table range
            coeffs[n] = wmavoice_energy_table[127] *
                        powf(1.0331663, idx - 127);
        } else
            coeffs[n] = wmavoice_energy_table[FFMAX(0, idx)];
    }

    /* calculate the Hilbert transform of the gains, which we do (since this
     * is a sinus input) by doing a phase shift (in theory, H(sin())=cos()).
     * Hilbert_Transform(RDFT(x)) = Laplace_Transform(x), which calculates the
     * "moment" of the LPCs in this filter.
 *   output of the IRDFT on denoised speech data beyond
 *   the frame boundary) are saved and applied to subsequent frames by an
 *   overlap-add method (otherwise you get clicking-artifacts).
 *
 * @param s WMA Voice decoding context
 * @param fcb_type Frame (codebook) type
 * @param synth_pf input: the noisy speech signal, output: denoised speech
 *                 data; should be 16-byte aligned (for ASM purposes)
 * @param size size of the speech data
 * @param lpcs LPCs used to synthesize this frame's speech data
 */
static void wiener_denoise(WMAVoiceContext *s, int fcb_type,
                           float *synth_pf, int size,
                           const float *lpcs)
{
    int remainder, lim, n;

    if (fcb_type != FCB_TYPE_SILENCE) {
        float *tilted_lpcs = s->tilted_lpcs_pf,
              *coeffs = s->denoise_coeffs_pf, tilt_mem = 0;

        tilted_lpcs[0]           = 1.0;
        memcpy(&tilted_lpcs[1], lpcs, sizeof(lpcs[0]) * s->lsps);
        memset(&tilted_lpcs[s->lsps + 1], 0,
               sizeof(tilted_lpcs[0]) * (128 - s->lsps - 1));
        ff_tilt_compensation(&tilt_mem, 0.7 * tilt_factor(lpcs, s->lsps),
                             tilted_lpcs, s->lsps + 2);

        /* The IRDFT output (127 samples for 7-bit filter) beyond the frame
         * size is applied to the next frame. All input beyond this is zero,
         * and thus all output beyond this will go towards zero, hence we can
         * limit to min(size-1, 127-size) as a performance consideration. */
        remainder = FFMIN(127 - size, size - 1);
        calc_input_response(s, tilted_lpcs, fcb_type, coeffs, remainder);

        /* apply coefficients (in frequency spectrum domain), i.e. complex
         * number multiplication */
        memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size));
        ff_rdft_calc(&s->rdft, synth_pf);
        ff_rdft_calc(&s->rdft, coeffs);
        synth_pf[0] *= coeffs[0];
        synth_pf[1] *= coeffs[1];
        for (n = 1; n < 64; n++) {
            float v1 = synth_pf[n * 2], v2 = synth_pf[n * 2 + 1];
            synth_pf[n * 2]     = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1];
            synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1];
        }
        ff_rdft_calc(&s->irdft, synth_pf);
    }

    /* merge filter output with the history of previous runs */
    if (s->denoise_filter_cache_size) {
        lim = FFMIN(s->denoise_filter_cache_size, size);
        for (n = 0; n < lim; n++)
            synth_pf[n] += s->denoise_filter_cache[n];
        s->denoise_filter_cache_size -= lim;
        memmove(s->denoise_filter_cache, &s->denoise_filter_cache[size],
                sizeof(s->denoise_filter_cache[0]) * s->denoise_filter_cache_size);
    }

    /* move remainder of filter output into a cache for future runs */
    if (fcb_type != FCB_TYPE_SILENCE) {
        lim = FFMIN(remainder, s->denoise_filter_cache_size);
        for (n = 0; n < lim; n++)
            s->denoise_filter_cache[n] += synth_pf[size + n];
        if (lim < remainder) {
            memcpy(&s->denoise_filter_cache[lim], &synth_pf[size + lim],
                   sizeof(s->denoise_filter_cache[0]) * (remainder - lim));
            s->denoise_filter_cache_size = remainder;
        }
    }
}

/**
 * Averaging projection filter, the postfilter used in WMAVoice.
 *
 * This uses the following steps:
 * - A zero-synthesis filter (generate excitation from synth signal)
 * - Kalman smoothing on excitation, based on pitch
 * - Re-synthesized smoothened output
 * - Iterative Wiener denoise filter
 * - Adaptive gain filter
 * - DC filter
 *
 * @param s WMAVoice decoding context
 * @param synth Speech synthesis output (before postfilter)
 * @param samples Output buffer for filtered samples
 * @param size Buffer size of synth & samples
 * @param lpcs Generated LPCs used for speech synthesis
 * @param zero_exc_pf destination buffer for the zero-synthesis filter output
 *                    (excitation), doubling as signal history
 * @param fcb_type Frame type (silence, hardcoded, AW-pulses or FCB-pulses)
 * @param pitch Pitch of the input signal
 */
static void postfilter(WMAVoiceContext *s, const float *synth,
                       float *samples,    int size,
                       const float *lpcs, float *zero_exc_pf,
                       int fcb_type,      int pitch)
{
    float synth_filter_in_buf[MAX_FRAMESIZE / 2],
         *synth_pf = &s->synth_filter_out_buf[MAX_LSPS_ALIGN16],
         *synth_filter_in = zero_exc_pf;

    assert(size <= MAX_FRAMESIZE / 2);

    /* generate excitation from input signal */
    ff_celp_lp_zero_synthesis_filterf(zero_exc_pf, lpcs, synth, size, s->lsps);

    if (fcb_type >= FCB_TYPE_AW_PULSES &&
        !kalman_smoothen(s, pitch, zero_exc_pf, synth_filter_in_buf, size))
        synth_filter_in = synth_filter_in_buf;

    /* re-synthesize speech after smoothening, and keep history */
    ff_celp_lp_synthesis_filterf(synth_pf, lpcs,
                                 synth_filter_in, size, s->lsps);
    memcpy(&synth_pf[-s->lsps], &synth_pf[size - s->lsps],
           sizeof(synth_pf[0]) * s->lsps);

    wiener_denoise(s, fcb_type, synth_pf, size, lpcs);

    adaptive_gain_control(samples, synth_pf, synth, size, 0.99,
                          &s->postfilter_agc);

    if (s->dc_level > 8) {
        /* remove ultra-low frequency DC noise / highpass filter;
         * coefficients are identical to those used in SIPR decoding,
         * and very closely resemble those used in AMR-NB decoding. */
        ff_acelp_apply_order_2_transfer_function(samples, samples,
            (const float[2]) { -1.99997,      1.0 },
            (const float[2]) { -1.9330735188, 0.93589198496 },
            0.93980580475, s->dcf_mem, size);
    }
}
/**
 * @}
 */

/**
 * Dequantize LSPs
 * @param lsps output pointer to the array that will hold the LSPs
 * @param num number of LSPs to be dequantized
 * @param values quantized values, contains n_stages values
 * @param sizes range (i.e. max value) of each quantized value
 * @param n_stages number of dequantization runs
 * @param table dequantization table to be used
 * @param mul_q LSF multiplier
 * @param base_q base (lowest) LSF values
 */
static void dequant_lsps(double *lsps, int num,
                         const uint16_t *values,
                         const uint16_t *sizes,
                         int n_stages, const uint8_t *table,
                         const double *mul_q,
                         const double *base_q)
{
    int n, m;

    memset(lsps, 0, num * sizeof(*lsps));
    for (n = 0; n < n_stages; n++) {
        const uint8_t *t_off = &table[values[n] * num];
        double base = base_q[n], mul = mul_q[n];

        for (m = 0; m < num; m++)
            lsps[m] += base + mul * t_off[m];

        table += sizes[n] * num;
    }
}

/**
 * @defgroup lsp_dequant LSP dequantization routines
 * LSP dequantization routines, for 10/16LSPs and independent/residual coding.
 * @note we assume enough bits are available, caller should check.
 * lsp10i() consumes 24 bits; lsp10r() consumes an additional 24 bits;
 * lsp16i() consumes 34 bits; lsp16r() consumes an additional 26 bits.
 * @{
 */
/**
 * Parse 10 independently-coded LSPs.
 */
static void dequant_lsp10i(GetBitContext *gb, double *lsps)
{
    static const uint16_t vec_sizes[4] = { 256, 64, 32, 32 };
    static const double mul_lsf[4] = {
        5.2187144800e-3,    1.4626986422e-3,
        9.6179549166e-4,    1.1325736225e-3
    };
    static const double base_lsf[4] = {
        M_PI * -2.15522e-1, M_PI * -6.1646e-2,
        M_PI * -3.3486e-2,  M_PI * -5.7408e-2
    };
    uint16_t v[4];

    v[0] = get_bits(gb, 8);
    v[1] = get_bits(gb, 6);
    v[2] = get_bits(gb, 5);
    v[3] = get_bits(gb, 5);

    dequant_lsps(lsps, 10, v, vec_sizes, 4, wmavoice_dq_lsp10i,
                 mul_lsf, base_lsf);
}

/**
 * Parse 10 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp10r(GetBitContext *gb,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 64, 64 };
    static const double mul_lsf[3] = {
        2.5807601174e-3,    1.2354460219e-3,   1.1763821673e-3
    };
    static const double base_lsf[3] = {
        M_PI * -1.07448e-1, M_PI * -5.2706e-2, M_PI * -5.1634e-2
    };
    const float (*ipol_tab)[2][10] = q_mode ?
        wmavoice_lsp10_intercoeff_b : wmavoice_lsp10_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp10i(gb, i_lsps);

    interpol = get_bits(gb, 5);
    v[0]     = get_bits(gb, 7);
    v[1]     = get_bits(gb, 6);
    v[2]     = get_bits(gb, 6);

    /* interpolate the two intermediate frames' LSPs between old and new */
    for (n = 0; n < 10; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[10 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    dequant_lsps(a2, 20, v, vec_sizes, 3, wmavoice_dq_lsp10r,
                 mul_lsf, base_lsf);
}

/**
 * Parse 16 independently-coded LSPs.
 */
static void dequant_lsp16i(GetBitContext *gb, double *lsps)
{
    static const uint16_t vec_sizes[5] = { 256, 64, 128, 64, 128 };
    static const double mul_lsf[5] = {
        3.3439586280e-3,    6.9908173703e-4,
        3.3216608306e-3,    1.0334960326e-3,
        3.1899104283e-3
    };
    static const double base_lsf[5] = {
        M_PI * -1.27576e-1, M_PI * -2.4292e-2,
        M_PI * -1.28094e-1, M_PI * -3.2128e-2,
        M_PI * -1.29816e-1
    };
    uint16_t v[5];

    v[0] = get_bits(gb, 8);
    v[1] = get_bits(gb, 6);
    v[2] = get_bits(gb, 7);
    v[3] = get_bits(gb, 6);
    v[4] = get_bits(gb, 7);

    /* the 16 LSPs are dequantized in three groups of 5+5+6 values */
    dequant_lsps( lsps,     5,  v,     vec_sizes,    2,
                 wmavoice_dq_lsp16i1,  mul_lsf,     base_lsf);
    dequant_lsps(&lsps[5],  5, &v[2], &vec_sizes[2], 2,
                 wmavoice_dq_lsp16i2, &mul_lsf[2], &base_lsf[2]);
    dequant_lsps(&lsps[10], 6, &v[4], &vec_sizes[4], 1,
                 wmavoice_dq_lsp16i3, &mul_lsf[4], &base_lsf[4]);
}

/**
 * Parse 16 independently-coded LSPs, and then derive the tables to
 * generate LSPs for the other frames from them (residual coding).
 */
static void dequant_lsp16r(GetBitContext *gb,
                           double *i_lsps, const double *old,
                           double *a1, double *a2, int q_mode)
{
    static const uint16_t vec_sizes[3] = { 128, 128, 128 };
    static const double mul_lsf[3] = {
        1.2232979501e-3,   1.4062241527e-3,   1.6114744851e-3
    };
    static const double base_lsf[3] = {
        M_PI * -5.5830e-2, M_PI * -5.2908e-2, M_PI * -5.4776e-2
    };
    const float (*ipol_tab)[2][16] = q_mode ?
        wmavoice_lsp16_intercoeff_b : wmavoice_lsp16_intercoeff_a;
    uint16_t interpol, v[3];
    int n;

    dequant_lsp16i(gb, i_lsps);

    interpol = get_bits(gb, 5);
    v[0]     = get_bits(gb, 7);
    v[1]     = get_bits(gb, 7);
    v[2]     = get_bits(gb, 7);

    /* interpolate the two intermediate frames' LSPs between old and new */
    for (n = 0; n < 16; n++) {
        double delta = old[n] - i_lsps[n];
        a1[n]        = ipol_tab[interpol][0][n] * delta + i_lsps[n];
        a1[16 + n]   = ipol_tab[interpol][1][n] * delta + i_lsps[n];
    }

    /* residual LSPs in three groups of 10+10+12 values */
    dequant_lsps( a2,     10,  v,     vec_sizes,    1,
                 wmavoice_dq_lsp16r1,  mul_lsf,     base_lsf);
    dequant_lsps(&a2[10], 10, &v[1], &vec_sizes[1], 1,
                 wmavoice_dq_lsp16r2, &mul_lsf[1], &base_lsf[1]);
    dequant_lsps(&a2[20], 12, &v[2], &vec_sizes[2], 1,
                 wmavoice_dq_lsp16r3, &mul_lsf[2], &base_lsf[2]);
}

/**
 * @}
 * @defgroup aw Pitch-adaptive window coding functions
 * The next few functions are for pitch-adaptive window coding.
 * @{
 */
/**
 * Parse the offset of the first pitch-adaptive window pulses, and
 * the distribution of pulses between the two blocks in this frame.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param pitch pitch for each block in this frame
 */
static void aw_parse_coords(WMAVoiceContext *s, GetBitContext *gb,
                            const int *pitch)
{
    static const int16_t start_offset[94] = {
        -11,  -9,  -7,  -5,  -3,  -1,   1,   3,   5,   7,   9,  11,
         13,  15,  18,  17,  19,  20,  21,  22,  23,  24,  25,  26,
         27,  28,  29,  30,  31,  32,  33,  35,  37,  39,  41,  43,
         45,  47,  49,  51,  53,  55,  57,  59,  61,  63,  65,  67,
         69,  71,  73,  75,  77,  79,  81,  83,  85,  87,  89,  91,
         93,  95,  97,  99, 101, 103, 105, 107, 109, 111, 113, 115,
        117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139,
        141, 143, 145, 147, 149, 151, 153, 155, 157, 159
    };
    int bits, offset;

    /* position of pulse */
    s->aw_idx_is_ext = 0;
    if ((bits = get_bits(gb, 6)) >= 54) {
        s->aw_idx_is_ext = 1;
        bits += (bits - 54) * 3 + get_bits(gb, 2);
    }

    /* for a repeated pulse at pulse_off with a pitch_lag of pitch[], count
     * the distribution of the pulses in each block contained in this frame. */
    s->aw_pulse_range        = FFMIN(pitch[0], pitch[1]) > 32 ? 24 : 16;
    for (offset = start_offset[bits]; offset < 0; offset += pitch[0]) ;
    s->aw_n_pulses[0]        = (pitch[0] - 1 + MAX_FRAMESIZE / 2 - offset) / pitch[0];
    s->aw_first_pulse_off[0] = offset - s->aw_pulse_range / 2;
    offset                  += s->aw_n_pulses[0] * pitch[0];
    s->aw_n_pulses[1]        = (pitch[1] - 1 + MAX_FRAMESIZE - offset) / pitch[1];
    s->aw_first_pulse_off[1] = offset - (MAX_FRAMESIZE + s->aw_pulse_range) / 2;

    /* if continuing from a position before the block, reset position to
     * start of block (when corrected for the range over which it can be
     * spread in aw_pulse_set1()). */
    if (start_offset[bits] < MAX_FRAMESIZE / 2) {
        while (s->aw_first_pulse_off[1] - pitch[1] + s->aw_pulse_range > 0)
            s->aw_first_pulse_off[1] -= pitch[1];
        if (start_offset[bits] < 0)
            while (s->aw_first_pulse_off[0] - pitch[0] + s->aw_pulse_range > 0)
                s->aw_first_pulse_off[0] -= pitch[0];
    }
}

/**
 * Apply second set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb structure containing fixed codebook vector info
 */
static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
                          int block_idx, AMRFixed *fcb)
{
    uint16_t use_mask[7]; // only 5 are used, rest is padding
    /* in this function, idx is the index in the 80-bit (+ padding) use_mask
     * bit-array. Since use_mask consists of 16-bit values, the lower 4 bits
     * of idx are the position of the bit within a particular item in the
     * array (0 being the most significant bit, and 15 being the least
     * significant bit), and the remainder (>> 4) is the index in the
     * use_mask[]-array. This is faster and uses less memory than using a
     * 80-byte/80-int array. */
    int pulse_off    = s->aw_first_pulse_off[block_idx],
        pulse_start, n, idx, range, aidx, start_off = 0;

    /* set offset of first pulse to within this block */
    if (s->aw_n_pulses[block_idx] > 0)
        while (pulse_off + s->aw_pulse_range < 1)
            pulse_off += fcb->pitch_lag;

    /* find range per pulse */
    if (s->aw_n_pulses[0] > 0) {
        if (block_idx == 0) {
            range = 32;
        } else /* block_idx = 1 */ {
            range = 8;
            if (s->aw_n_pulses[block_idx] > 0)
                pulse_off = s->aw_next_pulse_off_cache;
        }
    } else
        range = 16;
    pulse_start = s->aw_n_pulses[block_idx] > 0 ? pulse_off - range / 2 : 0;

    /* aw_pulse_set1() already applies pulses around pulse_off (to be exactly,
     * in the range of [pulse_off, pulse_off + s->aw_pulse_range], and thus
     * we exclude that range from being pulsed again in this function. */
    memset( use_mask,   -1, 5 * sizeof(use_mask[0]));
    memset(&use_mask[5],  0, 2 * sizeof(use_mask[0]));
    if (s->aw_n_pulses[block_idx] > 0)
        for (idx = pulse_off; idx < MAX_FRAMESIZE / 2; idx += fcb->pitch_lag) {
            int excl_range         = s->aw_pulse_range; // always 16 or 24
            uint16_t *use_mask_ptr = &use_mask[idx >> 4];
            int first_sh           = 16 - (idx & 15);

            *use_mask_ptr++ &= 0xFFFF << first_sh;
            excl_range      -= first_sh;
            if (excl_range >= 16) {
                *use_mask_ptr++  = 0;
                *use_mask_ptr   &= 0xFFFF >> (excl_range - 16);
            } else
                *use_mask_ptr   &= 0xFFFF >> excl_range;
        }

    /* find the 'aidx'th offset that is not excluded */
    aidx = get_bits(gb, s->aw_n_pulses[0] > 0 ? 5 - 2 * block_idx : 4);
    for (n = 0; n <= aidx; pulse_start++) {
        for (idx = pulse_start; idx < 0; idx += fcb->pitch_lag) ;
        if (idx >= MAX_FRAMESIZE / 2) { // find from zero
            if (use_mask[0])      idx = 0x0F;
            else if (use_mask[1]) idx = 0x1F;
            else if (use_mask[2]) idx = 0x2F;
            else if (use_mask[3]) idx = 0x3F;
            else if (use_mask[4]) idx = 0x4F;
            else                  return;
            idx -= av_log2_16bit(use_mask[idx >> 4]);
        }
        if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) {
            use_mask[idx >> 4] &= ~(0x8000 >> (idx & 15));
            n++;
            start_off = idx;
        }
    }

    fcb->x[fcb->n] = start_off;
    fcb->y[fcb->n] = get_bits1(gb) ? -1.0 : 1.0;
    fcb->n++;

    /* set offset for next block, relative to start of that block */
    n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag;
    s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0;
}

/**
 * Apply first set of pitch-adaptive window pulses.
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx block index in frame [0, 1]
 * @param fcb storage location for fixed codebook pulse info
 */
static void aw_pulse_set1(WMAVoiceContext *s, GetBitContext *gb,
                          int block_idx, AMRFixed *fcb)
{
    int val = get_bits(gb, 12 - 2 * (s->aw_idx_is_ext && !block_idx));
    float v;

    if (s->aw_n_pulses[block_idx] > 0) {
        int n, v_mask, i_mask, sh, n_pulses;

        if (s->aw_pulse_range == 24) { // 3 pulses, 1:sign + 3:index each
            n_pulses = 3;
            v_mask   = 8;
            i_mask   = 7;
            sh       = 4;
        } else { // 4 pulses, 1:sign + 2:index each
            n_pulses = 4;
            v_mask   = 4;
            i_mask   = 3;
            sh       = 3;
        }

        for (n = n_pulses - 1; n >= 0; n--, val >>= sh) {
            fcb->y[fcb->n] = (val & v_mask) ? -1.0 : 1.0;
            fcb->x[fcb->n] = (val & i_mask) * n_pulses + n +
                             s->aw_first_pulse_off[block_idx];
            while (fcb->x[fcb->n] < 0)
                fcb->x[fcb->n] += fcb->pitch_lag;
            if (fcb->x[fcb->n] < MAX_FRAMESIZE / 2)
                fcb->n++;
        }
    } else {
        int num2 = (val & 0x1FF) >> 1, delta, idx;

        if (num2 < 1 * 79)      { delta = 1; idx = num2 + 1; }
        else if (num2 < 2 * 78) { delta = 3; idx = num2 + 1 - 1 * 77; }
        else if (num2 < 3 * 77) { delta = 5; idx = num2 + 1 - 2 * 76; }
        else                    { delta = 7; idx = num2 + 1 - 3 * 75; }
        v = (val & 0x200) ? -1.0 : 1.0;

        fcb->no_repeat_mask |= 3 << fcb->n;
        fcb->x[fcb->n]       = idx - delta;
        fcb->y[fcb->n]       = v;
        fcb->x[fcb->n + 1]   = idx;
        fcb->y[fcb->n + 1]   = (val & 1) ? -v : v;
        fcb->n              += 2;
    }
}

/**
 * @}
 *
 * Generate a random number from frame_cntr and block_idx, which will lie
 * in the range [0, 1000 - block_size] (so it can be used as an index in a
 * table of size 1000 of which you want to read block_size entries).
 *
 * @param frame_cntr current frame number
 * @param block_num current block index
 * @param block_size amount of entries we want to read from a table
 *                   that has 1000 entries
 * @return a (non-)random number in the [0, 1000 - block_size] range.
 */
static int pRNG(int frame_cntr, int block_num, int block_size)
{
    /* array to simplify the calculation of z:
     * y = (x % 9) * 5 + 6;
     * z = (49995 * x) / y;
     * Since y only has 9 values, we can remove the division by using a
     * LUT and using FASTDIV-style divisions. For each of the 9 values
     * of y, we can rewrite z as:
     * z = x * (49995 / y) + x * ((49995 % y) / y)
     * In this table, each col represents one possible value of y, the
     * first number is 49995 / y, and the second is the FASTDIV variant
     * of 49995 % y / y. */
    static const unsigned int div_tbl[9][2] = {
        { 8332,  3 * 715827883U }, // y =  6
        { 4545,  0 * 390451573U }, // y = 11
        { 3124, 11 * 268435456U }, // y = 16
        { 2380, 15 * 204522253U }, // y = 21
        { 1922, 23 * 165191050U }, // y = 26
        { 1612, 23 * 138547333U }, // y = 31
        { 1388, 27 * 119304648U }, // y = 36
        { 1219, 16 * 104755300U }, // y = 41
        { 1086, 39 *  93368855U }  // y = 46
    };
    unsigned int z, y, x = MUL16(block_num, 1877) + frame_cntr;
    if (x >= 0xFFFF) x -= 0xFFFF;   // max value of x is 8*1877+0xFFFE=0x13AA6,
                                    // so this is effectively a modulo (%)
    y = x - 9 * MULH(477218589, x); // x % 9
    z = (uint16_t) (x * div_tbl[y][0] + UMULH(x, div_tbl[y][1]));
                                    // z = x * 49995 / (y * 5 + 6)
    return z % (1000 - block_size);
}

/**
 * Parse hardcoded signal for a single block.
 * @note see #synth_block().
 */
/* The excitation is copied out of a fixed 1000-entry codebook
 * (wmavoice_std_codebook), scaled by a gain; the start index is either
 * pseudo-random (silence) or read from the bitstream (hardcoded). */
static void synth_block_hardcoded(WMAVoiceContext *s, GetBitContext *gb,
                                 int block_idx, int size,
                                 const struct frame_type_desc *frame_desc,
                                 float *excitation)
{
    float gain;
    int n, r_idx;

    assert(size <= MAX_FRAMESIZE);

    /* Set the offset from which we start reading wmavoice_std_codebook */
    if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
        r_idx = pRNG(s->frame_cntr, block_idx, size);
        gain  = s->silence_gain;
    } else /* FCB_TYPE_HARDCODED */ {
        r_idx = get_bits(gb, 8);
        gain  = wmavoice_gain_universal[get_bits(gb, 6)];
    }

    /* Clear gain prediction parameters */
    memset(s->gain_pred_err, 0, sizeof(s->gain_pred_err));

    /* Apply gain to hardcoded codebook and use that as excitation signal */
    for (n = 0; n < size; n++)
        excitation[n] = wmavoice_std_codebook[r_idx + n] * gain;
}

/**
 * Parse FCB/ACB signal for a single block.
 * @note see #synth_block().
 */
static void synth_block_fcb_acb(WMAVoiceContext *s, GetBitContext *gb,
                                int block_idx, int size,
                                int block_pitch_sh2,
                                const struct frame_type_desc *frame_desc,
                                float *excitation)
{
    static const float gain_coeff[6] = {
        0.8169, -0.06545, 0.1726, 0.0185, -0.0359, 0.0458
    };
    float pulses[MAX_FRAMESIZE / 2], pred_err, acb_gain, fcb_gain;
    int n, idx, gain_weight;
    AMRFixed fcb;

    assert(size <= MAX_FRAMESIZE / 2);
    memset(pulses, 0, sizeof(*pulses) * size);

    fcb.pitch_lag      = block_pitch_sh2 >> 2;
    fcb.pitch_fac      = 1.0;
    fcb.no_repeat_mask = 0;
    fcb.n              = 0;

    /* For the other frame types, this is where we apply the innovation
     * (fixed) codebook pulses of the speech signal. */
    if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
        aw_pulse_set1(s, gb, block_idx, &fcb);
        aw_pulse_set2(s, gb, block_idx, &fcb);
    } else /* FCB_TYPE_EXC_PULSES */ {
        int offset_nbits = 5 - frame_desc->log_n_blocks;

        fcb.no_repeat_mask = -1;
        /* similar to ff_decode_10_pulses_35bits(), but with single pulses
         * (instead of double) for a subset of pulses */
        for (n = 0; n < 5; n++) {
            float sign;
            int pos1, pos2;

            sign           = get_bits1(gb) ?
 1.0 : -1.0;
            pos1           = get_bits(gb, offset_nbits);
            fcb.x[fcb.n]   = n + 5 * pos1;
            fcb.y[fcb.n++] = sign;
            if (n < frame_desc->dbl_pulses) {
                pos2           = get_bits(gb, offset_nbits);
                fcb.x[fcb.n]   = n + 5 * pos2;
                fcb.y[fcb.n++] = (pos1 < pos2) ? -sign : sign;
            }
        }
    }
    ff_set_fixed_vector(pulses, &fcb, 1.0, size);

    /* Calculate gain for adaptive & fixed codebook signal.
     * see ff_amr_set_fixed_gain(). */
    idx = get_bits(gb, 7);
    /* FCB gain is predicted from the history of previous gain errors;
     * the 5.2409161640 constant is subtracted in the log domain. */
    fcb_gain = expf(ff_dot_productf(s->gain_pred_err, gain_coeff, 6) -
                    5.2409161640 + wmavoice_gain_codebook_fcb[idx]);
    acb_gain = wmavoice_gain_codebook_acb[idx];
    pred_err = av_clipf(wmavoice_gain_codebook_fcb[idx],
                        -2.9957322736 /* log(0.05) */,
                         1.6094379124 /* log(5.0) */);

    /* Shift the prediction-error history and insert the new (clipped)
     * error once per block; shorter frames carry more weight per entry. */
    gain_weight = 8 >> frame_desc->log_n_blocks;
    memmove(&s->gain_pred_err[gain_weight], s->gain_pred_err,
            sizeof(*s->gain_pred_err) * (6 - gain_weight));
    for (n = 0; n < gain_weight; n++)
        s->gain_pred_err[n] = pred_err;

    /* Calculation of adaptive codebook */
    if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
        int len;
        for (n = 0; n < size; n += len) {
            int next_idx_sh16;
            int abs_idx    = block_idx * size + n;
            /* per-sample pitch, interpolated in 16.16 fixed point */
            int pitch_sh16 = (s->last_pitch_val << 16) +
                             s->pitch_diff_sh16 * abs_idx;
            int pitch      = (pitch_sh16 + 0x6FFF) >> 16;
            int idx_sh16   = ((pitch << 16) - pitch_sh16) * 8 + 0x58000;
            idx            = idx_sh16 >> 16;
            if (s->pitch_diff_sh16) {
                if (s->pitch_diff_sh16 > 0) {
                    next_idx_sh16 = (idx_sh16) &~ 0xFFFF;
                } else
                    next_idx_sh16 = (idx_sh16 + 0x10000) &~ 0xFFFF;
                len = av_clip((idx_sh16 - next_idx_sh16) / s->pitch_diff_sh16 / 8,
                              1, size - n);
            } else
                len = size;

            ff_acelp_interpolatef(&excitation[n], &excitation[n - pitch],
                                  wmavoice_ipol1_coeffs, 17,
                                  idx, 9, len);
        }
    } else /* ACB_TYPE_HAMMING */ {
        int block_pitch = block_pitch_sh2 >> 2;
        idx             = block_pitch_sh2 & 3;
        if (idx) {
            ff_acelp_interpolatef(excitation, &excitation[-block_pitch],
                                  wmavoice_ipol2_coeffs, 4,
                                  idx, 8, size);
        } else
            /* integer pitch lag: plain (possibly overlapping) backward copy */
            av_memcpy_backptr(excitation, sizeof(float) * block_pitch,
                              sizeof(float) * size);
    }

    /* Interpolate ACB/FCB and use as excitation signal */
    ff_weighted_vector_sumf(excitation, excitation, pulses,
                            acb_gain, fcb_gain, size);
}

/**
 * Parse data in a single block.
 * @note we assume enough bits are available, caller should check.
 *
 * @param s WMA Voice decoding context private data
 * @param gb bit I/O context
 * @param block_idx index of the to-be-read block
 * @param size amount of samples to be read in this block
 * @param block_pitch_sh2 pitch for this block << 2
 * @param lsps LSPs for (the end of) this frame
 * @param prev_lsps LSPs for the last frame
 * @param frame_desc frame type descriptor
 * @param excitation target memory for the ACB+FCB interpolated signal
 * @param synth target memory for the speech synthesis filter output
 * @return 0 on success, <0 on error.
 */
static void synth_block(WMAVoiceContext *s, GetBitContext *gb,
                        int block_idx, int size,
                        int block_pitch_sh2,
                        const double *lsps, const double *prev_lsps,
                        const struct frame_type_desc *frame_desc,
                        float *excitation, float *synth)
{
    double i_lsps[MAX_LSPS];
    float lpcs[MAX_LSPS];
    float fac;
    int n;

    /* parse the excitation for this block (hardcoded codebook or FCB+ACB) */
    if (frame_desc->acb_type == ACB_TYPE_NONE)
        synth_block_hardcoded(s, gb, block_idx, size, frame_desc, excitation);
    else
        synth_block_fcb_acb(s, gb, block_idx, size, block_pitch_sh2,
                            frame_desc, excitation);

    /* convert interpolated LSPs to LPCs */
    fac = (block_idx + 0.5) / frame_desc->n_blocks;
    for (n = 0; n < s->lsps; n++) // LSF -> LSP
        i_lsps[n] = cos(prev_lsps[n] + fac * (lsps[n] - prev_lsps[n]));
    ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1);

    /* Speech synthesis */
    ff_celp_lp_synthesis_filterf(synth, lpcs, excitation, size, s->lsps);
}

/**
 * Synthesize output samples for a single frame.
 * @note we assume enough bits are available, caller should check.
* * @param ctx WMA Voice decoder context * @param gb bit I/O context (s->gb or one for cross-packet superframes) * @param frame_idx Frame number within superframe [0-2] * @param samples pointer to output sample buffer, has space for at least 160 * samples * @param lsps LSP array * @param prev_lsps array of previous frame's LSPs * @param excitation target buffer for excitation signal * @param synth target buffer for synthesized speech data * @return 0 on success, <0 on error. */ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx, float *samples, const double *lsps, const double *prev_lsps, float *excitation, float *synth) { WMAVoiceContext *s = ctx->priv_data; int n, n_blocks_x2, log_n_blocks_x2, cur_pitch_val; int pitch[MAX_BLOCKS], last_block_pitch; /* Parse frame type ("frame header"), see frame_descs */ int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)], block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks; if (bd_idx < 0) { av_log(ctx, AV_LOG_ERROR, "Invalid frame type VLC code, skipping\n"); return -1; } /* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */ if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) { /* Pitch is provided per frame, which is interpreted as the pitch of * the last sample of the last block of this frame. We can interpolate * the pitch of other blocks (and even pitch-per-sample) by gradually * incrementing/decrementing prev_frame_pitch to cur_pitch_val. 
*/ n_blocks_x2 = frame_descs[bd_idx].n_blocks << 1; log_n_blocks_x2 = frame_descs[bd_idx].log_n_blocks + 1; cur_pitch_val = s->min_pitch_val + get_bits(gb, s->pitch_nbits); cur_pitch_val = FFMIN(cur_pitch_val, s->max_pitch_val - 1); if (s->last_acb_type == ACB_TYPE_NONE || 20 * abs(cur_pitch_val - s->last_pitch_val) > (cur_pitch_val + s->last_pitch_val)) s->last_pitch_val = cur_pitch_val; /* pitch per block */ for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) { int fac = n * 2 + 1; pitch[n] = (MUL16(fac, cur_pitch_val) + MUL16((n_blocks_x2 - fac), s->last_pitch_val) + frame_descs[bd_idx].n_blocks) >> log_n_blocks_x2; } /* "pitch-diff-per-sample" for calculation of pitch per sample */ s->pitch_diff_sh16 = ((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE; } /* Global gain (if silence) and pitch-adaptive window coordinates */ switch (frame_descs[bd_idx].fcb_type) { case FCB_TYPE_SILENCE: s->silence_gain = wmavoice_gain_silence[get_bits(gb, 8)]; break; case FCB_TYPE_AW_PULSES: aw_parse_coords(s, gb, pitch); break; } for (n = 0; n < frame_descs[bd_idx].n_blocks; n++) { int bl_pitch_sh2; /* Pitch calculation for ACB_TYPE_HAMMING ("pitch-per-block") */ switch (frame_descs[bd_idx].acb_type) { case ACB_TYPE_HAMMING: { /* Pitch is given per block. Per-block pitches are encoded as an * absolute value for the first block, and then delta values * relative to this value) for all subsequent blocks. The scale of * this pitch value is semi-logaritmic compared to its use in the * decoder, so we convert it to normal scale also. 
*/ int block_pitch, t1 = (s->block_conv_table[1] - s->block_conv_table[0]) << 2, t2 = (s->block_conv_table[2] - s->block_conv_table[1]) << 1, t3 = s->block_conv_table[3] - s->block_conv_table[2] + 1; if (n == 0) { block_pitch = get_bits(gb, s->block_pitch_nbits); } else block_pitch = last_block_pitch - s->block_delta_pitch_hrange + get_bits(gb, s->block_delta_pitch_nbits); /* Convert last_ so that any next delta is within _range */ last_block_pitch = av_clip(block_pitch, s->block_delta_pitch_hrange, s->block_pitch_range - s->block_delta_pitch_hrange); /* Convert semi-log-style scale back to normal scale */ if (block_pitch < t1) { bl_pitch_sh2 = (s->block_conv_table[0] << 2) + block_pitch; } else { block_pitch -= t1; if (block_pitch < t2) { bl_pitch_sh2 = (s->block_conv_table[1] << 2) + (block_pitch << 1); } else { block_pitch -= t2; if (block_pitch < t3) { bl_pitch_sh2 = (s->block_conv_table[2] + block_pitch) << 2; } else bl_pitch_sh2 = s->block_conv_table[3] << 2; } } pitch[n] = bl_pitch_sh2 >> 2; break; } case ACB_TYPE_ASYMMETRIC: { bl_pitch_sh2 = pitch[n] << 2; break; } default: // ACB_TYPE_NONE has no pitch bl_pitch_sh2 = 0; break; } synth_block(s, gb, n, block_nsamples, bl_pitch_sh2, lsps, prev_lsps, &frame_descs[bd_idx], &excitation[n * block_nsamples], &synth[n * block_nsamples]); } /* Averaging projection filter, if applicable. 
Else, just copy samples * from synthesis buffer */ if (s->do_apf) { double i_lsps[MAX_LSPS]; float lpcs[MAX_LSPS]; for (n = 0; n < s->lsps; n++) // LSF -> LSP i_lsps[n] = cos(0.5 * (prev_lsps[n] + lsps[n])); ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1); postfilter(s, synth, samples, 80, lpcs, &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx], frame_descs[bd_idx].fcb_type, pitch[0]); for (n = 0; n < s->lsps; n++) // LSF -> LSP i_lsps[n] = cos(lsps[n]); ff_acelp_lspd2lpc(i_lsps, lpcs, s->lsps >> 1); postfilter(s, &synth[80], &samples[80], 80, lpcs, &s->zero_exc_pf[s->history_nsamples + MAX_FRAMESIZE * frame_idx + 80], frame_descs[bd_idx].fcb_type, pitch[0]); } else memcpy(samples, synth, 160 * sizeof(synth[0])); /* Cache values for next frame */ s->frame_cntr++; if (s->frame_cntr >= 0xFFFF) s->frame_cntr -= 0xFFFF; // i.e. modulo (%) s->last_acb_type = frame_descs[bd_idx].acb_type; switch (frame_descs[bd_idx].acb_type) { case ACB_TYPE_NONE: s->last_pitch_val = 0; break; case ACB_TYPE_ASYMMETRIC: s->last_pitch_val = cur_pitch_val; break; case ACB_TYPE_HAMMING: s->last_pitch_val = pitch[frame_descs[bd_idx].n_blocks - 1]; break; } return 0; } /** * Ensure minimum value for first item, maximum value for last value, * proper spacing between each value and proper ordering. * * @param lsps array of LSPs * @param num size of LSP array * * @note basically a double version of #ff_acelp_reorder_lsf(), might be * useful to put in a generic location later on. Parts are also * present in #ff_set_min_dist_lsf() + #ff_sort_nearly_sorted_floats(), * which is in float. */ static void stabilize_lsps(double *lsps, int num) { int n, m, l; /* set minimum value for first, maximum value for last and minimum * spacing between LSF values. * Very similar to ff_set_min_dist_lsf(), but in double. 
 */
    lsps[0]       = FFMAX(lsps[0],       0.0015 * M_PI);
    for (n = 1; n < num; n++)
        lsps[n]   = FFMAX(lsps[n], lsps[n - 1] + 0.0125 * M_PI);
    lsps[num - 1] = FFMIN(lsps[num - 1], 0.9985 * M_PI);

    /* reorder (looks like one-time / non-recursed bubblesort).
     * Very similar to ff_sort_nearly_sorted_floats(), but in double. */
    for (n = 1; n < num; n++) {
        if (lsps[n] < lsps[n - 1]) {
            /* only falls through to the insertion sort when the array is
             * actually out of order */
            for (m = 1; m < num; m++) {
                double tmp = lsps[m];
                for (l = m - 1; l >= 0; l--) {
                    if (lsps[l] <= tmp) break;
                    lsps[l + 1] = lsps[l];
                }
                lsps[l + 1] = tmp;
            }
            break;
        }
    }
}

/**
 * Test if there's enough bits to read 1 superframe.
 * Simulates the whole superframe parse on a private copy of the bitreader,
 * only skipping bits, without decoding anything.
 *
 * @param orig_gb bit I/O context used for reading. This function
 *                does not modify the state of the bitreader; it
 *                only uses it to copy the current stream position
 * @param s WMA Voice decoding context private data
 * @return -1 if unsupported, 1 on not enough bits or 0 if OK.
 */
static int check_bits_for_superframe(GetBitContext *orig_gb,
                                     WMAVoiceContext *s)
{
    GetBitContext s_gb, *gb = &s_gb;
    int n, need_bits, bd_idx;
    const struct frame_type_desc *frame_desc;

    /* initialize a copy */
    init_get_bits(gb, orig_gb->buffer, orig_gb->size_in_bits);
    skip_bits_long(gb, get_bits_count(orig_gb));
    assert(get_bits_left(gb) == get_bits_left(orig_gb));

    /* superframe header */
    if (get_bits_left(gb) < 14)
        return 1;
    if (!get_bits1(gb))
        return -1;                        // WMAPro-in-WMAVoice superframe
    if (get_bits1(gb)) skip_bits(gb, 12); // number of samples in superframe
    if (s->has_residual_lsps) {           // residual LSPs (for all frames)
        if (get_bits_left(gb) < s->sframe_lsp_bitsize)
            return 1;
        skip_bits_long(gb, s->sframe_lsp_bitsize);
    }

    /* frames */
    for (n = 0; n < MAX_FRAMES; n++) {
        int aw_idx_is_ext = 0;

        if (!s->has_residual_lsps) {      // independent LSPs (per-frame)
           if (get_bits_left(gb) < s->frame_lsp_bitsize)
               return 1;
           skip_bits_long(gb, s->frame_lsp_bitsize);
        }
        bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)];
        if (bd_idx < 0)
            return -1;                    // invalid frame type VLC code
        frame_desc = &frame_descs[bd_idx];
        if (frame_desc->acb_type == ACB_TYPE_ASYMMETRIC) {
            if (get_bits_left(gb) < s->pitch_nbits)
                return 1;
            skip_bits_long(gb, s->pitch_nbits);
        }
        if (frame_desc->fcb_type == FCB_TYPE_SILENCE) {
            skip_bits(gb, 8);
        } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
            int tmp = get_bits(gb, 6);
            if (tmp >= 0x36) {
                skip_bits(gb, 2);
                aw_idx_is_ext = 1;
            }
        }

        /* blocks */
        if (frame_desc->acb_type == ACB_TYPE_HAMMING) {
            need_bits = s->block_pitch_nbits +
                (frame_desc->n_blocks - 1) * s->block_delta_pitch_nbits;
        } else if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
            need_bits = 2 * !aw_idx_is_ext;
        } else
            need_bits = 0;
        need_bits += frame_desc->frame_size;
        if (get_bits_left(gb) < need_bits)
            return 1;
        skip_bits_long(gb, need_bits);
    }

    return 0;
}

/**
 * Synthesize output samples for a single superframe. If we have any data
 * cached in s->sframe_cache, that will be used instead of whatever is loaded
 * in s->gb.
 *
 * WMA Voice superframes contain 3 frames, each containing 160 audio samples,
 * to give a total of 480 samples per superframe. See #synth_frame() for frame
 * parsing. In addition to 3 frames, superframes can also contain the LSPs
 * (if these are globally specified for all frames (residually); they can
 * also be specified individually per-frame. See the s->has_residual_lsps
 * option), and can specify the number of samples encoded in this superframe
 * (if less than 480), usually used to prevent blanks at track boundaries.
 *
 * @param ctx WMA Voice decoder context
 * @param samples pointer to output buffer for voice samples
 * @param data_size pointer containing the size of #samples on input, and the
 *                  amount of #samples filled on output
 * @return 0 on success, <0 on error or 1 if there was not enough data to
 *         fully parse the superframe
 */
static int synth_superframe(AVCodecContext *ctx,
                            float *samples, int *data_size)
{
    WMAVoiceContext *s = ctx->priv_data;
    GetBitContext *gb = &s->gb, s_gb;
    int n, res, n_samples = 480;
    double lsps[MAX_FRAMES][MAX_LSPS];
    const double *mean_lsf = s->lsps == 16 ?
        wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
    float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
    float synth[MAX_LSPS + MAX_SFRAMESIZE];

    /* prepend history from the previous superframe so the synthesis and
     * adaptive-codebook lookups can reach back into it */
    memcpy(synth,      s->synth_history,
           s->lsps             * sizeof(*synth));
    memcpy(excitation, s->excitation_history,
           s->history_nsamples * sizeof(*excitation));

    if (s->sframe_cache_size > 0) {
        gb = &s_gb;
        init_get_bits(gb, s->sframe_cache, s->sframe_cache_size);
        s->sframe_cache_size = 0;
    }

    if ((res = check_bits_for_superframe(gb, s)) == 1)
        return 1;

    /* First bit is speech/music bit, it differentiates between WMAVoice
     * speech samples (the actual codec) and WMAVoice music samples, which
     * are really WMAPro-in-WMAVoice-superframes. I've never seen those in
     * the wild yet. */
    if (!get_bits1(gb)) {
        av_log_missing_feature(ctx, "WMAPro-in-WMAVoice support", 1);
        return -1;
    }

    /* (optional) nr. of samples in superframe; always <= 480 and >= 0 */
    if (get_bits1(gb)) {
        if ((n_samples = get_bits(gb, 12)) > 480) {
            av_log(ctx, AV_LOG_ERROR,
                   "Superframe encodes >480 samples (%d), not allowed\n",
                   n_samples);
            return -1;
        }
    }
    /* Parse LSPs, if global for the superframe (can also be per-frame). */
    if (s->has_residual_lsps) {
        double prev_lsps[MAX_LSPS], a1[MAX_LSPS * 2], a2[MAX_LSPS * 2];

        for (n = 0; n < s->lsps; n++)
            prev_lsps[n] = s->prev_lsps[n] - mean_lsf[n];

        if (s->lsps == 10) {
            dequant_lsp10r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);
        } else /* s->lsps == 16 */
            dequant_lsp16r(gb, lsps[2], prev_lsps, a1, a2, s->lsp_q_mode);

        /* LSPs of frames 0 and 1 are interpolated from the residual data */
        for (n = 0; n < s->lsps; n++) {
            lsps[0][n]  = mean_lsf[n] + (a1[n]           - a2[n * 2]);
            lsps[1][n]  = mean_lsf[n] + (a1[s->lsps + n] - a2[n * 2 + 1]);
            lsps[2][n] += mean_lsf[n];
        }
        for (n = 0; n < 3; n++)
            stabilize_lsps(lsps[n], s->lsps);
    }

    /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
    for (n = 0; n < 3; n++) {
        if (!s->has_residual_lsps) {
            int m;

            if (s->lsps == 10) {
                dequant_lsp10i(gb, lsps[n]);
            } else /* s->lsps == 16 */
                dequant_lsp16i(gb, lsps[n]);

            for (m = 0; m < s->lsps; m++)
                lsps[n][m] += mean_lsf[m];
            stabilize_lsps(lsps[n], s->lsps);
        }

        if ((res = synth_frame(ctx, gb, n,
                               &samples[n * MAX_FRAMESIZE],
                               lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
                               &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
                               &synth[s->lsps + n * MAX_FRAMESIZE])))
            return res;
    }

    /* Statistics? FIXME - we don't check for length, a slight overrun
     * will be caught by internal buffer padding, and anything else
     * will be skipped, not read. */
    if (get_bits1(gb)) {
        res = get_bits(gb, 4);
        skip_bits(gb, 10 * (res + 1));
    }

    /* Specify nr. of output samples */
    *data_size = n_samples * sizeof(float);

    /* Update history */
    memcpy(s->prev_lsps,          lsps[2],
           s->lsps             * sizeof(*s->prev_lsps));
    memcpy(s->synth_history,      &synth[MAX_SFRAMESIZE],
           s->lsps             * sizeof(*synth));
    memcpy(s->excitation_history, &excitation[MAX_SFRAMESIZE],
           s->history_nsamples * sizeof(*excitation));
    if (s->do_apf)
        memmove(s->zero_exc_pf,   &s->zero_exc_pf[MAX_SFRAMESIZE],
                s->history_nsamples * sizeof(*s->zero_exc_pf));

    return 0;
}

/**
 * Parse the packet header at the start of each packet (input data to this
 * decoder).
 *
 * @param s WMA Voice decoding context private data
 * @return 1 if not enough bits were available, or 0 on success.
 */
static int parse_packet_header(WMAVoiceContext *s)
{
    GetBitContext *gb = &s->gb;
    unsigned int res;

    if (get_bits_left(gb) < 11)
        return 1;
    skip_bits(gb, 4);          // packet sequence number
    s->has_residual_lsps = get_bits1(gb);
    do {
        res = get_bits(gb, 6); // number of superframes per packet
                               // (minus first one if there is spillover)
        /* 0x3F is an escape value: another 6-bit count follows */
        if (get_bits_left(gb) < 6 * (res == 0x3F) + s->spillover_bitsize)
            return 1;
    } while (res == 0x3F);
    s->spillover_nbits = get_bits(gb, s->spillover_bitsize);

    return 0;
}

/**
 * Copy (unaligned) bits from gb/data/size to pb.
 *
 * @param pb target buffer to copy bits into
 * @param data source buffer to copy bits from
 * @param size size of the source data, in bytes
 * @param gb bit I/O context specifying the current position in the source
 *           data. This function might use this to align the bit position to
 *           a whole-byte boundary before calling #ff_copy_bits() on aligned
 *           source data
 * @param nbits the amount of bits to copy from source to target
 *
 * @note after calling this function, the current position in the input bit
 *       I/O context is undefined.
 */
static void copy_bits(PutBitContext *pb,
                      const uint8_t *data, int size,
                      GetBitContext *gb, int nbits)
{
    int rmn_bytes, rmn_bits;

    rmn_bits = rmn_bytes = get_bits_left(gb);
    if (rmn_bits < nbits)
        return;
    /* copy the sub-byte remainder bit-by-bit, then the rest byte-aligned */
    rmn_bits &= 7; rmn_bytes >>= 3;
    if ((rmn_bits = FFMIN(rmn_bits, nbits)) > 0)
        put_bits(pb, rmn_bits, get_bits(gb, rmn_bits));
    ff_copy_bits(pb, data + size - rmn_bytes,
                 FFMIN(nbits - rmn_bits, rmn_bytes << 3));
}

/**
 * Packet decoding: a packet is anything that the (ASF) demuxer contains,
 * and we expect that the demuxer / application provides it to us as such
 * (else you'll probably get garbage as output). Every packet has a size of
 * ctx->block_align bytes, starts with a packet header (see
 * #parse_packet_header()), and then a series of superframes.
 Superframe
 * boundaries may exceed packets, i.e. superframes can split data over
 * multiple (two) packets.
 *
 * For more information about frames, see #synth_superframe().
 */
static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
                                  int *data_size, AVPacket *avpkt)
{
    WMAVoiceContext *s = ctx->priv_data;
    GetBitContext *gb = &s->gb;
    int size, res, pos;

    /* NOTE(review): "%lu" vs the size_t result of 480 * sizeof(float) may
     * mismatch on LLP64 targets — "%zu" would be portable; verify. */
    if (*data_size < 480 * sizeof(float)) {
        av_log(ctx, AV_LOG_ERROR,
               "Output buffer too small (%d given - %lu needed)\n",
               *data_size, 480 * sizeof(float));
        return -1;
    }
    *data_size = 0;

    /* Packets are sometimes a multiple of ctx->block_align, with a packet
     * header at each ctx->block_align bytes. However, FFmpeg's ASF demuxer
     * feeds us ASF packets, which may concatenate multiple "codec" packets
     * in a single "muxer" packet, so we artificially emulate that by
     * capping the packet size at ctx->block_align. */
    for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
    if (!size)
        return 0;
    init_get_bits(&s->gb, avpkt->data, size << 3);

    /* size == ctx->block_align is used to indicate whether we are dealing with
     * a new packet or a packet of which we already read the packet header
     * previously. */
    if (size == ctx->block_align) { // new packet header
        if ((res = parse_packet_header(s)) < 0)
            return res;

        /* If the packet header specifies a s->spillover_nbits, then we want
         * to push out all data of the previous packet (+ spillover) before
         * continuing to parse new superframes in the current packet. */
        if (s->spillover_nbits > 0) {
            if (s->sframe_cache_size > 0) {
                int cnt = get_bits_count(gb);
                /* append the spillover bits to the cached partial superframe
                 * and try to decode the completed superframe */
                copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
                flush_put_bits(&s->pb);
                s->sframe_cache_size += s->spillover_nbits;
                if ((res = synth_superframe(ctx, data, data_size)) == 0 &&
                    *data_size > 0) {
                    cnt += s->spillover_nbits;
                    s->skip_bits_next = cnt & 7;
                    return cnt >> 3;
                } else
                    skip_bits_long (gb, s->spillover_nbits - cnt +
                                    get_bits_count(gb)); // resync
            } else
                skip_bits_long(gb, s->spillover_nbits); // resync
        }
    } else if (s->skip_bits_next)
        skip_bits(gb, s->skip_bits_next);

    /* Try parsing superframes in current packet */
    s->sframe_cache_size = 0;
    s->skip_bits_next = 0;
    pos = get_bits_left(gb);
    if ((res = synth_superframe(ctx, data, data_size)) < 0) {
        return res;
    } else if (*data_size > 0) {
        int cnt = get_bits_count(gb);
        s->skip_bits_next = cnt & 7;
        return cnt >> 3;
    } else if ((s->sframe_cache_size = pos) > 0) {
        /* rewind bit reader to start of last (incomplete) superframe... */
        init_get_bits(gb, avpkt->data, size << 3);
        skip_bits_long(gb, (size << 3) - pos);
        assert(get_bits_left(gb) == pos);

        /* ...and cache it for spillover in next packet */
        init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
        copy_bits(&s->pb, avpkt->data, size, gb, s->sframe_cache_size);
        // FIXME bad - just copy bytes as whole and add use the
        // skip_bits_next field
    }

    return size;
}

static av_cold int wmavoice_decode_end(AVCodecContext *ctx)
{
    WMAVoiceContext *s = ctx->priv_data;

    /* free the postfilter transform contexts, only allocated with do_apf */
    if (s->do_apf) {
        ff_rdft_end(&s->rdft);
        ff_rdft_end(&s->irdft);
        ff_dct_end(&s->dct);
        ff_dct_end(&s->dst);
    }

    return 0;
}

static av_cold void wmavoice_flush(AVCodecContext *ctx)
{
    WMAVoiceContext *s = ctx->priv_data;
    int n;

    /* reset all inter-packet / inter-superframe state to initial values */
    s->postfilter_agc    = 0;
    s->sframe_cache_size = 0;
    s->skip_bits_next    = 0;
    for (n = 0; n < s->lsps; n++)
        s->prev_lsps[n] = M_PI * (n + 1.0) / (s->lsps + 1.0);
    memset(s->excitation_history, 0,
           sizeof(*s->excitation_history) * MAX_SIGNAL_HISTORY);
    memset(s->synth_history,      0,
           sizeof(*s->synth_history)      * MAX_LSPS);
    memset(s->gain_pred_err,      0,
           sizeof(s->gain_pred_err));

    if (s->do_apf) {
        memset(&s->synth_filter_out_buf[MAX_LSPS_ALIGN16 - s->lsps], 0,
               sizeof(*s->synth_filter_out_buf) * s->lsps);
        memset(s->dcf_mem,              0,
               sizeof(*s->dcf_mem)              * 2);
        memset(s->zero_exc_pf,          0,
               sizeof(*s->zero_exc_pf)          * s->history_nsamples);
        memset(s->denoise_filter_cache, 0, sizeof(s->denoise_filter_cache));
    }
}

AVCodec wmavoice_decoder = {
    "wmavoice",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_WMAVOICE,
    sizeof(WMAVoiceContext),
    wmavoice_decode_init,
    NULL,
    wmavoice_decode_end,
    wmavoice_decode_packet,
    CODEC_CAP_SUBFRAMES,
    .flush     = wmavoice_flush,
    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
};
123linslouis-android-video-cutter
jni/libavcodec/wmavoice.c
C
asf20
81,757
/*
 * Sun mediaLib optimized DSP utils
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"

#include <mlib_types.h>
#include <mlib_status.h>
#include <mlib_sys.h>
#include <mlib_algebra.h>
#include <mlib_video.h>

/* misc */

/* Copy an 8x8 block of unsigned pixels into a signed 16-bit DCT block,
 * one row per mediaLib call. */
static void get_pixels_mlib(DCTELEM *restrict block, const uint8_t *pixels,
                            int line_size)
{
    int i;

    for (i=0;i<8;i++) {
        mlib_VectorConvert_S16_U8_Mod((mlib_s16 *)block, (mlib_u8 *)pixels, 8);

        pixels += line_size;
        block += 8;
    }
}

/* Store the element-wise difference s1 - s2 of two 8x8 pixel blocks into a
 * signed 16-bit DCT block. */
static void diff_pixels_mlib(DCTELEM *restrict block, const uint8_t *s1,
                             const uint8_t *s2, int line_size)
{
    int i;

    for (i=0;i<8;i++) {
        mlib_VectorSub_S16_U8_Mod((mlib_s16 *)block, (mlib_u8 *)s1,
                                  (mlib_u8 *)s2, 8);

        s1 += line_size;
        s2 += line_size;
        block += 8;
    }
}

/* Add a 16-bit IDCT result block to the destination pixels with clamping. */
static void add_pixels_clamped_mlib(const DCTELEM *block, uint8_t *pixels,
                                    int line_size)
{
    mlib_VideoAddBlock_U8_S16(pixels, (mlib_s16 *)block, line_size);
}

/* put block, width 16 pixel, height 8/16 */

static void put_pixels16_mlib (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoCopyRef_U8_U8_16x8(dest, (uint8_t *)ref, stride);
        break;

    case 16:
        mlib_VideoCopyRef_U8_U8_16x16(dest, (uint8_t *)ref, stride);
        break;

    default:
        assert(0); // only heights 8 and 16 are supported by mediaLib
    }
}

static void put_pixels16_x2_mlib (uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpX_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpX_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void put_pixels16_y2_mlib (uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpY_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpY_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void put_pixels16_xy2_mlib(uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpXY_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpXY_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

/* put block, width 8 pixel, height 4/8/16 */

static void put_pixels8_mlib (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoCopyRef_U8_U8_8x4(dest, (uint8_t *)ref, stride);
        break;

    case 8:
        mlib_VideoCopyRef_U8_U8_8x8(dest, (uint8_t *)ref, stride);
        break;

    case 16:
        mlib_VideoCopyRef_U8_U8_8x16(dest, (uint8_t *)ref, stride);
        break;

    default:
        assert(0); // only heights 4, 8 and 16 are supported by mediaLib
    }
}

static void put_pixels8_x2_mlib (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpX_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpX_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpX_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void put_pixels8_y2_mlib (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpY_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpY_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpY_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void put_pixels8_xy2_mlib(uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpXY_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpXY_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpXY_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

/* average block, width 16 pixel, height 8/16 */

static void avg_pixels16_mlib (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoCopyRefAve_U8_U8_16x8(dest, (uint8_t *)ref, stride);
        break;

    case 16:
        mlib_VideoCopyRefAve_U8_U8_16x16(dest, (uint8_t *)ref, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels16_x2_mlib (uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpAveX_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveX_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels16_y2_mlib (uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpAveY_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveY_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels16_xy2_mlib(uint8_t * dest, const uint8_t * ref,
                                  int stride, int height)
{
    switch (height) {
    case 8:
        mlib_VideoInterpAveXY_U8_U8_16x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveXY_U8_U8_16x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

/* average block, width 8 pixel, height 4/8/16 */

static void avg_pixels8_mlib (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoCopyRefAve_U8_U8_8x4(dest, (uint8_t *)ref, stride);
        break;

    case 8:
        mlib_VideoCopyRefAve_U8_U8_8x8(dest, (uint8_t *)ref, stride);
        break;

    case 16:
        mlib_VideoCopyRefAve_U8_U8_8x16(dest, (uint8_t *)ref, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels8_x2_mlib (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpAveX_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpAveX_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveX_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels8_y2_mlib (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpAveY_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpAveY_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveY_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

static void avg_pixels8_xy2_mlib(uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    switch (height) {
    case 4:
        mlib_VideoInterpAveXY_U8_U8_8x4(dest, (uint8_t *)ref, stride, stride);
        break;

    case 8:
        mlib_VideoInterpAveXY_U8_U8_8x8(dest, (uint8_t *)ref, stride, stride);
        break;

    case 16:
        mlib_VideoInterpAveXY_U8_U8_8x16(dest, (uint8_t *)ref, stride, stride);
        break;

    default:
        assert(0);
    }
}

/* swap byte order of a buffer */

static void bswap_buf_mlib(uint32_t *dst, const uint32_t *src, int w)
{
    mlib_VectorReverseByteOrder_U32_U32(dst, src, w);
}

/* transformations */

/* 8x8 inverse DCT followed by a clamped store of the result. */
static void ff_idct_put_mlib(uint8_t *dest, int line_size, DCTELEM *data)
{
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    mlib_VideoIDCT8x8_S16_S16 (data, data);

    for(i=0;i<8;i++) {
        dest[0] = cm[data[0]];
        dest[1] = cm[data[1]];
        dest[2] = cm[data[2]];
        dest[3] = cm[data[3]];
        dest[4] = cm[data[4]];
        dest[5] = cm[data[5]];
        dest[6] = cm[data[6]];
        dest[7] = cm[data[7]];

        dest += line_size;
        data += 8;
    }
}

static void
ff_idct_add_mlib(uint8_t *dest, int line_size, DCTELEM *data) { mlib_VideoIDCT8x8_S16_S16 (data, data); mlib_VideoAddBlock_U8_S16(dest, (mlib_s16 *)data, line_size); } static void ff_idct_mlib(DCTELEM *data) { mlib_VideoIDCT8x8_S16_S16 (data, data); } static void ff_fdct_mlib(DCTELEM *data) { mlib_VideoDCT8x8_S16_S16 (data, data); } void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx) { c->get_pixels = get_pixels_mlib; c->diff_pixels = diff_pixels_mlib; c->add_pixels_clamped = add_pixels_clamped_mlib; c->put_pixels_tab[0][0] = put_pixels16_mlib; c->put_pixels_tab[0][1] = put_pixels16_x2_mlib; c->put_pixels_tab[0][2] = put_pixels16_y2_mlib; c->put_pixels_tab[0][3] = put_pixels16_xy2_mlib; c->put_pixels_tab[1][0] = put_pixels8_mlib; c->put_pixels_tab[1][1] = put_pixels8_x2_mlib; c->put_pixels_tab[1][2] = put_pixels8_y2_mlib; c->put_pixels_tab[1][3] = put_pixels8_xy2_mlib; c->avg_pixels_tab[0][0] = avg_pixels16_mlib; c->avg_pixels_tab[0][1] = avg_pixels16_x2_mlib; c->avg_pixels_tab[0][2] = avg_pixels16_y2_mlib; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mlib; c->avg_pixels_tab[1][0] = avg_pixels8_mlib; c->avg_pixels_tab[1][1] = avg_pixels8_x2_mlib; c->avg_pixels_tab[1][2] = avg_pixels8_y2_mlib; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mlib; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mlib; c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mlib; c->bswap_buf = bswap_buf_mlib; } void MPV_common_init_mlib(MpegEncContext *s) { if(s->avctx->dct_algo==FF_DCT_AUTO || s->avctx->dct_algo==FF_DCT_MLIB){ s->dsp.fdct = ff_fdct_mlib; } if(s->avctx->idct_algo==FF_IDCT_MLIB){ s->dsp.idct_put= ff_idct_put_mlib; s->dsp.idct_add= ff_idct_add_mlib; s->dsp.idct = ff_idct_mlib; s->dsp.idct_permutation_type= FF_NO_IDCT_PERM; } }
123linslouis-android-video-cutter
jni/libavcodec/mlib/dsputil_mlib.c
C
asf20
11,449
/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP5/VP6 DSP function table: edge loop filters selectable per codec,
 * with an optional ARM-optimized initializer.
 */

#ifndef AVCODEC_VP56DSP_H
#define AVCODEC_VP56DSP_H

#include <stdint.h>

typedef struct VP56DSPContext {
    /** Horizontal edge loop filter; t is the filter threshold. */
    void (*edge_filter_hor)(uint8_t *yuv, int stride, int t);
    /** Vertical edge loop filter; t is the filter threshold. */
    void (*edge_filter_ver)(uint8_t *yuv, int stride, int t);
} VP56DSPContext;

/** Fill s with the default C implementations for the given codec. */
void ff_vp56dsp_init(VP56DSPContext *s, enum CodecID codec);
/** Override entries in s with ARM-optimized versions where available. */
void ff_vp56dsp_init_arm(VP56DSPContext *s, enum CodecID codec);

#endif /* AVCODEC_VP56DSP_H */
123linslouis-android-video-cutter
jni/libavcodec/vp56dsp.h
C
asf20
1,227
/**
 * @file
 * huffman tree builder and VLC generator
 * Copyright (C) 2007 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_HUFFMAN_H
#define AVCODEC_HUFFMAN_H

#include "avcodec.h"
#include "get_bits.h"

/** One node of the Huffman tree under construction. */
typedef struct {
    int16_t  sym;    ///< symbol carried by this node
    int16_t  n0;     ///< index of the first child node
    uint32_t count;  ///< occurrence count / weight of the node
} Node;

#define FF_HUFFMAN_FLAG_HNODE_FIRST 0x01
#define FF_HUFFMAN_FLAG_ZERO_COUNT  0x02

/** Comparison callback used to order nodes while building the tree. */
typedef int (*HuffCmp)(const void *va, const void *vb);

/**
 * Build a Huffman tree from the given nodes and generate the matching VLC
 * table.  Behaviour is tweaked via the FF_HUFFMAN_FLAG_* flags.
 */
int ff_huff_build_tree(AVCodecContext *avctx, VLC *vlc, int nb_codes,
                       Node *nodes, HuffCmp cmp, int flags);

#endif /* AVCODEC_HUFFMAN_H */
123linslouis-android-video-cutter
jni/libavcodec/huffman.h
C
asf20
1,357
/*
 * AAC data declarations
 * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
 * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AAC data declarations
 * @author Oded Shimon  ( ods15 ods15 dyndns org )
 * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
 */

#ifndef AVCODEC_AACTAB_H
#define AVCODEC_AACTAB_H

#include "libavutil/mem.h"
#include "aac.h"
#include "aac_tablegen_decl.h"

#include <stdint.h>

/* NOTE:
 * Tables in this file are used by the AAC decoder and will be used by the AAC
 * encoder.
 */

/** @name window coefficients (Kaiser-Bessel derived windows)
 * @{
 */
DECLARE_ALIGNED(16, extern float,  ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, extern float,  ff_aac_kbd_short_128)[128];
// @}

/** @name number of scalefactor window bands for long and short transform windows respectively
 * @{
 */
extern const uint8_t ff_aac_num_swb_1024[];
extern const uint8_t ff_aac_num_swb_128 [];
// @}

extern const uint8_t ff_aac_pred_sfb_max [];

/* scalefactor Huffman codebook (codes + code lengths, 121 entries) */
extern const uint32_t ff_aac_scalefactor_code[121];
extern const uint8_t  ff_aac_scalefactor_bits[121];

/* spectral Huffman codebooks, one set per codebook index (11 books) */
extern const uint16_t * const ff_aac_spectral_codes[11];
extern const uint8_t  * const ff_aac_spectral_bits [11];
extern const uint16_t  ff_aac_spectral_sizes[11];

/* dequantized codebook vectors and their index tables */
extern const float *ff_aac_codebook_vectors[];
extern const float *ff_aac_codebook_vector_vals[];
extern const uint16_t *ff_aac_codebook_vector_idx[];

/* scalefactor band offsets per sampling-rate index */
extern const uint16_t * const ff_swb_offset_1024[13];
extern const uint16_t * const ff_swb_offset_128 [13];

/* maximum number of TNS bands per sampling-rate index */
extern const uint8_t ff_tns_max_bands_1024[13];
extern const uint8_t ff_tns_max_bands_128 [13];

#endif /* AVCODEC_AACTAB_H */
123linslouis-android-video-cutter
jni/libavcodec/aactab.h
C
asf20
2,407
/*
 * H.264 encoding using the x264 library
 * Copyright (C) 2005  Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libx264 wrapper: maps AVCodecContext settings onto x264_param_t and
 * drives the x264 encoder frame by frame.
 */

#include "avcodec.h"
#include <x264.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/** Private state kept per encoder instance. */
typedef struct X264Context {
    x264_param_t    params;    ///< encoder configuration handed to x264
    x264_t         *enc;       ///< opaque x264 encoder handle
    x264_picture_t  pic;       ///< input picture wrapper (planes borrowed from AVFrame)
    uint8_t        *sei;       ///< deferred SEI payload (written with the first frame)
    int             sei_size;  ///< size of the deferred SEI payload, 0 when none pending
    AVFrame         out_pic;   ///< coded-frame metadata exposed via avctx->coded_frame
} X264Context;

/** x264 log callback: translate x264 log levels to av_log levels. */
static void X264_log(void *p, int level, const char *fmt, va_list args)
{
    static const int level_map[] = {
        [X264_LOG_ERROR]   = AV_LOG_ERROR,
        [X264_LOG_WARNING] = AV_LOG_WARNING,
        [X264_LOG_INFO]    = AV_LOG_INFO,
        [X264_LOG_DEBUG]   = AV_LOG_DEBUG
    };

    if (level < 0 || level > X264_LOG_DEBUG)
        return;

    av_vlog(p, level_map[level], fmt, args);
}

/**
 * Serialize an array of x264 NAL units into buf.
 * With skip_sei set (extradata generation) the SEI is stashed in the
 * context instead of being written, and emitted with the first frame.
 * Returns the number of bytes written.
 *
 * NOTE(review): 'size' is never checked against the bytes written —
 * the caller appears responsible for sizing buf; confirm upstream.
 * NOTE(review): the av_malloc of x4->sei is not checked for NULL.
 */
static int encode_nals(AVCodecContext *ctx, uint8_t *buf, int size,
                       x264_nal_t *nals, int nnal, int skip_sei)
{
    X264Context *x4 = ctx->priv_data;
    uint8_t *p = buf;
    int i;

    /* Write the SEI as part of the first frame. */
    if (x4->sei_size > 0 && nnal > 0) {
        memcpy(p, x4->sei, x4->sei_size);
        p += x4->sei_size;
        x4->sei_size = 0;
    }

    for (i = 0; i < nnal; i++){
        /* Don't put the SEI in extradata. */
        if (skip_sei && nals[i].i_type == NAL_SEI) {
            x4->sei_size = nals[i].i_payload;
            x4->sei      = av_malloc(x4->sei_size);
            memcpy(x4->sei, nals[i].p_payload, nals[i].i_payload);
            continue;
        }
        memcpy(p, nals[i].p_payload, nals[i].i_payload);
        p += nals[i].i_payload;
    }

    return p - buf;
}

/**
 * Encode one frame (or flush with data == NULL, thanks to CODEC_CAP_DELAY).
 * Returns the number of bytes written to buf, or -1 on error.
 */
static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
                      int bufsize, void *data)
{
    X264Context *x4 = ctx->priv_data;
    AVFrame *frame = data;
    x264_nal_t *nal;
    int nnal, i;
    x264_picture_t pic_out;

    x4->pic.img.i_csp   = X264_CSP_I420;
    x4->pic.img.i_plane = 3;

    if (frame) {
        /* borrow the AVFrame planes, no copy */
        for (i = 0; i < 3; i++) {
            x4->pic.img.plane[i]    = frame->data[i];
            x4->pic.img.i_stride[i] = frame->linesize[i];
        }

        x4->pic.i_pts  = frame->pts;
        x4->pic.i_type = X264_TYPE_AUTO;
    }

    if (x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL, &pic_out) < 0)
        return -1;

    bufsize = encode_nals(ctx, buf, bufsize, nal, nnal, 0);
    if (bufsize < 0)
        return -1;

    /* FIXME: libx264 now provides DTS, but AVFrame doesn't have a field for it. */
    x4->out_pic.pts = pic_out.i_pts;

    switch (pic_out.i_type) {
    case X264_TYPE_IDR:
    case X264_TYPE_I:
        x4->out_pic.pict_type = FF_I_TYPE;
        break;
    case X264_TYPE_P:
        x4->out_pic.pict_type = FF_P_TYPE;
        break;
    case X264_TYPE_B:
    case X264_TYPE_BREF:
        x4->out_pic.pict_type = FF_B_TYPE;
        break;
    }

    x4->out_pic.key_frame = pic_out.b_keyframe;
    /* x264 reports QP+1; 0 means "not set" */
    x4->out_pic.quality   = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;

    return bufsize;
}

/** Release all encoder resources. */
static av_cold int X264_close(AVCodecContext *avctx)
{
    X264Context *x4 = avctx->priv_data;

    av_freep(&avctx->extradata);
    av_free(x4->sei);

    if (x4->enc)
        x264_encoder_close(x4->enc);

    return 0;
}

/**
 * Translate the AVCodecContext settings into x264 parameters and open
 * the encoder.  With CODEC_FLAG_GLOBAL_HEADER set, SPS/PPS go into
 * avctx->extradata and in-band header repetition is disabled.
 * Returns 0 on success, -1 on failure.
 */
static av_cold int X264_init(AVCodecContext *avctx)
{
    X264Context *x4 = avctx->priv_data;

    x4->sei_size = 0;
    x264_param_default(&x4->params);

    x4->params.pf_log               = X264_log;
    x4->params.p_log_private        = avctx;

    x4->params.i_keyint_max         = avctx->gop_size;
    x4->params.rc.i_bitrate         = avctx->bit_rate       / 1000;
    x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
    x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate    / 1000;
    x4->params.rc.b_stat_write      = avctx->flags & CODEC_FLAG_PASS1;
    if (avctx->flags & CODEC_FLAG_PASS2) {
        x4->params.rc.b_stat_read = 1;
    } else {
        if (avctx->crf) {
            x4->params.rc.i_rc_method   = X264_RC_CRF;
            x4->params.rc.f_rf_constant = avctx->crf;
        } else if (avctx->cqp > -1) {
            x4->params.rc.i_rc_method   = X264_RC_CQP;
            x4->params.rc.i_qp_constant = avctx->cqp;
        }
    }

    // if neither crf nor cqp modes are selected we have to enable the RC
    // we do it this way because we cannot check if the bitrate has been set
    if (!(avctx->crf || (avctx->cqp > -1)))
        x4->params.rc.i_rc_method = X264_RC_ABR;

    x4->params.i_bframe          = avctx->max_b_frames;
    x4->params.b_cabac           = avctx->coder_type == FF_CODER_TYPE_AC;
    x4->params.i_bframe_adaptive = avctx->b_frame_strategy;
    x4->params.i_bframe_bias     = avctx->bframebias;
    x4->params.i_bframe_pyramid  = avctx->flags2 & CODEC_FLAG2_BPYRAMID ? X264_B_PYRAMID_NORMAL : X264_B_PYRAMID_NONE;
    avctx->has_b_frames          = avctx->flags2 & CODEC_FLAG2_BPYRAMID ? 2 : !!avctx->max_b_frames;

    x4->params.i_keyint_min = avctx->keyint_min;
    if (x4->params.i_keyint_min > x4->params.i_keyint_max)
        x4->params.i_keyint_min = x4->params.i_keyint_max;

    x4->params.i_scenecut_threshold        = avctx->scenechange_threshold;

    x4->params.b_deblocking_filter         = avctx->flags & CODEC_FLAG_LOOP_FILTER;
    x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha;
    x4->params.i_deblocking_filter_beta    = avctx->deblockbeta;

    x4->params.rc.i_qp_min                 = avctx->qmin;
    x4->params.rc.i_qp_max                 = avctx->qmax;
    x4->params.rc.i_qp_step                = avctx->max_qdiff;

    x4->params.rc.f_qcompress       = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */
    x4->params.rc.f_qblur           = avctx->qblur;     /* temporally blur quants */
    x4->params.rc.f_complexity_blur = avctx->complexityblur;

    x4->params.i_frame_reference    = avctx->refs;

    x4->params.i_width              = avctx->width;
    x4->params.i_height             = avctx->height;
    x4->params.vui.i_sar_width      = avctx->sample_aspect_ratio.num;
    x4->params.vui.i_sar_height     = avctx->sample_aspect_ratio.den;
    /* fps and timebase are the inverse of each other here */
    x4->params.i_fps_num = x4->params.i_timebase_den = avctx->time_base.den;
    x4->params.i_fps_den = x4->params.i_timebase_num = avctx->time_base.num;

    x4->params.analyse.inter    = 0;
    if (avctx->partitions) {
        if (avctx->partitions & X264_PART_I4X4)
            x4->params.analyse.inter |= X264_ANALYSE_I4x4;
        if (avctx->partitions & X264_PART_I8X8)
            x4->params.analyse.inter |= X264_ANALYSE_I8x8;
        if (avctx->partitions & X264_PART_P8X8)
            x4->params.analyse.inter |= X264_ANALYSE_PSUB16x16;
        if (avctx->partitions & X264_PART_P4X4)
            x4->params.analyse.inter |= X264_ANALYSE_PSUB8x8;
        if (avctx->partitions & X264_PART_B8X8)
            x4->params.analyse.inter |= X264_ANALYSE_BSUB16x16;
    }

    x4->params.analyse.i_direct_mv_pred  = avctx->directpred;

    x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED;
    x4->params.analyse.i_weighted_pred = avctx->weighted_p_pred;

    if (avctx->me_method == ME_EPZS)
        x4->params.analyse.i_me_method = X264_ME_DIA;
    else if (avctx->me_method == ME_HEX)
        x4->params.analyse.i_me_method = X264_ME_HEX;
    else if (avctx->me_method == ME_UMH)
        x4->params.analyse.i_me_method = X264_ME_UMH;
    else if (avctx->me_method == ME_FULL)
        x4->params.analyse.i_me_method = X264_ME_ESA;
    else if (avctx->me_method == ME_TESA)
        x4->params.analyse.i_me_method = X264_ME_TESA;
    else x4->params.analyse.i_me_method = X264_ME_HEX;

    x4->params.rc.i_aq_mode               = avctx->aq_mode;
    x4->params.rc.f_aq_strength           = avctx->aq_strength;
    x4->params.rc.i_lookahead             = avctx->rc_lookahead;

    x4->params.analyse.b_psy              = avctx->flags2 & CODEC_FLAG2_PSY;
    x4->params.analyse.f_psy_rd           = avctx->psy_rd;
    x4->params.analyse.f_psy_trellis      = avctx->psy_trellis;

    x4->params.analyse.i_me_range         = avctx->me_range;
    x4->params.analyse.i_subpel_refine    = avctx->me_subpel_quality;

    x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS;
    x4->params.analyse.b_chroma_me        = avctx->me_cmp & FF_CMP_CHROMA;
    x4->params.analyse.b_transform_8x8    = avctx->flags2 & CODEC_FLAG2_8X8DCT;
    x4->params.analyse.b_fast_pskip       = avctx->flags2 & CODEC_FLAG2_FASTPSKIP;

    x4->params.analyse.i_trellis          = avctx->trellis;
    x4->params.analyse.i_noise_reduction  = avctx->noise_reduction;

    if (avctx->level > 0)
        x4->params.i_level_idc = avctx->level;

    /* NOTE(review): divides by avctx->bit_rate without a zero check */
    x4->params.rc.f_rate_tolerance =
        (float)avctx->bit_rate_tolerance/avctx->bit_rate;

    if ((avctx->rc_buffer_size != 0) &&
        (avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) {
        x4->params.rc.f_vbv_buffer_init =
            (float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
    } else
        x4->params.rc.f_vbv_buffer_init = 0.9;

    x4->params.rc.b_mb_tree               = !!(avctx->flags2 & CODEC_FLAG2_MBTREE);
    x4->params.rc.f_ip_factor             = 1 / fabs(avctx->i_quant_factor);
    x4->params.rc.f_pb_factor             = avctx->b_quant_factor;
    x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;

    x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR;
    x4->params.analyse.b_ssim = avctx->flags2 & CODEC_FLAG2_SSIM;
    x4->params.i_log_level    = X264_LOG_DEBUG;

    x4->params.b_aud          = avctx->flags2 & CODEC_FLAG2_AUD;

    x4->params.i_threads      = avctx->thread_count;

    x4->params.b_interlaced   = avctx->flags & CODEC_FLAG_INTERLACED_DCT;

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
        x4->params.b_repeat_headers = 0;

    x4->enc = x264_encoder_open(&x4->params);
    if (!x4->enc)
        return -1;

    avctx->coded_frame = &x4->out_pic;

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        x264_nal_t *nal;
        int nnal, s, i;

        s = x264_encoder_headers(x4->enc, &nal, &nnal);

        for (i = 0; i < nnal; i++)
            if (nal[i].i_type == NAL_SEI)
                /* +25 skips the NAL/SEI framing to print the x264 version string */
                av_log(avctx, AV_LOG_INFO, "%s\n", nal[i].p_payload+25);

        avctx->extradata      = av_malloc(s);
        avctx->extradata_size = encode_nals(avctx, avctx->extradata, s, nal, nnal, 1);
    }

    return 0;
}

AVCodec libx264_encoder = {
    .name           = "libx264",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_H264,
    .priv_data_size = sizeof(X264Context),
    .init           = X264_init,
    .encode         = X264_frame,
    .close          = X264_close,
    .capabilities   = CODEC_CAP_DELAY,
    .pix_fmts       = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
};
123linslouis-android-video-cutter
jni/libavcodec/libx264.c
C
asf20
11,692
/*
 * MPEG Audio header decoder
 * Copyright (c) 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MPEG Audio header decoder.
 */

//#define DEBUG
#include "avcodec.h"
#include "mpegaudio.h"
#include "mpegaudiodata.h"

#include "mpegaudiodecheader.h"


/**
 * Parse a 32-bit MPEG audio frame header (big-endian, sync already
 * validated by the caller) into s.
 * Returns 0 on success, 1 when the bitrate index is 0 (free-format
 * stream, frame size cannot be computed from the header alone).
 */
int ff_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
{
    int sample_rate, frame_size, mpeg25, padding;
    int sample_rate_index, bitrate_index;

    /* bit 20: MPEG-2.5 marker; bit 19: MPEG-1 vs MPEG-2 (lsf = low
       sampling frequency extension) */
    if (header & (1<<20)) {
        s->lsf = (header & (1<<19)) ? 0 : 1;
        mpeg25 = 0;
    } else {
        s->lsf = 1;
        mpeg25 = 1;
    }

    s->layer = 4 - ((header >> 17) & 3);
    /* extract frequency */
    sample_rate_index = (header >> 10) & 3;
    /* MPEG-2 halves, MPEG-2.5 quarters the MPEG-1 rates */
    sample_rate = ff_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
    sample_rate_index += 3 * (s->lsf + mpeg25);
    s->sample_rate_index = sample_rate_index;
    s->error_protection = ((header >> 16) & 1) ^ 1;
    s->sample_rate = sample_rate;

    bitrate_index = (header >> 12) & 0xf;
    padding = (header >> 9) & 1;
    //extension = (header >> 8) & 1;
    s->mode = (header >> 6) & 3;
    s->mode_ext = (header >> 4) & 3;
    //copyright = (header >> 3) & 1;
    //original = (header >> 2) & 1;
    //emphasis = header & 3;

    if (s->mode == MPA_MONO)
        s->nb_channels = 1;
    else
        s->nb_channels = 2;

    if (bitrate_index != 0) {
        frame_size = ff_mpa_bitrate_tab[s->lsf][s->layer - 1][bitrate_index];
        s->bit_rate = frame_size * 1000;
        /* frame size formulas per layer (ISO 11172-3) */
        switch(s->layer) {
        case 1:
            frame_size = (frame_size * 12000) / sample_rate;
            frame_size = (frame_size + padding) * 4;
            break;
        case 2:
            frame_size = (frame_size * 144000) / sample_rate;
            frame_size += padding;
            break;
        default:
        case 3:
            frame_size = (frame_size * 144000) / (sample_rate << s->lsf);
            frame_size += padding;
            break;
        }
        s->frame_size = frame_size;
    } else {
        /* if no frame size computed, signal it */
        return 1;
    }

#if defined(DEBUG)
    dprintf(NULL, "layer%d, %d Hz, %d kbits/s, ",
           s->layer, s->sample_rate, s->bit_rate);
    if (s->nb_channels == 2) {
        if (s->layer == 3) {
            if (s->mode_ext & MODE_EXT_MS_STEREO)
                dprintf(NULL, "ms-");
            if (s->mode_ext & MODE_EXT_I_STEREO)
                dprintf(NULL, "i-");
        }
        dprintf(NULL, "stereo");
    } else {
        dprintf(NULL, "mono");
    }
    dprintf(NULL, "\n");
#endif
    return 0;
}
123linslouis-android-video-cutter
jni/libavcodec/mpegaudiodecheader.c
C
asf20
3,321
/*
 * 8SVX audio decoder
 * Copyright (C) 2008 Jaikrishnan Menon
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * 8svx audio decoder
 * @author Jaikrishnan Menon
 * supports: fibonacci delta encoding
 *         : exponential encoding
 */

#include "avcodec.h"

/** decoder context */
typedef struct EightSvxContext {
    int16_t fib_acc;        ///< running delta accumulator (current sample value)
    const int16_t *table;   ///< 16-entry delta table (fibonacci or exponential)
} EightSvxContext;

/* delta tables, pre-scaled from 8-bit to 16-bit range (<<8) */
static const int16_t fibonacci[16]   = { -34<<8, -21<<8, -13<<8,  -8<<8, -5<<8, -3<<8, -2<<8, -1<<8,
                                          0, 1<<8, 2<<8, 3<<8, 5<<8, 8<<8, 13<<8, 21<<8 };
static const int16_t exponential[16] = { -128<<8, -64<<8, -32<<8, -16<<8, -8<<8, -4<<8, -2<<8, -1<<8,
                                          0, 1<<8, 2<<8, 4<<8, 8<<8, 16<<8, 32<<8, 64<<8 };

/** decode a frame
 *
 * Each input byte holds two 4-bit table indices (low nibble first); every
 * nibble produces one 16-bit output sample, so output needs 4 bytes per
 * input byte — hence the (*data_size >> 2) capacity check.
 */
static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    EightSvxContext *esc = avctx->priv_data;
    int16_t *out_data = data;
    int consumed = buf_size;
    const uint8_t *buf_end = buf + buf_size;

    if((*data_size >> 2) < buf_size)
        return -1;

    if(avctx->frame_number == 0) {
        /* first packet carries a 2-byte header; byte 1 seeds the
           accumulator.  NOTE(review): assumes buf_size >= 2 here —
           an empty/1-byte first packet would read past the buffer;
           confirm the demuxer guarantees this. */
        esc->fib_acc = buf[1] << 8;
        buf_size -= 2;
        buf += 2;
    }

    *data_size = buf_size << 2;

    while(buf < buf_end) {
        uint8_t d = *buf++;
        /* low nibble first, then high nibble */
        esc->fib_acc += esc->table[d & 0x0f];
        *out_data++ = esc->fib_acc;
        esc->fib_acc += esc->table[d >> 4];
        *out_data++ = esc->fib_acc;
    }

    return consumed;
}

/** initialize 8svx decoder
 *
 * Selects the delta table from the codec id; output is signed 16-bit.
 */
static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
{
    EightSvxContext *esc = avctx->priv_data;

    switch(avctx->codec->id) {
        case CODEC_ID_8SVX_FIB:
          esc->table = fibonacci;
          break;
        case CODEC_ID_8SVX_EXP:
          esc->table = exponential;
          break;
        default:
          return -1;
    }
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

AVCodec eightsvx_fib_decoder = {
  .name           = "8svx_fib",
  .type           = AVMEDIA_TYPE_AUDIO,
  .id             = CODEC_ID_8SVX_FIB,
  .priv_data_size = sizeof (EightSvxContext),
  .init           = eightsvx_decode_init,
  .decode         = eightsvx_decode_frame,
  .long_name      = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
};

AVCodec eightsvx_exp_decoder = {
  .name           = "8svx_exp",
  .type           = AVMEDIA_TYPE_AUDIO,
  .id             = CODEC_ID_8SVX_EXP,
  .priv_data_size = sizeof (EightSvxContext),
  .init           = eightsvx_decode_init,
  .decode         = eightsvx_decode_frame,
  .long_name      = NULL_IF_CONFIG_SMALL("8SVX exponential"),
};
123linslouis-android-video-cutter
jni/libavcodec/8svx.c
C
asf20
3,458
/* * Shorten decoder * Copyright (c) 2005 Jeff Muizelaar * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Shorten decoder * @author Jeff Muizelaar * */ #define DEBUG #include <limits.h> #include "avcodec.h" #include "get_bits.h" #include "golomb.h" #define MAX_CHANNELS 8 #define MAX_BLOCKSIZE 65535 #define OUT_BUFFER_SIZE 16384 #define ULONGSIZE 2 #define WAVE_FORMAT_PCM 0x0001 #define DEFAULT_BLOCK_SIZE 256 #define TYPESIZE 4 #define CHANSIZE 0 #define LPCQSIZE 2 #define ENERGYSIZE 3 #define BITSHIFTSIZE 2 #define TYPE_S16HL 3 #define TYPE_S16LH 5 #define NWRAP 3 #define NSKIPSIZE 1 #define LPCQUANT 5 #define V2LPCQOFFSET (1 << LPCQUANT) #define FNSIZE 2 #define FN_DIFF0 0 #define FN_DIFF1 1 #define FN_DIFF2 2 #define FN_DIFF3 3 #define FN_QUIT 4 #define FN_BLOCKSIZE 5 #define FN_BITSHIFT 6 #define FN_QLPC 7 #define FN_ZERO 8 #define FN_VERBATIM 9 #define VERBATIM_CKSIZE_SIZE 5 #define VERBATIM_BYTE_SIZE 8 #define CANONICAL_HEADER_SIZE 44 typedef struct ShortenContext { AVCodecContext *avctx; GetBitContext gb; int min_framesize, max_framesize; int channels; int32_t *decoded[MAX_CHANNELS]; int32_t *offset[MAX_CHANNELS]; uint8_t *bitstream; int bitstream_size; int bitstream_index; unsigned int allocated_bitstream_size; int header_size; uint8_t header[OUT_BUFFER_SIZE]; 
int version; int cur_chan; int bitshift; int nmean; int internal_ftype; int nwrap; int blocksize; int bitindex; int32_t lpcqoffset; } ShortenContext; static av_cold int shorten_decode_init(AVCodecContext * avctx) { ShortenContext *s = avctx->priv_data; s->avctx = avctx; avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } static int allocate_buffers(ShortenContext *s) { int i, chan; for (chan=0; chan<s->channels; chan++) { if(FFMAX(1, s->nmean) >= UINT_MAX/sizeof(int32_t)){ av_log(s->avctx, AV_LOG_ERROR, "nmean too large\n"); return -1; } if(s->blocksize + s->nwrap >= UINT_MAX/sizeof(int32_t) || s->blocksize + s->nwrap <= (unsigned)s->nwrap){ av_log(s->avctx, AV_LOG_ERROR, "s->blocksize + s->nwrap too large\n"); return -1; } s->offset[chan] = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean)); s->decoded[chan] = av_realloc(s->decoded[chan], sizeof(int32_t)*(s->blocksize + s->nwrap)); for (i=0; i<s->nwrap; i++) s->decoded[chan][i] = 0; s->decoded[chan] += s->nwrap; } return 0; } static inline unsigned int get_uint(ShortenContext *s, int k) { if (s->version != 0) k = get_ur_golomb_shorten(&s->gb, ULONGSIZE); return get_ur_golomb_shorten(&s->gb, k); } static void fix_bitshift(ShortenContext *s, int32_t *buffer) { int i; if (s->bitshift != 0) for (i = 0; i < s->blocksize; i++) buffer[s->nwrap + i] <<= s->bitshift; } static void init_offset(ShortenContext *s) { int32_t mean = 0; int chan, i; int nblock = FFMAX(1, s->nmean); /* initialise offset */ switch (s->internal_ftype) { case TYPE_S16HL: case TYPE_S16LH: mean = 0; break; default: av_log(s->avctx, AV_LOG_ERROR, "unknown audio type"); abort(); } for (chan = 0; chan < s->channels; chan++) for (i = 0; i < nblock; i++) s->offset[chan][i] = mean; } static inline int get_le32(GetBitContext *gb) { return bswap_32(get_bits_long(gb, 32)); } static inline short get_le16(GetBitContext *gb) { return bswap_16(get_bits_long(gb, 16)); } static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header_size) 
{ GetBitContext hb; int len; int chunk_size; short wave_format; init_get_bits(&hb, header, header_size*8); if (get_le32(&hb) != MKTAG('R','I','F','F')) { av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n"); return -1; } chunk_size = get_le32(&hb); if (get_le32(&hb) != MKTAG('W','A','V','E')) { av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n"); return -1; } while (get_le32(&hb) != MKTAG('f','m','t',' ')) { len = get_le32(&hb); skip_bits(&hb, 8*len); } len = get_le32(&hb); if (len < 16) { av_log(avctx, AV_LOG_ERROR, "fmt chunk was too short\n"); return -1; } wave_format = get_le16(&hb); switch (wave_format) { case WAVE_FORMAT_PCM: break; default: av_log(avctx, AV_LOG_ERROR, "unsupported wave format\n"); return -1; } avctx->channels = get_le16(&hb); avctx->sample_rate = get_le32(&hb); avctx->bit_rate = get_le32(&hb) * 8; avctx->block_align = get_le16(&hb); avctx->bits_per_coded_sample = get_le16(&hb); if (avctx->bits_per_coded_sample != 16) { av_log(avctx, AV_LOG_ERROR, "unsupported number of bits per sample\n"); return -1; } len -= 16; if (len > 0) av_log(avctx, AV_LOG_INFO, "%d header bytes unparsed\n", len); return 0; } static int16_t * interleave_buffer(int16_t *samples, int nchan, int blocksize, int32_t **buffer) { int i, chan; for (i=0; i<blocksize; i++) for (chan=0; chan < nchan; chan++) *samples++ = FFMIN(buffer[chan][i], 32768); return samples; } static void decode_subframe_lpc(ShortenContext *s, int channel, int residual_size, int pred_order) { int sum, i, j; int coeffs[pred_order]; for (i=0; i<pred_order; i++) coeffs[i] = get_sr_golomb_shorten(&s->gb, LPCQUANT); for (i=0; i < s->blocksize; i++) { sum = s->lpcqoffset; for (j=0; j<pred_order; j++) sum += coeffs[j] * s->decoded[channel][i-j-1]; s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + (sum >> LPCQUANT); } } static int shorten_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; 
ShortenContext *s = avctx->priv_data; int i, input_buf_size = 0; int16_t *samples = data; if(s->max_framesize == 0){ s->max_framesize= 1024; // should hopefully be enough for the first header s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize); } if(1 && s->max_framesize){//FIXME truncated buf_size= FFMIN(buf_size, s->max_framesize - s->bitstream_size); input_buf_size= buf_size; if(s->bitstream_index + s->bitstream_size + buf_size > s->allocated_bitstream_size){ // printf("memmove\n"); memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size); s->bitstream_index=0; } memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf, buf_size); buf= &s->bitstream[s->bitstream_index]; buf_size += s->bitstream_size; s->bitstream_size= buf_size; if(buf_size < s->max_framesize){ //dprintf(avctx, "wanna more data ... %d\n", buf_size); *data_size = 0; return input_buf_size; } } init_get_bits(&s->gb, buf, buf_size*8); skip_bits(&s->gb, s->bitindex); if (!s->blocksize) { int maxnlpc = 0; /* shorten signature */ if (get_bits_long(&s->gb, 32) != AV_RB32("ajkg")) { av_log(s->avctx, AV_LOG_ERROR, "missing shorten magic 'ajkg'\n"); return -1; } s->lpcqoffset = 0; s->blocksize = DEFAULT_BLOCK_SIZE; s->channels = 1; s->nmean = -1; s->version = get_bits(&s->gb, 8); s->internal_ftype = get_uint(s, TYPESIZE); s->channels = get_uint(s, CHANSIZE); if (s->channels > MAX_CHANNELS) { av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels); return -1; } /* get blocksize if version > 0 */ if (s->version > 0) { int skip_bytes; s->blocksize = get_uint(s, av_log2(DEFAULT_BLOCK_SIZE)); maxnlpc = get_uint(s, LPCQSIZE); s->nmean = get_uint(s, 0); skip_bytes = get_uint(s, NSKIPSIZE); for (i=0; i<skip_bytes; i++) { skip_bits(&s->gb, 8); } } s->nwrap = FFMAX(NWRAP, maxnlpc); if (allocate_buffers(s)) return -1; init_offset(s); if (s->version > 1) s->lpcqoffset = V2LPCQOFFSET; if (get_ur_golomb_shorten(&s->gb, FNSIZE) != 
FN_VERBATIM) { av_log(s->avctx, AV_LOG_ERROR, "missing verbatim section at beginning of stream\n"); return -1; } s->header_size = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE); if (s->header_size >= OUT_BUFFER_SIZE || s->header_size < CANONICAL_HEADER_SIZE) { av_log(s->avctx, AV_LOG_ERROR, "header is wrong size: %d\n", s->header_size); return -1; } for (i=0; i<s->header_size; i++) s->header[i] = (char)get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE); if (decode_wave_header(avctx, s->header, s->header_size) < 0) return -1; s->cur_chan = 0; s->bitshift = 0; } else { int cmd; int len; cmd = get_ur_golomb_shorten(&s->gb, FNSIZE); switch (cmd) { case FN_ZERO: case FN_DIFF0: case FN_DIFF1: case FN_DIFF2: case FN_DIFF3: case FN_QLPC: { int residual_size = 0; int channel = s->cur_chan; int32_t coffset; if (cmd != FN_ZERO) { residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE); /* this is a hack as version 0 differed in defintion of get_sr_golomb_shorten */ if (s->version == 0) residual_size--; } if (s->nmean == 0) coffset = s->offset[channel][0]; else { int32_t sum = (s->version < 2) ? 
0 : s->nmean / 2; for (i=0; i<s->nmean; i++) sum += s->offset[channel][i]; coffset = sum / s->nmean; if (s->version >= 2) coffset >>= FFMIN(1, s->bitshift); } switch (cmd) { case FN_ZERO: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = 0; break; case FN_DIFF0: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + coffset; break; case FN_DIFF1: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + s->decoded[channel][i - 1]; break; case FN_DIFF2: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 2*s->decoded[channel][i-1] - s->decoded[channel][i-2]; break; case FN_DIFF3: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 3*s->decoded[channel][i-1] - 3*s->decoded[channel][i-2] + s->decoded[channel][i-3]; break; case FN_QLPC: { int pred_order = get_ur_golomb_shorten(&s->gb, LPCQSIZE); for (i=0; i<pred_order; i++) s->decoded[channel][i - pred_order] -= coffset; decode_subframe_lpc(s, channel, residual_size, pred_order); if (coffset != 0) for (i=0; i < s->blocksize; i++) s->decoded[channel][i] += coffset; } } if (s->nmean > 0) { int32_t sum = (s->version < 2) ? 
0 : s->blocksize / 2; for (i=0; i<s->blocksize; i++) sum += s->decoded[channel][i]; for (i=1; i<s->nmean; i++) s->offset[channel][i-1] = s->offset[channel][i]; if (s->version < 2) s->offset[channel][s->nmean - 1] = sum / s->blocksize; else s->offset[channel][s->nmean - 1] = (sum / s->blocksize) << s->bitshift; } for (i=-s->nwrap; i<0; i++) s->decoded[channel][i] = s->decoded[channel][i + s->blocksize]; fix_bitshift(s, s->decoded[channel]); s->cur_chan++; if (s->cur_chan == s->channels) { samples = interleave_buffer(samples, s->channels, s->blocksize, s->decoded); s->cur_chan = 0; goto frame_done; } break; } break; case FN_VERBATIM: len = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE); while (len--) { get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE); } break; case FN_BITSHIFT: s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE); break; case FN_BLOCKSIZE: s->blocksize = get_uint(s, av_log2(s->blocksize)); break; case FN_QUIT: *data_size = 0; return buf_size; break; default: av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd); return -1; break; } } frame_done: *data_size = (int8_t *)samples - (int8_t *)data; // s->last_blocksize = s->blocksize; s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8); i= (get_bits_count(&s->gb))/8; if (i > buf_size) { av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size); s->bitstream_size=0; s->bitstream_index=0; return -1; } if (s->bitstream_size) { s->bitstream_index += i; s->bitstream_size -= i; return input_buf_size; } else return i; } static av_cold int shorten_decode_close(AVCodecContext *avctx) { ShortenContext *s = avctx->priv_data; int i; for (i = 0; i < s->channels; i++) { s->decoded[i] -= s->nwrap; av_freep(&s->decoded[i]); av_freep(&s->offset[i]); } av_freep(&s->bitstream); return 0; } static void shorten_flush(AVCodecContext *avctx){ ShortenContext *s = avctx->priv_data; s->bitstream_size= s->bitstream_index= 0; } AVCodec shorten_decoder = { "shorten", AVMEDIA_TYPE_AUDIO, 
CODEC_ID_SHORTEN, sizeof(ShortenContext), shorten_decode_init, NULL, shorten_decode_close, shorten_decode_frame, .flush= shorten_flush, .long_name= NULL_IF_CONFIG_SMALL("Shorten"), };
123linslouis-android-video-cutter
jni/libavcodec/shorten.c
C
asf20
17,040
/*
 * TIFF tables
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * TIFF tables
 * @file
 * @author Konstantin Shishkov
 */
#ifndef AVCODEC_TIFF_H
#define AVCODEC_TIFF_H

#include <stdint.h>

/** abridged list of TIFF tags */
enum TiffTags{
    TIFF_SUBFILE = 0xfe,              /* new subfile type */
    TIFF_WIDTH = 0x100,               /* image width in pixels */
    TIFF_HEIGHT,                      /* image height in pixels */
    TIFF_BPP,                         /* bits per sample */
    TIFF_COMPR,                       /* compression scheme (see TiffCompr) */
    TIFF_INVERT = 0x106,              /* photometric interpretation */
    TIFF_FILL_ORDER = 0x10A,
    TIFF_STRIP_OFFS = 0x111,          /* byte offset of each strip */
    TIFF_SAMPLES_PER_PIXEL = 0x115,
    TIFF_ROWSPERSTRIP = 0x116,
    TIFF_STRIP_SIZE,                  /* byte count of each strip */
    TIFF_XRES = 0x11A,
    TIFF_YRES = 0x11B,
    TIFF_PLANAR = 0x11C,              /* planar vs. chunky sample layout */
    TIFF_XPOS = 0x11E,
    TIFF_YPOS = 0x11F,
    TIFF_T4OPTIONS = 0x124,           /* Group 3 fax options */
    TIFF_T6OPTIONS,                   /* Group 4 fax options */
    TIFF_RES_UNIT = 0x128,
    TIFF_SOFTWARE_NAME = 0x131,
    TIFF_PREDICTOR = 0x13D,           /* predictor used before LZW/deflate */
    TIFF_PAL = 0x140,                 /* color palette (ColorMap) */
    TIFF_YCBCR_COEFFICIENTS = 0x211,
    TIFF_YCBCR_SUBSAMPLING = 0x212,
    TIFF_YCBCR_POSITIONING = 0x213,
    TIFF_REFERENCE_BW = 0x214,
};

/** list of TIFF compression types */
enum TiffCompr{
    TIFF_RAW = 1,                     /* no compression */
    TIFF_CCITT_RLE,
    TIFF_G3,                          /* CCITT Group 3 fax */
    TIFF_G4,                          /* CCITT Group 4 fax */
    TIFF_LZW,
    TIFF_JPEG,                        /* old-style JPEG */
    TIFF_NEWJPEG,
    TIFF_ADOBE_DEFLATE,
    TIFF_PACKBITS = 0x8005,
    TIFF_DEFLATE = 0x80B2
};

/** TIFF IFD entry value types (subset) */
enum TiffTypes{
    TIFF_BYTE = 1,
    TIFF_STRING,
    TIFF_SHORT,
    TIFF_LONG,
    TIFF_RATIONAL,
};

/** sizes of various TIFF field types (string size = 100)*/
static const uint8_t type_sizes[6] = {
    0, 1, 100, 2, 4, 8
};

#endif /* AVCODEC_TIFF_H */
123linslouis-android-video-cutter
jni/libavcodec/tiff.h
C
asf20
2,196
/* * utils for libavcodec * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * utils. */ /* needed for mkstemp() */ #define _XOPEN_SOURCE 600 #include "libavutil/avstring.h" #include "libavutil/integer.h" #include "libavutil/crc.h" #include "libavutil/pixdesc.h" #include "avcodec.h" #include "dsputil.h" #include "opt.h" #include "imgconvert.h" #include "audioconvert.h" #include "libxvid_internal.h" #include "internal.h" #include <stdlib.h> #include <stdarg.h> #include <limits.h> #include <float.h> #if !HAVE_MKSTEMP #include <fcntl.h> #endif static int volatile entangled_thread_counter=0; int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op); static void *codec_mutex; void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size) { if(min_size < *size) return ptr; *size= FFMAX(17*min_size/16 + 32, min_size); ptr= av_realloc(ptr, *size); if(!ptr) //we could set this to the unmodified min_size but this is safer if the user lost the ptr and uses NULL now *size= 0; return ptr; } void av_fast_malloc(void *ptr, unsigned int *size, unsigned int min_size) { void **p = ptr; if (min_size < *size) return; *size= FFMAX(17*min_size/16 + 32, min_size); av_free(*p); *p = 
av_malloc(*size); if (!*p) *size = 0; } /* encoder management */ static AVCodec *first_avcodec = NULL; AVCodec *av_codec_next(AVCodec *c){ if(c) return c->next; else return first_avcodec; } void avcodec_register(AVCodec *codec) { AVCodec **p; avcodec_init(); p = &first_avcodec; while (*p != NULL) p = &(*p)->next; *p = codec; codec->next = NULL; } #if LIBAVCODEC_VERSION_MAJOR < 53 void register_avcodec(AVCodec *codec) { avcodec_register(codec); } #endif unsigned avcodec_get_edge_width(void) { return EDGE_WIDTH; } void avcodec_set_dimensions(AVCodecContext *s, int width, int height){ s->coded_width = width; s->coded_height= height; s->width = -((-width )>>s->lowres); s->height= -((-height)>>s->lowres); } typedef struct InternalBuffer{ int last_pic_num; uint8_t *base[4]; uint8_t *data[4]; int linesize[4]; int width, height; enum PixelFormat pix_fmt; }InternalBuffer; #define INTERNAL_BUFFER_SIZE 32 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){ int w_align= 1; int h_align= 1; switch(s->pix_fmt){ case PIX_FMT_YUV420P: case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_YUV422P: case PIX_FMT_YUV440P: case PIX_FMT_YUV444P: case PIX_FMT_GRAY8: case PIX_FMT_GRAY16BE: case PIX_FMT_GRAY16LE: case PIX_FMT_YUVJ420P: case PIX_FMT_YUVJ422P: case PIX_FMT_YUVJ440P: case PIX_FMT_YUVJ444P: case PIX_FMT_YUVA420P: w_align= 16; //FIXME check for non mpeg style codecs and use less alignment h_align= 16; if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV || s->codec_id == CODEC_ID_THP) h_align= 32; // interlaced is rounded up to 2 MBs break; case PIX_FMT_YUV411P: case PIX_FMT_UYYVYY411: w_align=32; h_align=8; break; case PIX_FMT_YUV410P: if(s->codec_id == CODEC_ID_SVQ1){ w_align=64; h_align=64; } case PIX_FMT_RGB555: if(s->codec_id == CODEC_ID_RPZA){ w_align=4; h_align=4; } case PIX_FMT_PAL8: case PIX_FMT_BGR8: case PIX_FMT_RGB8: if(s->codec_id == CODEC_ID_SMC){ w_align=4; 
h_align=4; } break; case PIX_FMT_BGR24: if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){ w_align=4; h_align=4; } break; default: w_align= 1; h_align= 1; break; } *width = FFALIGN(*width , w_align); *height= FFALIGN(*height, h_align); if(s->codec_id == CODEC_ID_H264) *height+=2; // some of the optimized chroma MC reads one line too much linesize_align[0] = linesize_align[1] = linesize_align[2] = linesize_align[3] = STRIDE_ALIGN; //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the //picture size unneccessarily in some cases. The solution here is not //pretty and better ideas are welcome! #if HAVE_MMX if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 || s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F || s->codec_id == CODEC_ID_VP6A) { linesize_align[0] = linesize_align[1] = linesize_align[2] = 16; } #endif } void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w; int linesize_align[4]; int align; avcodec_align_dimensions2(s, width, height, linesize_align); align = FFMAX(linesize_align[0], linesize_align[3]); linesize_align[1] <<= chroma_shift; linesize_align[2] <<= chroma_shift; align = FFMAX3(align, linesize_align[1], linesize_align[2]); *width=FFALIGN(*width, align); } int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h){ if((int)w>0 && (int)h>0 && (w+128)*(uint64_t)(h+128) < INT_MAX/8) return 0; av_log(av_log_ctx, AV_LOG_ERROR, "picture size invalid (%ux%u)\n", w, h); return AVERROR(EINVAL); } int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ int i; int w= s->width; int h= s->height; InternalBuffer *buf; int *picture_number; if(pic->data[0]!=NULL) { av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n"); return -1; } if(s->internal_buffer_count >= INTERNAL_BUFFER_SIZE) { 
av_log(s, AV_LOG_ERROR, "internal_buffer_count overflow (missing release_buffer?)\n"); return -1; } if(avcodec_check_dimensions(s,w,h)) return -1; if(s->internal_buffer==NULL){ s->internal_buffer= av_mallocz((INTERNAL_BUFFER_SIZE+1)*sizeof(InternalBuffer)); } #if 0 s->internal_buffer= av_fast_realloc( s->internal_buffer, &s->internal_buffer_size, sizeof(InternalBuffer)*FFMAX(99, s->internal_buffer_count+1)/*FIXME*/ ); #endif buf= &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count]; picture_number= &(((InternalBuffer*)s->internal_buffer)[INTERNAL_BUFFER_SIZE]).last_pic_num; //FIXME ugly hack (*picture_number)++; if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){ for(i=0; i<4; i++){ av_freep(&buf->base[i]); buf->data[i]= NULL; } } if(buf->base[0]){ pic->age= *picture_number - buf->last_pic_num; buf->last_pic_num= *picture_number; }else{ int h_chroma_shift, v_chroma_shift; int size[4] = {0}; int tmpsize; int unaligned; AVPicture picture; int stride_align[4]; avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); avcodec_align_dimensions2(s, &w, &h, stride_align); if(!(s->flags&CODEC_FLAG_EMU_EDGE)){ w+= EDGE_WIDTH*2; h+= EDGE_WIDTH*2; } do { // NOTE: do not align linesizes individually, this breaks e.g. 
assumptions // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2 ff_fill_linesize(&picture, s->pix_fmt, w); // increase alignment of w for next try (rhs gives the lowest bit set in w) w += w & ~(w-1); unaligned = 0; for (i=0; i<4; i++){ unaligned |= picture.linesize[i] % stride_align[i]; } } while (unaligned); tmpsize = ff_fill_pointer(&picture, NULL, s->pix_fmt, h); if (tmpsize < 0) return -1; for (i=0; i<3 && picture.data[i+1]; i++) size[i] = picture.data[i+1] - picture.data[i]; size[i] = tmpsize - (picture.data[i] - picture.data[0]); buf->last_pic_num= -256*256*256*64; memset(buf->base, 0, sizeof(buf->base)); memset(buf->data, 0, sizeof(buf->data)); for(i=0; i<4 && size[i]; i++){ const int h_shift= i==0 ? 0 : h_chroma_shift; const int v_shift= i==0 ? 0 : v_chroma_shift; buf->linesize[i]= picture.linesize[i]; buf->base[i]= av_malloc(size[i]+16); //FIXME 16 if(buf->base[i]==NULL) return -1; memset(buf->base[i], 128, size[i]); // no edge if EDEG EMU or not planar YUV if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2]) buf->data[i] = buf->base[i]; else buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), stride_align[i]); } if(size[1] && !size[2]) ff_set_systematic_pal((uint32_t*)buf->data[1], s->pix_fmt); buf->width = s->width; buf->height = s->height; buf->pix_fmt= s->pix_fmt; pic->age= 256*256*256*64; } pic->type= FF_BUFFER_TYPE_INTERNAL; for(i=0; i<4; i++){ pic->base[i]= buf->base[i]; pic->data[i]= buf->data[i]; pic->linesize[i]= buf->linesize[i]; } s->internal_buffer_count++; pic->reordered_opaque= s->reordered_opaque; if(s->debug&FF_DEBUG_BUFFERS) av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count); return 0; } void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ int i; InternalBuffer *buf, *last; assert(pic->type==FF_BUFFER_TYPE_INTERNAL); assert(s->internal_buffer_count); buf = NULL; /* avoids warning */ for(i=0; 
i<s->internal_buffer_count; i++){ //just 3-5 checks so is not worth to optimize buf= &((InternalBuffer*)s->internal_buffer)[i]; if(buf->data[0] == pic->data[0]) break; } assert(i < s->internal_buffer_count); s->internal_buffer_count--; last = &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count]; FFSWAP(InternalBuffer, *buf, *last); for(i=0; i<4; i++){ pic->data[i]=NULL; // pic->base[i]=NULL; } //printf("R%X\n", pic->opaque); if(s->debug&FF_DEBUG_BUFFERS) av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count); } int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ AVFrame temp_pic; int i; /* If no picture return a new buffer */ if(pic->data[0] == NULL) { /* We will copy from buffer, so must be readable */ pic->buffer_hints |= FF_BUFFER_HINTS_READABLE; return s->get_buffer(s, pic); } /* If internal buffer type return the same buffer */ if(pic->type == FF_BUFFER_TYPE_INTERNAL) { pic->reordered_opaque= s->reordered_opaque; return 0; } /* * Not internal type and reget_buffer not overridden, emulate cr buffer */ temp_pic = *pic; for(i = 0; i < 4; i++) pic->data[i] = pic->base[i] = NULL; pic->opaque = NULL; /* Allocate new frame */ if (s->get_buffer(s, pic)) return -1; /* Copy image data from old buffer to new buffer */ av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width, s->height); s->release_buffer(s, &temp_pic); // Release old frame return 0; } int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){ int i; for(i=0; i<count; i++){ int r= func(c, (char*)arg + i*size); if(ret) ret[i]= r; } return 0; } int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr),void *arg, int *ret, int count){ int i; for(i=0; i<count; i++){ int r= func(c, arg, i, 0); if(ret) ret[i]= r; } return 0; } enum PixelFormat 
avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt){ while (*fmt != PIX_FMT_NONE && ff_is_hwaccel_pix_fmt(*fmt)) ++fmt; return fmt[0]; } void avcodec_get_frame_defaults(AVFrame *pic){ memset(pic, 0, sizeof(AVFrame)); pic->pts= AV_NOPTS_VALUE; pic->key_frame= 1; } AVFrame *avcodec_alloc_frame(void){ AVFrame *pic= av_malloc(sizeof(AVFrame)); if(pic==NULL) return NULL; avcodec_get_frame_defaults(pic); return pic; } int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec) { int ret= -1; /* If there is a user-supplied mutex locking routine, call it. */ if (ff_lockmgr_cb) { if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) return -1; } entangled_thread_counter++; if(entangled_thread_counter != 1){ av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); goto end; } if(avctx->codec || !codec) goto end; if (codec->priv_data_size > 0) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { ret = AVERROR(ENOMEM); goto end; } } else { avctx->priv_data = NULL; } if(avctx->coded_width && avctx->coded_height) avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); else if(avctx->width && avctx->height) avcodec_set_dimensions(avctx, avctx->width, avctx->height); #define SANE_NB_CHANNELS 128U if (((avctx->coded_width || avctx->coded_height) && avcodec_check_dimensions(avctx, avctx->coded_width, avctx->coded_height)) || avctx->channels > SANE_NB_CHANNELS) { ret = AVERROR(EINVAL); goto free_and_end; } avctx->codec = codec; if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) && avctx->codec_id == CODEC_ID_NONE) { avctx->codec_type = codec->type; avctx->codec_id = codec->id; } if(avctx->codec_id != codec->id || avctx->codec_type != codec->type){ av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n"); goto free_and_end; } avctx->frame_number = 0; if(avctx->codec->init){ ret = avctx->codec->init(avctx); if (ret < 0) { goto 
free_and_end; } } ret=0; end: entangled_thread_counter--; /* Release any user-supplied mutex. */ if (ff_lockmgr_cb) { (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); } return ret; free_and_end: av_freep(&avctx->priv_data); avctx->codec= NULL; goto end; } int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size, const short *samples) { if(buf_size < FF_MIN_BUFFER_SIZE && 0){ av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n"); return -1; } if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){ int ret = avctx->codec->encode(avctx, buf, buf_size, samples); avctx->frame_number++; return ret; }else return 0; } int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVFrame *pict) { if(buf_size < FF_MIN_BUFFER_SIZE){ av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n"); return -1; } if(avcodec_check_dimensions(avctx,avctx->width,avctx->height)) return -1; if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){ int ret = avctx->codec->encode(avctx, buf, buf_size, pict); avctx->frame_number++; emms_c(); //needed to avoid an emms_c() call before every return; return ret; }else return 0; } int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub) { int ret; if(sub->start_display_time) { av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n"); return -1; } if(sub->num_rects == 0 || !sub->rects) return -1; ret = avctx->codec->encode(avctx, buf, buf_size, sub); avctx->frame_number++; return ret; } #if LIBAVCODEC_VERSION_MAJOR < 53 int attribute_align_arg avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const uint8_t *buf, int buf_size) { AVPacket avpkt; av_init_packet(&avpkt); avpkt.data = buf; avpkt.size = buf_size; // HACK for CorePNG to decode as normal PNG by default avpkt.flags = AV_PKT_FLAG_KEY; return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt); 
} #endif int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt) { int ret; *got_picture_ptr= 0; if((avctx->coded_width||avctx->coded_height) && avcodec_check_dimensions(avctx,avctx->coded_width,avctx->coded_height)) return -1; if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){ ret = avctx->codec->decode(avctx, picture, got_picture_ptr, avpkt); emms_c(); //needed to avoid an emms_c() call before every return; if (*got_picture_ptr) avctx->frame_number++; }else ret= 0; return ret; } #if LIBAVCODEC_VERSION_MAJOR < 53 int attribute_align_arg avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, const uint8_t *buf, int buf_size) { AVPacket avpkt; av_init_packet(&avpkt); avpkt.data = buf; avpkt.size = buf_size; return avcodec_decode_audio3(avctx, samples, frame_size_ptr, &avpkt); } #endif int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, AVPacket *avpkt) { int ret; if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){ //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){ av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n"); return -1; } if(*frame_size_ptr < FF_MIN_BUFFER_SIZE || *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){ av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr); return -1; } ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt); avctx->frame_number++; }else{ ret= 0; *frame_size_ptr=0; } return ret; } #if LIBAVCODEC_VERSION_MAJOR < 53 int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const uint8_t *buf, int buf_size) { AVPacket avpkt; av_init_packet(&avpkt); avpkt.data = buf; avpkt.size = buf_size; return avcodec_decode_subtitle2(avctx, sub, got_sub_ptr, 
&avpkt); } #endif int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt) { int ret; *got_sub_ptr = 0; ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt); if (*got_sub_ptr) avctx->frame_number++; return ret; } av_cold int avcodec_close(AVCodecContext *avctx) { /* If there is a user-supplied mutex locking routine, call it. */ if (ff_lockmgr_cb) { if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) return -1; } entangled_thread_counter++; if(entangled_thread_counter != 1){ av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); entangled_thread_counter--; return -1; } if (HAVE_THREADS && avctx->thread_opaque) avcodec_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); avcodec_default_free_buffers(avctx); av_freep(&avctx->priv_data); if(avctx->codec && avctx->codec->encode) av_freep(&avctx->extradata); avctx->codec = NULL; entangled_thread_counter--; /* Release any user-supplied mutex. 
*/ if (ff_lockmgr_cb) { (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); } return 0; } AVCodec *avcodec_find_encoder(enum CodecID id) { AVCodec *p, *experimental=NULL; p = first_avcodec; while (p) { if (p->encode != NULL && p->id == id) { if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { experimental = p; } else return p; } p = p->next; } return experimental; } AVCodec *avcodec_find_encoder_by_name(const char *name) { AVCodec *p; if (!name) return NULL; p = first_avcodec; while (p) { if (p->encode != NULL && strcmp(name,p->name) == 0) return p; p = p->next; } return NULL; } AVCodec *avcodec_find_decoder(enum CodecID id) { AVCodec *p; p = first_avcodec; while (p) { if (p->decode != NULL && p->id == id) return p; p = p->next; } return NULL; } AVCodec *avcodec_find_decoder_by_name(const char *name) { AVCodec *p; if (!name) return NULL; p = first_avcodec; while (p) { if (p->decode != NULL && strcmp(name,p->name) == 0) return p; p = p->next; } return NULL; } static int get_bit_rate(AVCodecContext *ctx) { int bit_rate; int bits_per_sample; switch(ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_ATTACHMENT: bit_rate = ctx->bit_rate; break; case AVMEDIA_TYPE_AUDIO: bits_per_sample = av_get_bits_per_sample(ctx->codec_id); bit_rate = bits_per_sample ? 
ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate; break; default: bit_rate = 0; break; } return bit_rate; } void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { const char *codec_name; AVCodec *p; char buf1[32]; int bitrate; AVRational display_aspect_ratio; if (encode) p = avcodec_find_encoder(enc->codec_id); else p = avcodec_find_decoder(enc->codec_id); if (p) { codec_name = p->name; } else if (enc->codec_id == CODEC_ID_MPEG2TS) { /* fake mpeg2 transport stream codec (currently not registered) */ codec_name = "mpeg2ts"; } else if (enc->codec_name[0] != '\0') { codec_name = enc->codec_name; } else { /* output avi tags */ if( isprint(enc->codec_tag&0xFF) && isprint((enc->codec_tag>>8)&0xFF) && isprint((enc->codec_tag>>16)&0xFF) && isprint((enc->codec_tag>>24)&0xFF)){ snprintf(buf1, sizeof(buf1), "%c%c%c%c / 0x%04X", enc->codec_tag & 0xff, (enc->codec_tag >> 8) & 0xff, (enc->codec_tag >> 16) & 0xff, (enc->codec_tag >> 24) & 0xff, enc->codec_tag); } else { snprintf(buf1, sizeof(buf1), "0x%04x", enc->codec_tag); } codec_name = buf1; } switch(enc->codec_type) { case AVMEDIA_TYPE_VIDEO: snprintf(buf, buf_size, "Video: %s%s", codec_name, enc->mb_decision ? 
" (hq)" : ""); if (enc->pix_fmt != PIX_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", avcodec_get_pix_fmt_name(enc->pix_fmt)); } if (enc->width) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %dx%d", enc->width, enc->height); if (enc->sample_aspect_ratio.num) { av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, enc->width*enc->sample_aspect_ratio.num, enc->height*enc->sample_aspect_ratio.den, 1024*1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), " [PAR %d:%d DAR %d:%d]", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } if(av_log_get_level() >= AV_LOG_DEBUG){ int g= av_gcd(enc->time_base.num, enc->time_base.den); snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d/%d", enc->time_base.num/g, enc->time_base.den/g); } } if (encode) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", q=%d-%d", enc->qmin, enc->qmax); } break; case AVMEDIA_TYPE_AUDIO: snprintf(buf, buf_size, "Audio: %s", codec_name); if (enc->sample_rate) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d Hz", enc->sample_rate); } av_strlcat(buf, ", ", buf_size); avcodec_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); if (enc->sample_fmt != SAMPLE_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", avcodec_get_sample_fmt_name(enc->sample_fmt)); } break; case AVMEDIA_TYPE_DATA: snprintf(buf, buf_size, "Data: %s", codec_name); break; case AVMEDIA_TYPE_SUBTITLE: snprintf(buf, buf_size, "Subtitle: %s", codec_name); break; case AVMEDIA_TYPE_ATTACHMENT: snprintf(buf, buf_size, "Attachment: %s", codec_name); break; default: snprintf(buf, buf_size, "Invalid Codec type %d", enc->codec_type); return; } if (encode) { if (enc->flags & CODEC_FLAG_PASS1) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 1"); if (enc->flags & CODEC_FLAG_PASS2) snprintf(buf + strlen(buf), buf_size - 
strlen(buf), ", pass 2"); } bitrate = get_bit_rate(enc); if (bitrate != 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %d kb/s", bitrate / 1000); } } unsigned avcodec_version( void ) { return LIBAVCODEC_VERSION_INT; } const char *avcodec_configuration(void) { return FFMPEG_CONFIGURATION; } const char *avcodec_license(void) { #define LICENSE_PREFIX "libavcodec license: " return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; } void avcodec_init(void) { static int initialized = 0; if (initialized != 0) return; initialized = 1; dsputil_static_init(); } void avcodec_flush_buffers(AVCodecContext *avctx) { if(avctx->codec->flush) avctx->codec->flush(avctx); } void avcodec_default_free_buffers(AVCodecContext *s){ int i, j; if(s->internal_buffer==NULL) return; if (s->internal_buffer_count) av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n", s->internal_buffer_count); for(i=0; i<INTERNAL_BUFFER_SIZE; i++){ InternalBuffer *buf= &((InternalBuffer*)s->internal_buffer)[i]; for(j=0; j<4; j++){ av_freep(&buf->base[j]); buf->data[j]= NULL; } } av_freep(&s->internal_buffer); s->internal_buffer_count=0; } char av_get_pict_type_char(int pict_type){ switch(pict_type){ case FF_I_TYPE: return 'I'; case FF_P_TYPE: return 'P'; case FF_B_TYPE: return 'B'; case FF_S_TYPE: return 'S'; case FF_SI_TYPE:return 'i'; case FF_SP_TYPE:return 'p'; case FF_BI_TYPE:return 'b'; default: return '?'; } } int av_get_bits_per_sample(enum CodecID codec_id){ switch(codec_id){ case CODEC_ID_ADPCM_SBPRO_2: return 2; case CODEC_ID_ADPCM_SBPRO_3: return 3; case CODEC_ID_ADPCM_SBPRO_4: case CODEC_ID_ADPCM_CT: case CODEC_ID_ADPCM_IMA_WAV: case CODEC_ID_ADPCM_MS: case CODEC_ID_ADPCM_YAMAHA: return 4; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: case CODEC_ID_PCM_S8: case CODEC_ID_PCM_U8: case CODEC_ID_PCM_ZORK: return 8; case CODEC_ID_PCM_S16BE: case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16LE_PLANAR: case CODEC_ID_PCM_U16BE: case CODEC_ID_PCM_U16LE: return 16; case 
CODEC_ID_PCM_S24DAUD: case CODEC_ID_PCM_S24BE: case CODEC_ID_PCM_S24LE: case CODEC_ID_PCM_U24BE: case CODEC_ID_PCM_U24LE: return 24; case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_U32BE: case CODEC_ID_PCM_U32LE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_F32LE: return 32; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F64LE: return 64; default: return 0; } } int av_get_bits_per_sample_format(enum SampleFormat sample_fmt) { switch (sample_fmt) { case SAMPLE_FMT_U8: return 8; case SAMPLE_FMT_S16: return 16; case SAMPLE_FMT_S32: case SAMPLE_FMT_FLT: return 32; case SAMPLE_FMT_DBL: return 64; default: return 0; } } #if !HAVE_THREADS int avcodec_thread_init(AVCodecContext *s, int thread_count){ s->thread_count = thread_count; return -1; } #endif unsigned int av_xiphlacing(unsigned char *s, unsigned int v) { unsigned int n = 0; while(v >= 0xff) { *s++ = 0xff; v -= 0xff; n++; } *s = v; n++; return n; } /* Wrapper to work around the lack of mkstemp() on mingw/cygin. * Also, tries to create file in /tmp first, if possible. * *prefix can be a character constant; *filename will be allocated internally. * Returns file descriptor of opened file (or -1 on error) * and opened file name in **filename. 
*/ int av_tempfile(char *prefix, char **filename) { int fd=-1; #if !HAVE_MKSTEMP *filename = tempnam(".", prefix); #else size_t len = strlen(prefix) + 12; /* room for "/tmp/" and "XXXXXX\0" */ *filename = av_malloc(len); #endif /* -----common section-----*/ if (*filename == NULL) { av_log(NULL, AV_LOG_ERROR, "ff_tempfile: Cannot allocate file name\n"); return -1; } #if !HAVE_MKSTEMP fd = open(*filename, O_RDWR | O_BINARY | O_CREAT, 0444); #else snprintf(*filename, len, "/tmp/%sXXXXXX", prefix); fd = mkstemp(*filename); if (fd < 0) { snprintf(*filename, len, "./%sXXXXXX", prefix); fd = mkstemp(*filename); } #endif /* -----common section-----*/ if (fd < 0) { av_log(NULL, AV_LOG_ERROR, "ff_tempfile: Cannot open temporary file %s\n", *filename); return -1; } return fd; /* success */ } typedef struct { const char *abbr; int width, height; } VideoFrameSizeAbbr; typedef struct { const char *abbr; int rate_num, rate_den; } VideoFrameRateAbbr; static const VideoFrameSizeAbbr video_frame_size_abbrs[] = { { "ntsc", 720, 480 }, { "pal", 720, 576 }, { "qntsc", 352, 240 }, /* VCD compliant NTSC */ { "qpal", 352, 288 }, /* VCD compliant PAL */ { "sntsc", 640, 480 }, /* square pixel NTSC */ { "spal", 768, 576 }, /* square pixel PAL */ { "film", 352, 240 }, { "ntsc-film", 352, 240 }, { "sqcif", 128, 96 }, { "qcif", 176, 144 }, { "cif", 352, 288 }, { "4cif", 704, 576 }, { "16cif", 1408,1152 }, { "qqvga", 160, 120 }, { "qvga", 320, 240 }, { "vga", 640, 480 }, { "svga", 800, 600 }, { "xga", 1024, 768 }, { "uxga", 1600,1200 }, { "qxga", 2048,1536 }, { "sxga", 1280,1024 }, { "qsxga", 2560,2048 }, { "hsxga", 5120,4096 }, { "wvga", 852, 480 }, { "wxga", 1366, 768 }, { "wsxga", 1600,1024 }, { "wuxga", 1920,1200 }, { "woxga", 2560,1600 }, { "wqsxga", 3200,2048 }, { "wquxga", 3840,2400 }, { "whsxga", 6400,4096 }, { "whuxga", 7680,4800 }, { "cga", 320, 200 }, { "ega", 640, 350 }, { "hd480", 852, 480 }, { "hd720", 1280, 720 }, { "hd1080", 1920,1080 }, }; static const VideoFrameRateAbbr 
video_frame_rate_abbrs[]= { { "ntsc", 30000, 1001 }, { "pal", 25, 1 }, { "qntsc", 30000, 1001 }, /* VCD compliant NTSC */ { "qpal", 25, 1 }, /* VCD compliant PAL */ { "sntsc", 30000, 1001 }, /* square pixel NTSC */ { "spal", 25, 1 }, /* square pixel PAL */ { "film", 24, 1 }, { "ntsc-film", 24000, 1001 }, }; int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str) { int i; int n = FF_ARRAY_ELEMS(video_frame_size_abbrs); char *p; int frame_width = 0, frame_height = 0; for(i=0;i<n;i++) { if (!strcmp(video_frame_size_abbrs[i].abbr, str)) { frame_width = video_frame_size_abbrs[i].width; frame_height = video_frame_size_abbrs[i].height; break; } } if (i == n) { p = str; frame_width = strtol(p, &p, 10); if (*p) p++; frame_height = strtol(p, &p, 10); } if (frame_width <= 0 || frame_height <= 0) return -1; *width_ptr = frame_width; *height_ptr = frame_height; return 0; } int av_parse_video_frame_rate(AVRational *frame_rate, const char *arg) { int i; int n = FF_ARRAY_ELEMS(video_frame_rate_abbrs); char* cp; /* First, we check our abbreviation table */ for (i = 0; i < n; ++i) if (!strcmp(video_frame_rate_abbrs[i].abbr, arg)) { frame_rate->num = video_frame_rate_abbrs[i].rate_num; frame_rate->den = video_frame_rate_abbrs[i].rate_den; return 0; } /* Then, we try to parse it as fraction */ cp = strchr(arg, '/'); if (!cp) cp = strchr(arg, ':'); if (cp) { char* cpp; frame_rate->num = strtol(arg, &cpp, 10); if (cpp != arg || cpp == cp) frame_rate->den = strtol(cp+1, &cpp, 10); else frame_rate->num = 0; } else { /* Finally we give up and parse it as double */ AVRational time_base = av_d2q(strtod(arg, 0), 1001000); frame_rate->den = time_base.den; frame_rate->num = time_base.num; } if (!frame_rate->num || !frame_rate->den) return -1; else return 0; } int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){ int i; for(i=0; i<size && !(tab[i][0]==a && tab[i][1]==b); i++); return i; } void av_log_missing_feature(void *avc, const char *feature, int 
want_sample) { av_log(avc, AV_LOG_WARNING, "%s not implemented. Update your FFmpeg " "version to the newest one from SVN. If the problem still " "occurs, it means that your file has a feature which has not " "been implemented.", feature); if(want_sample) av_log_ask_for_sample(avc, NULL); else av_log(avc, AV_LOG_WARNING, "\n"); } void av_log_ask_for_sample(void *avc, const char *msg) { if (msg) av_log(avc, AV_LOG_WARNING, "%s ", msg); av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample " "of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ " "and contact the ffmpeg-devel mailing list.\n"); } static AVHWAccel *first_hwaccel = NULL; void av_register_hwaccel(AVHWAccel *hwaccel) { AVHWAccel **p = &first_hwaccel; while (*p) p = &(*p)->next; *p = hwaccel; hwaccel->next = NULL; } AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel) { return hwaccel ? hwaccel->next : first_hwaccel; } AVHWAccel *ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt) { AVHWAccel *hwaccel=NULL; while((hwaccel= av_hwaccel_next(hwaccel))){ if ( hwaccel->id == codec_id && hwaccel->pix_fmt == pix_fmt) return hwaccel; } return NULL; } int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) { if (ff_lockmgr_cb) { if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY)) return -1; } ff_lockmgr_cb = cb; if (ff_lockmgr_cb) { if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE)) return -1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/utils.c
C
asf20
38,112
/* * H.26L/H.264/AVC/JVT/14496-10/... reference picture handling * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.264 / AVC / MPEG4 part10 reference picture handling. * @author Michael Niedermayer <michaelni@gmx.at> */ #include "internal.h" #include "dsputil.h" #include "avcodec.h" #include "h264.h" #include "golomb.h" //#undef NDEBUG #include <assert.h> static void pic_as_field(Picture *pic, const int parity){ int i; for (i = 0; i < 4; ++i) { if (parity == PICT_BOTTOM_FIELD) pic->data[i] += pic->linesize[i]; pic->reference = parity; pic->linesize[i] *= 2; } pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD]; } static int split_field_copy(Picture *dest, Picture *src, int parity, int id_add){ int match = !!(src->reference & parity); if (match) { *dest = *src; if(parity != PICT_FRAME){ pic_as_field(dest, parity); dest->pic_id *= 2; dest->pic_id += id_add; } } return match; } static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){ int i[2]={0}; int index=0; while(i[0]<len || i[1]<len){ while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel))) i[0]++; while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3)))) i[1]++; if(i[0] < len){ in[ i[0] ]->pic_id= is_long 
? i[0] : in[ i[0] ]->frame_num; split_field_copy(&def[index++], in[ i[0]++ ], sel , 1); } if(i[1] < len){ in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num; split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0); } } return index; } static int add_sorted(Picture **sorted, Picture **src, int len, int limit, int dir){ int i, best_poc; int out_i= 0; for(;;){ best_poc= dir ? INT_MIN : INT_MAX; for(i=0; i<len; i++){ const int poc= src[i]->poc; if(((poc > limit) ^ dir) && ((poc < best_poc) ^ dir)){ best_poc= poc; sorted[out_i]= src[i]; } } if(best_poc == (dir ? INT_MIN : INT_MAX)) break; limit= sorted[out_i++]->poc - dir; } return out_i; } int ff_h264_fill_default_ref_list(H264Context *h){ MpegEncContext * const s = &h->s; int i, len; if(h->slice_type_nos==FF_B_TYPE){ Picture *sorted[32]; int cur_poc, list; int lens[2]; if(FIELD_PICTURE) cur_poc= s->current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ]; else cur_poc= s->current_picture_ptr->poc; for(list= 0; list<2; list++){ len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list); len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list); assert(len<=32); len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure); len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure); assert(len<=32); if(len < h->ref_count[list]) memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len)); lens[list]= len; } if(lens[0] == lens[1] && lens[1] > 1){ for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++); if(i == lens[0]) FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]); } }else{ len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure); len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure); assert(len <= 32); if(len < 
h->ref_count[0]) memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len)); } #ifdef TRACE for (i=0; i<h->ref_count[0]; i++) { tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]); } if(h->slice_type_nos==FF_B_TYPE){ for (i=0; i<h->ref_count[1]; i++) { tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]); } } #endif return 0; } static void print_short_term(H264Context *h); static void print_long_term(H264Context *h); /** * Extract structure information about the picture described by pic_num in * the current decoding context (frame or field). Note that pic_num is * picture number without wrapping (so, 0<=pic_num<max_pic_num). * @param pic_num picture number for which to extract structure information * @param structure one of PICT_XXX describing structure of picture * with pic_num * @return frame number (short term) or long term index of picture * described by pic_num */ static int pic_num_extract(H264Context *h, int pic_num, int *structure){ MpegEncContext * const s = &h->s; *structure = s->picture_structure; if(FIELD_PICTURE){ if (!(pic_num & 1)) /* opposite field */ *structure ^= PICT_FRAME; pic_num >>= 1; } return pic_num; } int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ MpegEncContext * const s = &h->s; int list, index, pic_structure; print_short_term(h); print_long_term(h); for(list=0; list<h->list_count; list++){ memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]); if(get_bits1(&s->gb)){ int pred= h->curr_pic_num; for(index=0; ; index++){ unsigned int reordering_of_pic_nums_idc= get_ue_golomb_31(&s->gb); unsigned int pic_id; int i; Picture *ref = NULL; if(reordering_of_pic_nums_idc==3) break; if(index >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, 
"reference count overflow\n"); return -1; } if(reordering_of_pic_nums_idc<3){ if(reordering_of_pic_nums_idc<2){ const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1; int frame_num; if(abs_diff_pic_num > h->max_pic_num){ av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); return -1; } if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num; else pred+= abs_diff_pic_num; pred &= h->max_pic_num - 1; frame_num = pic_num_extract(h, pred, &pic_structure); for(i= h->short_ref_count-1; i>=0; i--){ ref = h->short_ref[i]; assert(ref->reference); assert(!ref->long_ref); if( ref->frame_num == frame_num && (ref->reference & pic_structure) ) break; } if(i>=0) ref->pic_id= pred; }else{ int long_idx; pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx long_idx= pic_num_extract(h, pic_id, &pic_structure); if(long_idx>31){ av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n"); return -1; } ref = h->long_ref[long_idx]; assert(!(ref && !ref->reference)); if(ref && (ref->reference & pic_structure)){ ref->pic_id= pic_id; assert(ref->long_ref); i=0; }else{ i=-1; } } if (i < 0) { av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME } else { for(i=index; i+1<h->ref_count[list]; i++){ if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id) break; } for(; i > index; i--){ h->ref_list[list][i]= h->ref_list[list][i-1]; } h->ref_list[list][index]= *ref; if (FIELD_PICTURE){ pic_as_field(&h->ref_list[list][index], pic_structure); } } }else{ av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n"); return -1; } } } } for(list=0; list<h->list_count; list++){ for(index= 0; index < h->ref_count[list]; index++){ if(!h->ref_list[list][index].data[0]){ av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n"); if(h->default_ref_list[list][0].data[0]) h->ref_list[list][index]= h->default_ref_list[list][0]; else return 
-1; } } } return 0; } void ff_h264_fill_mbaff_ref_list(H264Context *h){ int list, i, j; for(list=0; list<2; list++){ //FIXME try list_count for(i=0; i<h->ref_count[list]; i++){ Picture *frame = &h->ref_list[list][i]; Picture *field = &h->ref_list[list][16+2*i]; field[0] = *frame; for(j=0; j<3; j++) field[0].linesize[j] <<= 1; field[0].reference = PICT_TOP_FIELD; field[0].poc= field[0].field_poc[0]; field[1] = field[0]; for(j=0; j<3; j++) field[1].data[j] += frame->linesize[j]; field[1].reference = PICT_BOTTOM_FIELD; field[1].poc= field[1].field_poc[1]; h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0]; h->luma_weight[16+2*i][list][1] = h->luma_weight[16+2*i+1][list][1] = h->luma_weight[i][list][1]; for(j=0; j<2; j++){ h->chroma_weight[16+2*i][list][j][0] = h->chroma_weight[16+2*i+1][list][j][0] = h->chroma_weight[i][list][j][0]; h->chroma_weight[16+2*i][list][j][1] = h->chroma_weight[16+2*i+1][list][j][1] = h->chroma_weight[i][list][j][1]; } } } } /** * Mark a picture as no longer needed for reference. The refmask * argument allows unreferencing of individual fields or the whole frame. * If the picture becomes entirely unreferenced, but is being held for * display purposes, it is marked as such. * @param refmask mask of fields to unreference; the mask is bitwise * anded with the reference marking of pic * @return non-zero if pic becomes entirely unreferenced (except possibly * for display purposes) zero if one of the fields remains in * reference */ static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){ int i; if (pic->reference &= refmask) { return 0; } else { for(i = 0; h->delayed_pic[i]; i++) if(pic == h->delayed_pic[i]){ pic->reference=DELAYED_PIC_REF; break; } return 1; } } /** * Find a Picture in the short term reference list by frame number. 
* @param frame_num frame number to search for * @param idx the index into h->short_ref where returned picture is found * undefined if no picture found. * @return pointer to the found picture, or NULL if no pic with the provided * frame number is found */ static Picture * find_short(H264Context *h, int frame_num, int *idx){ MpegEncContext * const s = &h->s; int i; for(i=0; i<h->short_ref_count; i++){ Picture *pic= h->short_ref[i]; if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic); if(pic->frame_num == frame_num) { *idx = i; return pic; } } return NULL; } /** * Remove a picture from the short term reference list by its index in * that list. This does no checking on the provided index; it is assumed * to be valid. Other list entries are shifted down. * @param i index into h->short_ref of picture to remove. */ static void remove_short_at_index(H264Context *h, int i){ assert(i >= 0 && i < h->short_ref_count); h->short_ref[i]= NULL; if (--h->short_ref_count) memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i)*sizeof(Picture*)); } /** * * @return the removed picture or NULL if an error occurs */ static Picture * remove_short(H264Context *h, int frame_num, int ref_mask){ MpegEncContext * const s = &h->s; Picture *pic; int i; if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count); pic = find_short(h, frame_num, &i); if (pic){ if(unreference_pic(h, pic, ref_mask)) remove_short_at_index(h, i); } return pic; } /** * Remove a picture from the long term reference list by its index in * that list. 
* @return the removed picture or NULL if an error occurs */ static Picture * remove_long(H264Context *h, int i, int ref_mask){ Picture *pic; pic= h->long_ref[i]; if (pic){ if(unreference_pic(h, pic, ref_mask)){ assert(h->long_ref[i]->long_ref == 1); h->long_ref[i]->long_ref= 0; h->long_ref[i]= NULL; h->long_ref_count--; } } return pic; } void ff_h264_remove_all_refs(H264Context *h){ int i; for(i=0; i<16; i++){ remove_long(h, i, 0); } assert(h->long_ref_count==0); for(i=0; i<h->short_ref_count; i++){ unreference_pic(h, h->short_ref[i], 0); h->short_ref[i]= NULL; } h->short_ref_count=0; } /** * print short term list */ static void print_short_term(H264Context *h) { uint32_t i; if(h->s.avctx->debug&FF_DEBUG_MMCO) { av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n"); for(i=0; i<h->short_ref_count; i++){ Picture *pic= h->short_ref[i]; av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]); } } } /** * print long term list */ static void print_long_term(H264Context *h) { uint32_t i; if(h->s.avctx->debug&FF_DEBUG_MMCO) { av_log(h->s.avctx, AV_LOG_DEBUG, "long term list:\n"); for(i = 0; i < 16; i++){ Picture *pic= h->long_ref[i]; if (pic) { av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]); } } } } int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ MpegEncContext * const s = &h->s; int i, av_uninit(j); int current_ref_assigned=0; Picture *av_uninit(pic); if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0) av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n"); for(i=0; i<mmco_count; i++){ int av_uninit(structure), av_uninit(frame_num); if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_pic_num, h->mmco[i].long_arg); if( mmco[i].opcode == MMCO_SHORT2UNUSED || mmco[i].opcode == MMCO_SHORT2LONG){ frame_num = pic_num_extract(h, mmco[i].short_pic_num, &structure); pic = find_short(h, 
frame_num, &j); if(!pic){ if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg] || h->long_ref[mmco[i].long_arg]->frame_num != frame_num) av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n"); continue; } } switch(mmco[i].opcode){ case MMCO_SHORT2UNUSED: if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref short %d count %d\n", h->mmco[i].short_pic_num, h->short_ref_count); remove_short(h, frame_num, structure ^ PICT_FRAME); break; case MMCO_SHORT2LONG: if (h->long_ref[mmco[i].long_arg] != pic) remove_long(h, mmco[i].long_arg, 0); remove_short_at_index(h, j); h->long_ref[ mmco[i].long_arg ]= pic; if (h->long_ref[ mmco[i].long_arg ]){ h->long_ref[ mmco[i].long_arg ]->long_ref=1; h->long_ref_count++; } break; case MMCO_LONG2UNUSED: j = pic_num_extract(h, mmco[i].long_arg, &structure); pic = h->long_ref[j]; if (pic) { remove_long(h, j, structure ^ PICT_FRAME); } else if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, "mmco: unref long failure\n"); break; case MMCO_LONG: // Comment below left from previous code as it is an interresting note. /* First field in pair is in short term list or * at a different long term index. * This is not allowed; see 7.4.3.3, notes 2 and 3. * Report the problem and keep the pair where it is, * and mark this field valid. 
*/ if (h->long_ref[mmco[i].long_arg] != s->current_picture_ptr) { remove_long(h, mmco[i].long_arg, 0); h->long_ref[ mmco[i].long_arg ]= s->current_picture_ptr; h->long_ref[ mmco[i].long_arg ]->long_ref=1; h->long_ref_count++; } s->current_picture_ptr->reference |= s->picture_structure; current_ref_assigned=1; break; case MMCO_SET_MAX_LONG: assert(mmco[i].long_arg <= 16); // just remove the long term which index is greater than new max for(j = mmco[i].long_arg; j<16; j++){ remove_long(h, j, 0); } break; case MMCO_RESET: while(h->short_ref_count){ remove_short(h, h->short_ref[0]->frame_num, 0); } for(j = 0; j < 16; j++) { remove_long(h, j, 0); } s->current_picture_ptr->poc= s->current_picture_ptr->field_poc[0]= s->current_picture_ptr->field_poc[1]= h->poc_lsb= h->poc_msb= h->frame_num= s->current_picture_ptr->frame_num= 0; s->current_picture_ptr->mmco_reset=1; break; default: assert(0); } } if (!current_ref_assigned) { /* Second field of complementary field pair; the first field of * which is already referenced. If short referenced, it * should be first entry in short_ref. If not, it must exist * in long_ref; trying to put it on the short list here is an * error in the encoded bit stream (ref: 7.4.3.3, NOTE 2 and 3). 
*/ if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) { /* Just mark the second field valid */ s->current_picture_ptr->reference = PICT_FRAME; } else if (s->current_picture_ptr->long_ref) { av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference " "assignment for second field " "in complementary field pair " "(first field is long term)\n"); } else { pic= remove_short(h, s->current_picture_ptr->frame_num, 0); if(pic){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n"); } if(h->short_ref_count) memmove(&h->short_ref[1], &h->short_ref[0], h->short_ref_count*sizeof(Picture*)); h->short_ref[0]= s->current_picture_ptr; h->short_ref_count++; s->current_picture_ptr->reference |= s->picture_structure; } } if (h->long_ref_count + h->short_ref_count > h->sps.ref_frame_count){ /* We have too many reference frames, probably due to corrupted * stream. Need to discard one frame. Prevents overrun of the * short_ref and long_ref buffers. */ av_log(h->s.avctx, AV_LOG_ERROR, "number of reference frames exceeds max (probably " "corrupt input), discarding one\n"); if (h->long_ref_count && !h->short_ref_count) { for (i = 0; i < 16; ++i) if (h->long_ref[i]) break; assert(i < 16); remove_long(h, i, 0); } else { pic = h->short_ref[h->short_ref_count - 1]; remove_short(h, pic->frame_num, 0); } } print_short_term(h); print_long_term(h); return 0; } int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){ MpegEncContext * const s = &h->s; int i; h->mmco_index= 0; if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields s->broken_link= get_bits1(gb) -1; if(get_bits1(gb)){ h->mmco[0].opcode= MMCO_LONG; h->mmco[0].long_arg= 0; h->mmco_index= 1; } }else{ if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag for(i= 0; i<MAX_MMCO_COUNT; i++) { MMCOOpcode opcode= get_ue_golomb_31(gb); h->mmco[i].opcode= opcode; if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){ h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 
1) & (h->max_pic_num - 1); /* if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){ av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco); return -1; }*/ } if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){ unsigned int long_arg= get_ue_golomb_31(gb); if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode); return -1; } h->mmco[i].long_arg= long_arg; } if(opcode > (unsigned)MMCO_LONG){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode); return -1; } if(opcode == MMCO_END) break; } h->mmco_index= i; }else{ assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count); if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && !(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) { h->mmco[0].opcode= MMCO_SHORT2UNUSED; h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num; h->mmco_index= 1; if (FIELD_PICTURE) { h->mmco[0].short_pic_num *= 2; h->mmco[1].opcode= MMCO_SHORT2UNUSED; h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1; h->mmco_index= 2; } } } } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/h264_refs.c
C
asf20
25,866
/** * @file * VP5 and VP6 compatible video decoder (common features) * * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_VP56_H #define AVCODEC_VP56_H #include "vp56data.h" #include "dsputil.h" #include "get_bits.h" #include "bytestream.h" #include "vp56dsp.h" typedef struct vp56_context VP56Context; typedef struct vp56_mv VP56mv; typedef void (*VP56ParseVectorAdjustment)(VP56Context *s, VP56mv *vect); typedef void (*VP56Filter)(VP56Context *s, uint8_t *dst, uint8_t *src, int offset1, int offset2, int stride, VP56mv mv, int mask, int select, int luma); typedef void (*VP56ParseCoeff)(VP56Context *s); typedef void (*VP56DefaultModelsInit)(VP56Context *s); typedef void (*VP56ParseVectorModels)(VP56Context *s); typedef void (*VP56ParseCoeffModels)(VP56Context *s); typedef int (*VP56ParseHeader)(VP56Context *s, const uint8_t *buf, int buf_size, int *golden_frame); typedef struct { int high; int bits; const uint8_t *buffer; const uint8_t *end; unsigned long code_word; } VP56RangeCoder; typedef struct { uint8_t not_null_dc; VP56Frame ref_frame; DCTELEM dc_coeff; } VP56RefDc; struct vp56_mv { int x; int y; }; typedef struct { uint8_t type; VP56mv mv; } VP56Macroblock; typedef struct { uint8_t coeff_reorder[64]; /* used 
in vp6 only */ uint8_t coeff_index_to_pos[64]; /* used in vp6 only */ uint8_t vector_sig[2]; /* delta sign */ uint8_t vector_dct[2]; /* delta coding types */ uint8_t vector_pdi[2][2]; /* predefined delta init */ uint8_t vector_pdv[2][7]; /* predefined delta values */ uint8_t vector_fdv[2][8]; /* 8 bit delta value definition */ uint8_t coeff_dccv[2][11]; /* DC coeff value */ uint8_t coeff_ract[2][3][6][11]; /* Run/AC coding type and AC coeff value */ uint8_t coeff_acct[2][3][3][6][5];/* vp5 only AC coding type for coding group < 3 */ uint8_t coeff_dcct[2][36][5]; /* DC coeff coding type */ uint8_t coeff_runv[2][14]; /* run value (vp6 only) */ uint8_t mb_type[3][10][10]; /* model for decoding MB type */ uint8_t mb_types_stats[3][10][2];/* contextual, next MB type stats */ } VP56Model; struct vp56_context { AVCodecContext *avctx; DSPContext dsp; VP56DSPContext vp56dsp; ScanTable scantable; AVFrame frames[4]; AVFrame *framep[6]; uint8_t *edge_emu_buffer_alloc; uint8_t *edge_emu_buffer; VP56RangeCoder c; VP56RangeCoder cc; VP56RangeCoder *ccp; int sub_version; /* frame info */ int plane_width[4]; int plane_height[4]; int mb_width; /* number of horizontal MB */ int mb_height; /* number of vertical MB */ int block_offset[6]; int quantizer; uint16_t dequant_dc; uint16_t dequant_ac; int8_t *qscale_table; /* DC predictors management */ VP56RefDc *above_blocks; VP56RefDc left_block[4]; int above_block_idx[6]; DCTELEM prev_dc[3][3]; /* [plan][ref_frame] */ /* blocks / macroblock */ VP56mb mb_type; VP56Macroblock *macroblocks; DECLARE_ALIGNED(16, DCTELEM, block_coeff)[6][64]; /* motion vectors */ VP56mv mv[6]; /* vectors for each block in MB */ VP56mv vector_candidate[2]; int vector_candidate_pos; /* filtering hints */ int filter_header; /* used in vp6 only */ int deblock_filtering; int filter_selection; int filter_mode; int max_vector_length; int sample_variance_threshold; uint8_t coeff_ctx[4][64]; /* used in vp5 only */ uint8_t coeff_ctx_last[4]; /* used in vp5 only */ int 
has_alpha; /* upside-down flipping hints */ int flip; /* are we flipping ? */ int frbi; /* first row block index in MB */ int srbi; /* second row block index in MB */ int stride[4]; /* stride for each plan */ const uint8_t *vp56_coord_div; VP56ParseVectorAdjustment parse_vector_adjustment; VP56Filter filter; VP56ParseCoeff parse_coeff; VP56DefaultModelsInit default_models_init; VP56ParseVectorModels parse_vector_models; VP56ParseCoeffModels parse_coeff_models; VP56ParseHeader parse_header; VP56Model *modelp; VP56Model models[2]; /* huffman decoding */ int use_huffman; GetBitContext gb; VLC dccv_vlc[2]; VLC runv_vlc[2]; VLC ract_vlc[2][3][6]; unsigned int nb_null[2][2]; /* number of consecutive NULL DC/AC */ }; void vp56_init(AVCodecContext *avctx, int flip, int has_alpha); int vp56_free(AVCodecContext *avctx); void vp56_init_dequant(VP56Context *s, int quantizer); int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt); /** * vp56 specific range coder implementation */ static inline void vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size) { c->high = 255; c->bits = 8; c->buffer = buf; c->end = buf + buf_size; c->code_word = bytestream_get_be16(&c->buffer); } static inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob) { unsigned int low = 1 + (((c->high - 1) * prob) / 256); unsigned int low_shift = low << 8; int bit = c->code_word >= low_shift; if (bit) { c->high -= low; c->code_word -= low_shift; } else { c->high = low; } /* normalize */ while (c->high < 128) { c->high <<= 1; c->code_word <<= 1; if (--c->bits == 0 && c->buffer < c->end) { c->bits = 8; c->code_word |= *c->buffer++; } } return bit; } static inline int vp56_rac_get(VP56RangeCoder *c) { /* equiprobable */ int low = (c->high + 1) >> 1; unsigned int low_shift = low << 8; int bit = c->code_word >= low_shift; if (bit) { c->high = (c->high - low) << 1; c->code_word -= low_shift; } else { c->high = low << 1; } /* normalize */ c->code_word 
<<= 1; if (--c->bits == 0 && c->buffer < c->end) { c->bits = 8; c->code_word |= *c->buffer++; } return bit; } static inline int vp56_rac_gets(VP56RangeCoder *c, int bits) { int value = 0; while (bits--) { value = (value << 1) | vp56_rac_get(c); } return value; } static inline int vp56_rac_gets_nn(VP56RangeCoder *c, int bits) { int v = vp56_rac_gets(c, 7) << 1; return v + !v; } static inline int vp56_rac_get_tree(VP56RangeCoder *c, const VP56Tree *tree, const uint8_t *probs) { while (tree->val > 0) { if (vp56_rac_get_prob(c, probs[tree->prob_idx])) tree += tree->val; else tree++; } return -tree->val; } #endif /* AVCODEC_VP56_H */
123linslouis-android-video-cutter
jni/libavcodec/vp56.h
C
asf20
7,766
/* * PAM image format * Copyright (c) 2002, 2003 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "avcodec.h" #include "bytestream.h" #include "pnm.h" static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data) { PNMContext *s = avctx->priv_data; AVFrame *pict = data; AVFrame * const p = (AVFrame*)&s->picture; int i, h, w, n, linesize, depth, maxval; const char *tuple_type; uint8_t *ptr; if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) { av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } *p = *pict; p->pict_type = FF_I_TYPE; p->key_frame = 1; s->bytestream_start = s->bytestream = outbuf; s->bytestream_end = outbuf+buf_size; h = avctx->height; w = avctx->width; switch (avctx->pix_fmt) { case PIX_FMT_MONOWHITE: n = (w + 7) >> 3; depth = 1; maxval = 1; tuple_type = "BLACKANDWHITE"; break; case PIX_FMT_GRAY8: n = w; depth = 1; maxval = 255; tuple_type = "GRAYSCALE"; break; case PIX_FMT_RGB24: n = w * 3; depth = 3; maxval = 255; tuple_type = "RGB"; break; case PIX_FMT_RGB32: n = w * 4; depth = 4; maxval = 255; tuple_type = "RGB_ALPHA"; break; default: return -1; } snprintf(s->bytestream, s->bytestream_end - s->bytestream, "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL 
%d\nTUPLETYPE %s\nENDHDR\n", w, h, depth, maxval, tuple_type); s->bytestream += strlen(s->bytestream); ptr = p->data[0]; linesize = p->linesize[0]; if (avctx->pix_fmt == PIX_FMT_RGB32) { int j; unsigned int v; for (i = 0; i < h; i++) { for (j = 0; j < w; j++) { v = ((uint32_t *)ptr)[j]; bytestream_put_be24(&s->bytestream, v); *s->bytestream++ = v >> 24; } ptr += linesize; } } else { for (i = 0; i < h; i++) { memcpy(s->bytestream, ptr, n); s->bytestream += n; ptr += linesize; } } return s->bytestream - s->bytestream_start; } AVCodec pam_encoder = { "pam", AVMEDIA_TYPE_VIDEO, CODEC_ID_PAM, sizeof(PNMContext), ff_pnm_init, pam_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"), };
123linslouis-android-video-cutter
jni/libavcodec/pamenc.c
C
asf20
3,537
/* * SVQ1 Encoder * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Sorenson Vector Quantizer #1 (SVQ1) video codec. * For more information of the SVQ1 algorithm, visit: * http://www.pcisys.net/~melanson/codecs/ */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "h263.h" #include "internal.h" #include "svq1.h" #include "svq1enc_cb.h" #undef NDEBUG #include <assert.h> typedef struct SVQ1Context { MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to make the motion estimation eventually independent of MpegEncContext, so this will be removed then (FIXME/XXX) AVCodecContext *avctx; DSPContext dsp; AVFrame picture; AVFrame current_picture; AVFrame last_picture; PutBitContext pb; GetBitContext gb; PutBitContext reorder_pb[6]; //why ooh why this sick breadth first order, everything is slower and more complex int frame_width; int frame_height; /* Y plane block dimensions */ int y_block_width; int y_block_height; /* U & V plane (C planes) block dimensions */ int c_block_width; int c_block_height; uint16_t *mb_type; uint32_t *dummy; int16_t (*motion_val8[3])[2]; int16_t (*motion_val16[3])[2]; int64_t rd_total; uint8_t *scratchbuf; } SVQ1Context; 
static void svq1_write_header(SVQ1Context *s, int frame_type) { int i; /* frame code */ put_bits(&s->pb, 22, 0x20); /* temporal reference (sure hope this is a "don't care") */ put_bits(&s->pb, 8, 0x00); /* frame type */ put_bits(&s->pb, 2, frame_type - 1); if (frame_type == FF_I_TYPE) { /* no checksum since frame code is 0x20 */ /* no embedded string either */ /* output 5 unknown bits (2 + 2 + 1) */ put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */ i= ff_match_2uint16(ff_svq1_frame_size_table, FF_ARRAY_ELEMS(ff_svq1_frame_size_table), s->frame_width, s->frame_height); put_bits(&s->pb, 3, i); if (i == 7) { put_bits(&s->pb, 12, s->frame_width); put_bits(&s->pb, 12, s->frame_height); } } /* no checksum or extra data (next 2 bits get 0) */ put_bits(&s->pb, 2, 0); } #define QUALITY_THRESHOLD 100 #define THRESHOLD_MULTIPLIER 0.6 #if HAVE_ALTIVEC #undef vector #endif static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra){ int count, y, x, i, j, split, best_mean, best_score, best_count; int best_vector[6]; int block_sum[7]= {0, 0, 0, 0, 0, 0}; int w= 2<<((level+2)>>1); int h= 2<<((level+1)>>1); int size=w*h; int16_t block[7][256]; const int8_t *codebook_sum, *codebook; const uint16_t (*mean_vlc)[2]; const uint8_t (*multistage_vlc)[2]; best_score=0; //FIXME optimize, this doenst need to be done multiple times if(intra){ codebook_sum= svq1_intra_codebook_sum[level]; codebook= ff_svq1_intra_codebooks[level]; mean_vlc= ff_svq1_intra_mean_vlc; multistage_vlc= ff_svq1_intra_multistage_vlc[level]; for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v= src[x + y*stride]; block[0][x + w*y]= v; best_score += v*v; block_sum[0] += v; } } }else{ codebook_sum= svq1_inter_codebook_sum[level]; codebook= ff_svq1_inter_codebooks[level]; mean_vlc= ff_svq1_inter_mean_vlc + 256; multistage_vlc= ff_svq1_inter_multistage_vlc[level]; for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v= src[x + y*stride] - 
ref[x + y*stride]; block[0][x + w*y]= v; best_score += v*v; block_sum[0] += v; } } } best_count=0; best_score -= ((block_sum[0]*block_sum[0])>>(level+3)); best_mean= (block_sum[0] + (size>>1)) >> (level+3); if(level<4){ for(count=1; count<7; count++){ int best_vector_score= INT_MAX; int best_vector_sum=-999, best_vector_mean=-999; const int stage= count-1; const int8_t *vector; for(i=0; i<16; i++){ int sum= codebook_sum[stage*16 + i]; int sqr, diff, score; vector = codebook + stage*size*16 + i*size; sqr = s->dsp.ssd_int8_vs_int16(vector, block[stage], size); diff= block_sum[stage] - sum; score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow if(score < best_vector_score){ int mean= (diff + (size>>1)) >> (level+3); assert(mean >-300 && mean<300); mean= av_clip(mean, intra?0:-256, 255); best_vector_score= score; best_vector[stage]= i; best_vector_sum= sum; best_vector_mean= mean; } } assert(best_vector_mean != -999); vector= codebook + stage*size*16 + best_vector[stage]*size; for(j=0; j<size; j++){ block[stage+1][j] = block[stage][j] - vector[j]; } block_sum[stage+1]= block_sum[stage] - best_vector_sum; best_vector_score += lambda*(+ 1 + 4*count + multistage_vlc[1+count][1] + mean_vlc[best_vector_mean][1]); if(best_vector_score < best_score){ best_score= best_vector_score; best_count= count; best_mean= best_vector_mean; } } } split=0; if(best_score > threshold && level){ int score=0; int offset= (level&1) ? 
stride*h/2 : w/2; PutBitContext backup[6]; for(i=level-1; i>=0; i--){ backup[i]= s->reorder_pb[i]; } score += encode_block(s, src , ref , decoded , stride, level-1, threshold>>1, lambda, intra); score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra); score += lambda; if(score < best_score){ best_score= score; split=1; }else{ for(i=level-1; i>=0; i--){ s->reorder_pb[i]= backup[i]; } } } if (level > 0) put_bits(&s->reorder_pb[level], 1, split); if(!split){ assert((best_mean >= 0 && best_mean<256) || !intra); assert(best_mean >= -256 && best_mean<256); assert(best_count >=0 && best_count<7); assert(level<4 || best_count==0); /* output the encoding */ put_bits(&s->reorder_pb[level], multistage_vlc[1 + best_count][1], multistage_vlc[1 + best_count][0]); put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1], mean_vlc[best_mean][0]); for (i = 0; i < best_count; i++){ assert(best_vector[i]>=0 && best_vector[i]<16); put_bits(&s->reorder_pb[level], 4, best_vector[i]); } for(y=0; y<h; y++){ for(x=0; x<w; x++){ decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean; } } } return best_score; } static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride) { int x, y; int i; int block_width, block_height; int level; int threshold[6]; uint8_t *src = s->scratchbuf + stride * 16; const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT); /* figure out the acceptable level thresholds in advance */ threshold[5] = QUALITY_THRESHOLD; for (level = 4; level >= 0; level--) threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER; block_width = (width + 15) / 16; block_height = (height + 15) / 16; if(s->picture.pict_type == FF_P_TYPE){ s->m.avctx= s->avctx; s->m.current_picture_ptr= &s->m.current_picture; s->m.last_picture_ptr = &s->m.last_picture; 
s->m.last_picture.data[0]= ref_plane; s->m.linesize= s->m.last_picture.linesize[0]= s->m.new_picture.linesize[0]= s->m.current_picture.linesize[0]= stride; s->m.width= width; s->m.height= height; s->m.mb_width= block_width; s->m.mb_height= block_height; s->m.mb_stride= s->m.mb_width+1; s->m.b8_stride= 2*s->m.mb_width+1; s->m.f_code=1; s->m.pict_type= s->picture.pict_type; s->m.me_method= s->avctx->me_method; s->m.me.scene_change_score=0; s->m.flags= s->avctx->flags; // s->m.out_format = FMT_H263; // s->m.unrestricted_mv= 1; s->m.lambda= s->picture.quality; s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; if(!s->motion_val8[plane]){ s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t)); s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t)); } s->m.mb_type= s->mb_type; //dummies, to avoid segfaults s->m.current_picture.mb_mean= (uint8_t *)s->dummy; s->m.current_picture.mb_var= (uint16_t*)s->dummy; s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy; s->m.current_picture.mb_type= s->dummy; s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2; s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1; s->m.dsp= s->dsp; //move ff_init_me(&s->m); s->m.me.dia_size= s->avctx->dia_size; s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { s->m.new_picture.data[0]= src - y*16*stride; //ugly s->m.mb_y= y; for(i=0; i<16 && i + 16*y<height; i++){ memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width); for(x=width; x<16*block_width; x++) src[i*stride+x]= src[i*stride+x-1]; } for(; i<16 && i + 16*y<16*block_height; i++) memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width); for (x = 0; x < block_width; x++) { s->m.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); ff_estimate_p_frame_motion(&s->m, x, y); } s->m.first_slice_line=0; } 
ff_fix_long_p_mvs(&s->m); ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0); } s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { for(i=0; i<16 && i + 16*y<height; i++){ memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width); for(x=width; x<16*block_width; x++) src[i*stride+x]= src[i*stride+x-1]; } for(; i<16 && i + 16*y<16*block_height; i++) memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width); s->m.mb_y= y; for (x = 0; x < block_width; x++) { uint8_t reorder_buffer[3][6][7*32]; int count[3][6]; int offset = y * 16 * stride + x * 16; uint8_t *decoded= decoded_plane + offset; uint8_t *ref= ref_plane + offset; int score[4]={0,0,0,0}, best; uint8_t *temp = s->scratchbuf; if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } s->m.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); if(s->picture.pict_type == FF_I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){ for(i=0; i<6; i++){ init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32); } if(s->picture.pict_type == FF_P_TYPE){ const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA]; put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); score[0]= vlc[1]*lambda; } score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1); for(i=0; i<6; i++){ count[0][i]= put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } }else score[0]= INT_MAX; best=0; if(s->picture.pict_type == FF_P_TYPE){ const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER]; int mx, my, pred_x, pred_y, dxy; int16_t *motion_ptr; motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y); if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){ for(i=0; i<6; i++) init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32); put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); s->m.pb= s->reorder_pb[5]; mx= 
motion_ptr[0]; my= motion_ptr[1]; assert(mx>=-32 && mx<=31); assert(my>=-32 && my<=31); assert(pred_x>=-32 && pred_x<=31); assert(pred_y>=-32 && pred_y<=31); ff_h263_encode_motion(&s->m, mx - pred_x, 1); ff_h263_encode_motion(&s->m, my - pred_y, 1); s->reorder_pb[5]= s->m.pb; score[1] += lambda*put_bits_count(&s->reorder_pb[5]); dxy= (mx&1) + 2*(my&1); s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16); score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0); best= score[1] <= score[0]; vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP]; score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16); score[2]+= vlc[1]*lambda; if(score[2] < score[best] && mx==0 && my==0){ best=2; s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16); for(i=0; i<6; i++){ count[2][i]=0; } put_bits(&s->pb, vlc[1], vlc[0]); } } if(best==1){ for(i=0; i<6; i++){ count[1][i]= put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } }else{ motion_ptr[0 ] = motion_ptr[1 ]= motion_ptr[2 ] = motion_ptr[3 ]= motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]= motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0; } } s->rd_total += score[best]; for(i=5; i>=0; i--){ ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]); } if(best==0){ s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16); } } s->m.first_slice_line=0; } return 0; } static av_cold int svq1_encode_init(AVCodecContext *avctx) { SVQ1Context * const s = avctx->priv_data; dsputil_init(&s->dsp, avctx); avctx->coded_frame= (AVFrame*)&s->picture; s->frame_width = avctx->width; s->frame_height = avctx->height; s->y_block_width = (s->frame_width + 15) / 16; s->y_block_height = (s->frame_height + 15) / 16; s->c_block_width = (s->frame_width / 4 + 15) / 16; s->c_block_height = (s->frame_height / 4 + 15) / 16; s->avctx= avctx; s->m.avctx= avctx; s->m.me.temp = s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t)); s->m.me.map = 
av_mallocz(ME_MAP_SIZE*sizeof(uint32_t)); s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t)); s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t)); s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t)); h263_encode_init(&s->m); //mv_penalty return 0; } static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data) { SVQ1Context * const s = avctx->priv_data; AVFrame *pict = data; AVFrame * const p= (AVFrame*)&s->picture; AVFrame temp; int i; if(avctx->pix_fmt != PIX_FMT_YUV410P){ av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n"); return -1; } if(!s->current_picture.data[0]){ avctx->get_buffer(avctx, &s->current_picture); avctx->get_buffer(avctx, &s->last_picture); s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2); } temp= s->current_picture; s->current_picture= s->last_picture; s->last_picture= temp; init_put_bits(&s->pb, buf, buf_size); *p = *pict; p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? 
FF_P_TYPE : FF_I_TYPE; p->key_frame = p->pict_type == FF_I_TYPE; svq1_write_header(s, p->pict_type); for(i=0; i<3; i++){ if(svq1_encode_plane(s, i, s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i], s->frame_width / (i?4:1), s->frame_height / (i?4:1), s->picture.linesize[i], s->current_picture.linesize[i]) < 0) return -1; } // align_put_bits(&s->pb); while(put_bits_count(&s->pb) & 31) put_bits(&s->pb, 1, 0); flush_put_bits(&s->pb); return put_bits_count(&s->pb) / 8; } static av_cold int svq1_encode_end(AVCodecContext *avctx) { SVQ1Context * const s = avctx->priv_data; int i; av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number)); av_freep(&s->m.me.scratchpad); av_freep(&s->m.me.map); av_freep(&s->m.me.score_map); av_freep(&s->mb_type); av_freep(&s->dummy); av_freep(&s->scratchbuf); for(i=0; i<3; i++){ av_freep(&s->motion_val8[i]); av_freep(&s->motion_val16[i]); } return 0; } AVCodec svq1_encoder = { "svq1", AVMEDIA_TYPE_VIDEO, CODEC_ID_SVQ1, sizeof(SVQ1Context), svq1_encode_init, svq1_encode_frame, svq1_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"), };
123linslouis-android-video-cutter
jni/libavcodec/svq1enc.c
C
asf20
20,192
/* * WMA 9/3/PRO compatible decoder * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion * Copyright (c) 2008 - 2009 Sascha Sommer * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief tables for wmapro decoding */ #ifndef AVCODEC_WMAPRODATA_H #define AVCODEC_WMAPRODATA_H #include <stddef.h> #include <stdint.h> /** * @brief frequencies to divide the frequency spectrum into scale factor bands */ static const uint16_t critical_freq[] = { 100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500, 20675, 28575, 41375, 63875, }; /** * @name Huffman tables for DPCM-coded scale factors * @{ */ #define HUFF_SCALE_SIZE 121 #define HUFF_SCALE_MAXBITS 19 static const uint16_t scale_huffcodes[HUFF_SCALE_SIZE] = { 0xE639, 0xE6C2, 0xE6C1, 0xE6C0, 0xE63F, 0xE63E, 0xE63D, 0xE63C, 0xE63B, 0xE63A, 0xE638, 0xE637, 0xE636, 0xE635, 0xE634, 0xE632, 0xE633, 0xE620, 0x737B, 0xE610, 0xE611, 0xE612, 0xE613, 0xE614, 0xE615, 0xE616, 0xE617, 0xE618, 0xE619, 0xE61A, 0xE61B, 0xE61C, 0xE61D, 0xE61E, 0xE61F, 0xE6C3, 0xE621, 0xE622, 0xE623, 0xE624, 0xE625, 0xE626, 0xE627, 0xE628, 0xE629, 0xE62A, 0xE62B, 0xE62C, 0xE62D, 0xE62E, 0xE62F, 0xE630, 0xE631, 0x1CDF, 0x0E60, 0x0399, 0x00E7, 0x001D, 0x0000, 0x0001, 
0x0001, 0x0001, 0x0002, 0x0006, 0x0002, 0x0007, 0x0006, 0x000F, 0x0038, 0x0072, 0x039A, 0xE6C4, 0xE6C5, 0xE6C6, 0xE6C7, 0xE6C8, 0xE6C9, 0xE6CA, 0xE6CB, 0xE6CC, 0xE6CD, 0xE6CE, 0xE6CF, 0xE6D0, 0xE6D1, 0xE6D2, 0xE6D3, 0xE6D4, 0xE6D5, 0xE6D6, 0xE6D7, 0xE6D8, 0xE6D9, 0xE6DA, 0xE6DB, 0xE6DC, 0xE6DD, 0xE6DE, 0xE6DF, 0xE6E0, 0xE6E1, 0xE6E2, 0xE6E3, 0xE6E4, 0xE6E5, 0xE6E6, 0xE6E7, 0xE6E8, 0xE6E9, 0xE6EA, 0xE6EB, 0xE6EC, 0xE6ED, 0xE6EE, 0xE6EF, 0xE6F0, 0xE6F1, 0xE6F2, 0xE6F3, 0xE6F4, 0xE6F5, }; static const uint8_t scale_huffbits[HUFF_SCALE_SIZE] = { 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 16, 15, 13, 11, 8, 5, 2, 1, 3, 5, 6, 6, 7, 7, 7, 9, 10, 13, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, }; /** @} */ /** * @name Huffman, run and level tables for runlevel-coded scale factors * @{ */ #define HUFF_SCALE_RL_SIZE 120 #define HUFF_SCALE_RL_MAXBITS 21 static const uint32_t scale_rl_huffcodes[HUFF_SCALE_RL_SIZE] = { 0x00010C, 0x000001, 0x10FE2A, 0x000003, 0x000003, 0x000001, 0x000013, 0x000020, 0x000029, 0x000014, 0x000016, 0x000045, 0x000049, 0x00002F, 0x000042, 0x00008E, 0x00008F, 0x000129, 0x000009, 0x00000D, 0x0004AC, 0x00002C, 0x000561, 0x0002E6, 0x00087C, 0x0002E2, 0x00095C, 0x000018, 0x000001, 0x000016, 0x000044, 0x00002A, 0x000007, 0x000159, 0x000143, 0x000128, 0x00015A, 0x00012D, 0x00002B, 0x0000A0, 0x000142, 0x00012A, 0x0002EF, 0x0004AF, 0x00087D, 0x004AE9, 0x0043F9, 0x000067, 0x000199, 0x002B05, 0x001583, 0x0021FE, 0x10FE2C, 0x000004, 0x00002E, 0x00010D, 0x00000A, 0x000244, 0x000017, 0x000245, 0x000011, 0x00010E, 0x00012C, 0x00002A, 0x00002F, 0x000121, 0x000046, 0x00087E, 0x0000BA, 0x000032, 0x0087F0, 0x0056DC, 0x0002EC, 0x0043FA, 0x002B6F, 
0x004AE8, 0x0002B7, 0x10FE2B, 0x000001, 0x000051, 0x000010, 0x0002EE, 0x000B9C, 0x002576, 0x000198, 0x0056DD, 0x0000CD, 0x000AC0, 0x000170, 0x004AEF, 0x00002D, 0x0004AD, 0x0021FF, 0x0005CF, 0x002B04, 0x10FE29, 0x10FE28, 0x0002ED, 0x002E74, 0x021FC4, 0x004AEE, 0x010FE3, 0x087F17, 0x000000, 0x000097, 0x0002E3, 0x000ADA, 0x002575, 0x00173B, 0x0043FB, 0x002E75, 0x10FE2D, 0x0015B6, 0x00056C, 0x000057, 0x000123, 0x000120, 0x00021E, 0x000172, 0x0002B1, }; static const uint8_t scale_rl_huffbits[HUFF_SCALE_RL_SIZE] = { 9, 2, 21, 2, 4, 5, 5, 6, 6, 7, 7, 7, 7, 6, 7, 8, 8, 9, 10, 10, 11, 12, 11, 12, 12, 12, 12, 11, 4, 5, 7, 8, 9, 9, 9, 9, 9, 9, 8, 8, 9, 9, 12, 11, 12, 15, 15, 13, 15, 14, 13, 14, 21, 5, 6, 9, 10, 10, 11, 10, 11, 9, 9, 6, 8, 9, 7, 12, 10, 12, 16, 15, 12, 15, 14, 15, 10, 21, 6, 7, 11, 12, 14, 14, 15, 15, 14, 12, 11, 15, 12, 11, 14, 13, 14, 21, 21, 12, 16, 18, 15, 17, 20, 7, 8, 12, 12, 14, 15, 15, 16, 21, 13, 11, 7, 9, 9, 10, 11, 10, }; static const uint8_t scale_rl_run[HUFF_SCALE_RL_SIZE] = { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 0, 1, 0, 1, }; static const uint8_t scale_rl_level[HUFF_SCALE_RL_SIZE] = { 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 8, 8, 9, 9, }; /** @} */ /** * @name Huffman, run and level codes for runlevel-coded coefficients * @{ */ #define HUFF_COEF0_SIZE 272 #define 
HUFF_COEF0_MAXBITS 21 static const uint32_t coef0_huffcodes[HUFF_COEF0_SIZE] = { 0x00004A, 0x00002B, 0x000000, 0x000003, 0x000006, 0x000009, 0x00000F, 0x000010, 0x000016, 0x000011, 0x000016, 0x000028, 0x00002F, 0x000026, 0x000029, 0x000045, 0x000055, 0x00005D, 0x000042, 0x00004E, 0x000051, 0x00005E, 0x00008D, 0x0000A8, 0x0000AD, 0x000080, 0x000096, 0x00009F, 0x0000AA, 0x0000BE, 0x00011C, 0x000153, 0x000158, 0x000170, 0x000104, 0x00010D, 0x000105, 0x000103, 0x00012F, 0x000177, 0x000175, 0x000157, 0x000174, 0x000225, 0x00023B, 0x00020D, 0x00021F, 0x000281, 0x00027B, 0x000282, 0x0002AC, 0x0002FD, 0x00044F, 0x000478, 0x00044D, 0x0002EC, 0x00044E, 0x000564, 0x000409, 0x00040B, 0x000501, 0x000545, 0x0004F3, 0x000541, 0x00043B, 0x0004F1, 0x0004F4, 0x0008FD, 0x000A94, 0x000811, 0x000B88, 0x000B91, 0x000B93, 0x0008EA, 0x000899, 0x000B8A, 0x000972, 0x0009E5, 0x000A8F, 0x000A84, 0x000A8E, 0x000A00, 0x000830, 0x0008E8, 0x000B95, 0x000871, 0x00083A, 0x000814, 0x000873, 0x000BFE, 0x001728, 0x001595, 0x001712, 0x00102A, 0x001021, 0x001729, 0x00152E, 0x0013C3, 0x001721, 0x001597, 0x00151B, 0x0010F2, 0x001403, 0x001703, 0x001503, 0x001708, 0x0013C1, 0x00170E, 0x00170C, 0x0010E1, 0x0011EA, 0x001020, 0x001500, 0x0017FA, 0x001704, 0x001705, 0x0017F0, 0x0017FB, 0x0021E6, 0x002B2D, 0x0020C6, 0x002B29, 0x002E4A, 0x0023AC, 0x001519, 0x0023F3, 0x002B2C, 0x0021C0, 0x0017FE, 0x0023D7, 0x0017F9, 0x0012E7, 0x0013C0, 0x002261, 0x0023D3, 0x002057, 0x002056, 0x0021D2, 0x0020C7, 0x0023D2, 0x0020EC, 0x0044C0, 0x002FE2, 0x00475B, 0x002A03, 0x002FE3, 0x0021E2, 0x0021D0, 0x002A31, 0x002E13, 0x002E05, 0x0047E5, 0x00000E, 0x000024, 0x000088, 0x0000B9, 0x00010C, 0x000224, 0x0002B3, 0x000283, 0x0002ED, 0x00047B, 0x00041E, 0x00043D, 0x0004F5, 0x0005FD, 0x000A92, 0x000B96, 0x000838, 0x000971, 0x000B83, 0x000B80, 0x000BF9, 0x0011D3, 0x0011E8, 0x0011D7, 0x001527, 0x0011F8, 0x001073, 0x0010F0, 0x0010E4, 0x0017F8, 0x001062, 0x001402, 0x0017E3, 0x00151A, 0x001077, 0x00152B, 0x00170D, 0x0021D3, 0x002E41, 
0x0013C2, 0x000029, 0x0000A9, 0x00025D, 0x000419, 0x000544, 0x000B8B, 0x0009E4, 0x0011D2, 0x001526, 0x001724, 0x0012E6, 0x00150B, 0x0017FF, 0x002E26, 0x002E4B, 0x002B28, 0x0021E3, 0x002A14, 0x00475A, 0x002E12, 0x000057, 0x00023E, 0x000A90, 0x000BF0, 0x001072, 0x001502, 0x0023D6, 0x0020ED, 0x002A30, 0x0044C7, 0x00008C, 0x00047F, 0x00152A, 0x002262, 0x002E04, 0x0000A1, 0x0005F9, 0x000173, 0x000875, 0x000171, 0x00152D, 0x0002E3, 0x0017E2, 0x0002AD, 0x0021C1, 0x000479, 0x0021E7, 0x00041F, 0x005C4E, 0x000543, 0x005C4F, 0x000A91, 0x00898D, 0x000B97, 0x008746, 0x000970, 0x008745, 0x000B85, 0x00A856, 0x00152F, 0x010E8E, 0x0010E5, 0x00A857, 0x00170F, 0x021D11, 0x002A58, 0x010E8F, 0x002E40, 0x021D13, 0x002A59, 0x043A25, 0x002A02, 0x043A21, 0x0044C1, 0x087448, 0x0047E4, 0x043A20, 0x00542A, 0x087449, 0x00898C, }; static const uint8_t coef0_huffbits[HUFF_COEF0_SIZE] = { 8, 7, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 13, 13, 14, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 14, 14, 14, 14, 14, 14, 14, 15, 14, 15, 14, 14, 14, 14, 14, 14, 15, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 4, 7, 8, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 13, 14, 15, 14, 14, 6, 9, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 7, 10, 12, 13, 14, 14, 14, 15, 15, 15, 8, 11, 13, 14, 15, 9, 12, 9, 13, 10, 13, 10, 14, 11, 15, 11, 15, 12, 15, 12, 15, 12, 16, 12, 17, 13, 17, 13, 17, 13, 18, 14, 17, 14, 19, 14, 18, 14, 19, 14, 20, 15, 20, 15, 21, 15, 20, 16, 21, 16, }; #define HUFF_COEF1_SIZE 244 #define HUFF_COEF1_MAXBITS 22 static const uint32_t 
coef1_huffcodes[HUFF_COEF1_SIZE] = { 0x0001E2, 0x00007F, 0x000000, 0x000002, 0x000008, 0x00000E, 0x000019, 0x00002F, 0x000037, 0x000060, 0x00006C, 0x000095, 0x0000C6, 0x0000F0, 0x00012E, 0x000189, 0x0001A5, 0x0001F8, 0x000253, 0x00030A, 0x000344, 0x00034D, 0x0003F2, 0x0004BD, 0x0005D7, 0x00062A, 0x00068B, 0x000693, 0x000797, 0x00097D, 0x000BAB, 0x000C52, 0x000C5E, 0x000D21, 0x000D20, 0x000F1A, 0x000FCE, 0x000FD1, 0x0012F1, 0x001759, 0x0018AC, 0x0018A7, 0x0018BF, 0x001A2B, 0x001E52, 0x001E50, 0x001E31, 0x001FB8, 0x0025E6, 0x0025E7, 0x002EB4, 0x002EB7, 0x003169, 0x00315B, 0x00317C, 0x00316C, 0x0034CA, 0x00348D, 0x003F40, 0x003CA2, 0x003F76, 0x004BC3, 0x004BE5, 0x003F73, 0x004BF8, 0x004BF9, 0x006131, 0x00628B, 0x006289, 0x0062DA, 0x00628A, 0x0062D4, 0x006997, 0x0062B4, 0x006918, 0x00794D, 0x007E7B, 0x007E87, 0x007EEA, 0x00794E, 0x00699D, 0x007967, 0x00699F, 0x0062DB, 0x007E7A, 0x007EEB, 0x00BAC0, 0x0097C9, 0x00C537, 0x00C5AB, 0x00D233, 0x00D338, 0x00BAC1, 0x00D23D, 0x012F91, 0x00D339, 0x00FDC8, 0x00D23C, 0x00FDDC, 0x00FDC9, 0x00FDDD, 0x00D33C, 0x000003, 0x000016, 0x00003E, 0x0000C3, 0x0001A1, 0x000347, 0x00062E, 0x000BAA, 0x000F2D, 0x001A2A, 0x001E58, 0x00309B, 0x003CA3, 0x005D6A, 0x00629A, 0x006996, 0x00794F, 0x007EE5, 0x00BAD7, 0x00C5AA, 0x00C5F4, 0x00FDDF, 0x00FDDE, 0x018A20, 0x018A6D, 0x01A67B, 0x01A464, 0x025F21, 0x01F9E2, 0x01F9E3, 0x00000A, 0x00003D, 0x000128, 0x0003C7, 0x000C24, 0x0018A3, 0x002EB1, 0x003CB2, 0x00691F, 0x007E79, 0x000013, 0x0000BB, 0x00034E, 0x000D14, 0x0025FD, 0x004BE7, 0x000024, 0x000188, 0x0007EF, 0x000035, 0x000308, 0x0012F2, 0x00005C, 0x0003F6, 0x0025E0, 0x00006D, 0x000698, 0x000096, 0x000C25, 0x0000C7, 0x000F1B, 0x0000F3, 0x0012FF, 0x000174, 0x001A66, 0x0001A0, 0x003099, 0x0001E4, 0x00316B, 0x000252, 0x003F31, 0x00030B, 0x004BE6, 0x000346, 0x0062FB, 0x00034F, 0x007966, 0x0003F5, 0x007E86, 0x0005D4, 0x00C511, 0x00062C, 0x00C5F5, 0x000692, 0x00F299, 0x000795, 0x00F298, 0x0007E9, 0x018A21, 0x00097E, 0x0175AD, 0x000C27, 0x01A67A, 0x000C57, 
0x02EB59, 0x000D22, 0x0314D9, 0x000F19, 0x03F3C2, 0x000FCD, 0x0348CB, 0x0012F8, 0x04BE41, 0x0018A0, 0x03F3C1, 0x0018A1, 0x04BE40, 0x0018B7, 0x0629B0, 0x001A64, 0x0D2329, 0x001E30, 0x03F3C3, 0x001F9F, 0x0BAD62, 0x001F99, 0x0FCF00, 0x00309A, 0x0629B1, 0x002EB6, 0x175AC3, 0x00314C, 0x069195, 0x003168, 0x0BAD63, 0x00348E, 0x175AC1, 0x003F30, 0x07E781, 0x003F41, 0x0D2328, 0x003F42, 0x1F9E03, 0x004BC2, 0x175AC2, 0x003F74, 0x175AC0, 0x005D61, 0x3F3C05, 0x006130, 0x3F3C04, 0x0062B5, }; static const uint8_t coef1_huffbits[HUFF_COEF1_SIZE] = { 9, 7, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16, 16, 16, 3, 5, 6, 8, 9, 10, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 17, 17, 4, 6, 9, 10, 12, 13, 14, 14, 15, 15, 5, 8, 10, 12, 14, 15, 6, 9, 11, 6, 10, 13, 7, 10, 14, 7, 11, 8, 12, 8, 12, 8, 13, 9, 13, 9, 14, 9, 14, 10, 14, 10, 15, 10, 15, 10, 15, 10, 15, 11, 16, 11, 16, 11, 16, 11, 16, 11, 17, 12, 17, 12, 17, 12, 18, 12, 18, 12, 18, 12, 18, 13, 19, 13, 18, 13, 19, 13, 19, 13, 20, 13, 18, 13, 20, 13, 20, 14, 19, 14, 21, 14, 19, 14, 20, 14, 21, 14, 19, 14, 20, 14, 21, 15, 21, 14, 21, 15, 22, 15, 22, 15, }; static const uint16_t coef0_run[HUFF_COEF0_SIZE] = { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 
114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, }; static const float coef0_level[HUFF_COEF0_SIZE] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, }; static const uint16_t coef1_run[HUFF_COEF1_SIZE] = { 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, }; static const float coef1_level[HUFF_COEF1_SIZE] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 51, 52, }; /** @} */ /** * @name Huffman and vector lookup tables for vector-coded coefficients * @{ */ #define HUFF_VEC4_SIZE 127 #define HUFF_VEC4_MAXBITS 14 static const uint16_t vec4_huffcodes[HUFF_VEC4_SIZE] = { 0x0019, 0x0027, 0x00F2, 0x03BA, 0x0930, 0x1267, 0x0031, 0x0030, 0x0097, 0x0221, 0x058B, 0x0124, 0x00EB, 0x01D4, 0x03D8, 0x0584, 0x0364, 0x045F, 0x0F66, 0x0931, 0x24CD, 0x002F, 0x0039, 0x00E8, 0x02C3, 0x078A, 0x0037, 0x0029, 0x0084, 0x01B1, 0x00ED, 0x0086, 0x00F9, 0x03AB, 0x01EB, 0x08BC, 0x011E, 0x00F3, 0x0220, 0x058A, 0x00EC, 0x008E, 0x012B, 0x01EA, 0x0119, 0x04B0, 0x04B1, 0x03B8, 0x0691, 0x0365, 0x01ED, 0x049A, 0x0EA9, 0x0EA8, 0x08BD, 0x24CC, 0x0026, 0x0035, 0x00DB, 0x02C4, 0x07B2, 0x0038, 0x002B, 0x007F, 0x01B3, 0x00F4, 
0x0091, 0x0116, 0x03BB, 0x0215, 0x0932, 0x002D, 0x002A, 0x008A, 0x01DE, 0x0028, 0x0020, 0x005C, 0x0090, 0x0068, 0x01EE, 0x00E9, 0x008D, 0x012A, 0x0087, 0x005D, 0x0118, 0x0349, 0x01EF, 0x01E3, 0x08B9, 0x00F0, 0x00D3, 0x0214, 0x049B, 0x00DA, 0x0089, 0x0125, 0x0217, 0x012D, 0x0690, 0x0094, 0x007D, 0x011F, 0x007E, 0x0059, 0x0127, 0x01A5, 0x0111, 0x00F8, 0x045D, 0x03B9, 0x0259, 0x0580, 0x02C1, 0x01DF, 0x0585, 0x0216, 0x0163, 0x01B0, 0x03C4, 0x08B8, 0x078B, 0x0755, 0x0581, 0x0F67, 0x0000, }; static const uint8_t vec4_huffbits[HUFF_VEC4_SIZE] = { 5, 6, 8, 10, 12, 13, 6, 6, 8, 10, 11, 9, 8, 9, 10, 11, 10, 11, 12, 12, 14, 6, 6, 8, 10, 11, 6, 6, 8, 9, 8, 8, 8, 10, 9, 12, 9, 8, 10, 11, 8, 8, 9, 9, 9, 11, 11, 10, 11, 10, 9, 11, 12, 12, 12, 14, 6, 6, 8, 10, 11, 6, 6, 7, 9, 8, 8, 9, 10, 10, 12, 6, 6, 8, 9, 6, 6, 7, 8, 7, 9, 8, 8, 9, 8, 7, 9, 10, 9, 9, 12, 8, 8, 10, 11, 8, 8, 9, 10, 9, 11, 8, 7, 9, 7, 7, 9, 9, 9, 8, 11, 10, 10, 11, 10, 9, 11, 10, 9, 9, 10, 12, 11, 11, 11, 12, 1, }; #define HUFF_VEC2_SIZE 137 #define HUFF_VEC2_MAXBITS 12 static const uint16_t vec2_huffcodes[HUFF_VEC2_SIZE] = { 0x055, 0x01C, 0x01A, 0x02B, 0x028, 0x067, 0x08B, 0x039, 0x170, 0x10D, 0x2A5, 0x047, 0x464, 0x697, 0x523, 0x8CB, 0x01B, 0x00E, 0x000, 0x010, 0x012, 0x036, 0x048, 0x04C, 0x0C2, 0x09B, 0x171, 0x03B, 0x224, 0x34A, 0x2D6, 0x019, 0x00F, 0x002, 0x014, 0x017, 0x006, 0x05D, 0x054, 0x0C7, 0x0B4, 0x192, 0x10E, 0x233, 0x043, 0x02C, 0x00F, 0x013, 0x006, 0x02F, 0x02C, 0x068, 0x077, 0x0DF, 0x111, 0x1A4, 0x16A, 0x2A4, 0x027, 0x011, 0x018, 0x02D, 0x00F, 0x04A, 0x040, 0x097, 0x01F, 0x11B, 0x022, 0x16D, 0x066, 0x035, 0x005, 0x02B, 0x049, 0x009, 0x075, 0x0CB, 0x0AA, 0x187, 0x106, 0x08A, 0x047, 0x060, 0x06E, 0x01D, 0x074, 0x0C4, 0x01E, 0x118, 0x1A7, 0x038, 0x042, 0x053, 0x076, 0x0A8, 0x0CA, 0x082, 0x110, 0x18D, 0x12D, 0x0B9, 0x0C8, 0x0DE, 0x01C, 0x0AB, 0x113, 0x18C, 0x10F, 0x09A, 0x0A5, 0x0B7, 0x11A, 0x186, 0x1A6, 0x259, 0x153, 0x18A, 0x193, 0x020, 0x10C, 0x046, 0x03A, 0x107, 0x149, 0x16C, 0x2D7, 0x225, 0x258, 
0x316, 0x696, 0x317, 0x042, 0x522, 0x290, 0x8CA, 0x001, }; static const uint8_t vec2_huffbits[HUFF_VEC2_SIZE] = { 7, 6, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 6, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 10, 10, 11, 6, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 6, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 7, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 7, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 8, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 8, 8, 8, 8, 8, 9, 9, 9, 9, 8, 8, 8, 9, 9, 9, 9, 10, 9, 9, 9, 9, 9, 9, 10, 9, 9, 9, 10, 10, 11, 10, 10, 10, 10, 11, 10, 10, 10, 11, 10, 11, 12, 11, 12, 3, }; #define HUFF_VEC1_SIZE 101 #define HUFF_VEC1_MAXBITS 11 static const uint16_t vec1_huffcodes[HUFF_VEC1_SIZE] = { 0x01A, 0x003, 0x017, 0x010, 0x00C, 0x009, 0x005, 0x000, 0x00D, 0x00A, 0x009, 0x00C, 0x00F, 0x002, 0x004, 0x007, 0x00B, 0x00F, 0x01C, 0x006, 0x010, 0x015, 0x01C, 0x022, 0x03B, 0x00E, 0x019, 0x023, 0x034, 0x036, 0x03A, 0x047, 0x008, 0x00A, 0x01E, 0x031, 0x037, 0x050, 0x053, 0x06B, 0x06F, 0x08C, 0x0E8, 0x0EA, 0x0EB, 0x016, 0x03E, 0x03F, 0x06C, 0x089, 0x08A, 0x0A3, 0x0A4, 0x0D4, 0x0DD, 0x0EC, 0x0EE, 0x11A, 0x1D2, 0x024, 0x025, 0x02E, 0x027, 0x0C2, 0x0C0, 0x0DA, 0x0DB, 0x111, 0x144, 0x116, 0x14A, 0x145, 0x1B8, 0x1AB, 0x1DA, 0x1DE, 0x1DB, 0x1DF, 0x236, 0x237, 0x3A6, 0x3A7, 0x04D, 0x04C, 0x05E, 0x05F, 0x183, 0x182, 0x186, 0x221, 0x187, 0x220, 0x22E, 0x22F, 0x296, 0x354, 0x297, 0x355, 0x372, 0x373, 0x016, }; static const uint8_t vec1_huffbits[HUFF_VEC1_SIZE] = { 7, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 5, }; static const uint16_t symbol_to_vec4[HUFF_VEC4_SIZE] = { 0, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 32, 33, 34, 35, 48, 49, 50, 64, 65, 80, 256, 257, 258, 259, 260, 272, 273, 274, 275, 288, 289, 290, 304, 305, 
320, 512, 513, 514, 515, 528, 529, 530, 544, 545, 560, 768, 769, 770, 784, 785, 800, 1024, 1025, 1040, 1280, 4096, 4097, 4098, 4099, 4100, 4112, 4113, 4114, 4115, 4128, 4129, 4130, 4144, 4145, 4160, 4352, 4353, 4354, 4355, 4368, 4369, 4370, 4384, 4385, 4400, 4608, 4609, 4610, 4624, 4625, 4640, 4864, 4865, 4880, 5120, 8192, 8193, 8194, 8195, 8208, 8209, 8210, 8224, 8225, 8240, 8448, 8449, 8450, 8464, 8465, 8480, 8704, 8705, 8720, 8960, 12288, 12289, 12290, 12304, 12305, 12320, 12544, 12545, 12560, 12800, 16384, 16385, 16400, 16640, 20480, 0, }; static const uint8_t symbol_to_vec2[HUFF_VEC2_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117, 118, 119, 120, 128, 129, 130, 131, 132, 133, 134, 135, 144, 145, 146, 147, 148, 149, 150, 160, 161, 162, 163, 164, 165, 176, 177, 178, 179, 180, 192, 193, 194, 195, 208, 209, 210, 224, 225, 240, 0, }; /** @} */ /** * @brief decorrelation matrix for multichannel streams **/ static const float default_decorrelation_matrices[] = { 1.000000, 0.707031, -0.707031, 0.707031, 0.707031, 0.578125, 0.707031, 0.410156, 0.578125, -0.707031, 0.410156, 0.578125, 0.000000, -0.816406, 0.500000, 0.652344, 0.500000, 0.269531, 0.500000, 0.269531, -0.500000, -0.652344, 0.500000, -0.269531, -0.500000, 0.652344, 0.500000, -0.652344, 0.500000, -0.269531, 0.445312, 0.601562, 0.511719, 0.371094, 0.195312, 0.445312, 0.371094, -0.195312, -0.601562, -0.511719, 0.445312, 0.000000, -0.632812, 0.000000, 0.632812, 0.445312, -0.371094, -0.195312, 0.601562, -0.511719, 0.445312, -0.601562, 0.511719, -0.371094, 0.195312, 0.410156, 0.558594, 0.500000, 0.410156, 0.289062, 0.148438, 0.410156, 0.410156, 0.000000, -0.410156, 
-0.578125, -0.410156, 0.410156, 0.148438, -0.500000, -0.410156, 0.289062, 0.558594, 0.410156, -0.148438, -0.500000, 0.410156, 0.289062, -0.558594, 0.410156, -0.410156, 0.000000, 0.410156, -0.578125, 0.410156, 0.410156, -0.558594, 0.500000, -0.410156, 0.289062, -0.148438, }; /** * @brief default decorrelation matrix offsets */ static const float * const default_decorrelation[] = { NULL, &default_decorrelation_matrices[0], &default_decorrelation_matrices[1], &default_decorrelation_matrices[5], &default_decorrelation_matrices[14], &default_decorrelation_matrices[30], &default_decorrelation_matrices[55] }; #endif /* AVCODEC_WMAPRODATA_H */
123linslouis-android-video-cutter
jni/libavcodec/wmaprodata.h
C
asf20
28,506
/* * QDM2 compatible decoder * Copyright (c) 2003 Ewald Snel * Copyright (c) 2005 Benjamin Larsson * Copyright (c) 2005 Alex Beregszaszi * Copyright (c) 2005 Roberto Togni * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Various QDM2 tables. */ #ifndef AVCODEC_QDM2DATA_H #define AVCODEC_QDM2DATA_H #include <stdint.h> /** VLC TABLES **/ /* values in this table range from -1..23; adjust retrieved value by -1 */ static const uint16_t vlc_tab_level_huffcodes[24] = { 0x037c, 0x0004, 0x003c, 0x004c, 0x003a, 0x002c, 0x001c, 0x001a, 0x0024, 0x0014, 0x0001, 0x0002, 0x0000, 0x0003, 0x0007, 0x0005, 0x0006, 0x0008, 0x0009, 0x000a, 0x000c, 0x00fc, 0x007c, 0x017c }; static const uint8_t vlc_tab_level_huffbits[24] = { 10, 6, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 4, 4, 5, 7, 8, 9, 10 }; /* values in this table range from -1..36; adjust retrieved value by -1 */ static const uint16_t vlc_tab_diff_huffcodes[37] = { 0x1c57, 0x0004, 0x0000, 0x0001, 0x0003, 0x0002, 0x000f, 0x000e, 0x0007, 0x0016, 0x0037, 0x0027, 0x0026, 0x0066, 0x0006, 0x0097, 0x0046, 0x01c6, 0x0017, 0x0786, 0x0086, 0x0257, 0x00d7, 0x0357, 0x00c6, 0x0386, 0x0186, 0x0000, 0x0157, 0x0c57, 0x0057, 0x0000, 0x0b86, 0x0000, 0x1457, 0x0000, 0x0457 }; static const uint8_t vlc_tab_diff_huffbits[37] = { 13, 3, 3, 2, 3, 3, 4, 
4, 6, 5, 6, 6, 7, 7, 8, 8, 8, 9, 8, 11, 9, 10, 8, 10, 9, 12, 10, 0, 10, 13, 11, 0, 12, 0, 13, 0, 13 }; /* values in this table range from -1..5; adjust retrieved value by -1 */ static const uint8_t vlc_tab_run_huffcodes[6] = { 0x1f, 0x00, 0x01, 0x03, 0x07, 0x0f }; static const uint8_t vlc_tab_run_huffbits[6] = { 5, 1, 2, 3, 4, 5 }; /* values in this table range from -1..19; adjust retrieved value by -1 */ static const uint16_t vlc_tab_tone_level_idx_hi1_huffcodes[20] = { 0x5714, 0x000c, 0x0002, 0x0001, 0x0000, 0x0004, 0x0034, 0x0054, 0x0094, 0x0014, 0x0114, 0x0214, 0x0314, 0x0614, 0x0e14, 0x0f14, 0x2714, 0x0714, 0x1714, 0x3714 }; static const uint8_t vlc_tab_tone_level_idx_hi1_huffbits[20] = { 15, 4, 2, 1, 3, 5, 6, 7, 8, 10, 10, 11, 11, 12, 12, 12, 14, 14, 15, 14 }; /* values in this table range from -1..23; adjust retrieved value by -1 */ static const uint16_t vlc_tab_tone_level_idx_mid_huffcodes[24] = { 0x0fea, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03ea, 0x00ea, 0x002a, 0x001a, 0x0006, 0x0001, 0x0000, 0x0002, 0x000a, 0x006a, 0x01ea, 0x07ea }; static const uint8_t vlc_tab_tone_level_idx_mid_huffbits[24] = { 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 9, 7, 5, 3, 1, 2, 4, 6, 8, 10, 12 }; /* values in this table range from -1..23; adjust retrieved value by -1 */ static const uint16_t vlc_tab_tone_level_idx_hi2_huffcodes[24] = { 0x0664, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0064, 0x00e4, 0x00a4, 0x0068, 0x0004, 0x0008, 0x0014, 0x0018, 0x0000, 0x0001, 0x0002, 0x0003, 0x000c, 0x0028, 0x0024, 0x0164, 0x0000, 0x0264 }; static const uint8_t vlc_tab_tone_level_idx_hi2_huffbits[24] = { 11, 0, 0, 0, 0, 0, 10, 8, 8, 7, 6, 6, 5, 5, 4, 2, 2, 2, 4, 7, 8, 9, 0, 11 }; /* values in this table range from -1..8; adjust retrieved value by -1 */ static const uint8_t vlc_tab_type30_huffcodes[9] = { 0x3c, 0x06, 0x00, 0x01, 0x03, 0x02, 0x04, 0x0c, 0x1c }; static const uint8_t vlc_tab_type30_huffbits[9] = { 6, 3, 3, 2, 2, 3, 4, 5, 6 }; 
/* values in this table range from -1..9; adjust retrieved value by -1 */ static const uint8_t vlc_tab_type34_huffcodes[10] = { 0x18, 0x00, 0x01, 0x04, 0x05, 0x07, 0x03, 0x02, 0x06, 0x08 }; static const uint8_t vlc_tab_type34_huffbits[10] = { 5, 4, 3, 3, 3, 3, 3, 3, 3, 5 }; /* values in this table range from -1..22; adjust retrieved value by -1 */ static const uint16_t vlc_tab_fft_tone_offset_0_huffcodes[23] = { 0x038e, 0x0001, 0x0000, 0x0022, 0x000a, 0x0006, 0x0012, 0x0002, 0x001e, 0x003e, 0x0056, 0x0016, 0x000e, 0x0032, 0x0072, 0x0042, 0x008e, 0x004e, 0x00f2, 0x002e, 0x0036, 0x00c2, 0x018e }; static const uint8_t vlc_tab_fft_tone_offset_0_huffbits[23] = { 10, 1, 2, 6, 4, 5, 6, 7, 6, 6, 7, 7, 8, 7, 8, 8, 9, 7, 8, 6, 6, 8, 10 }; /* values in this table range from -1..27; adjust retrieved value by -1 */ static const uint16_t vlc_tab_fft_tone_offset_1_huffcodes[28] = { 0x07a4, 0x0001, 0x0020, 0x0012, 0x001c, 0x0008, 0x0006, 0x0010, 0x0000, 0x0014, 0x0004, 0x0032, 0x0070, 0x000c, 0x0002, 0x003a, 0x001a, 0x002c, 0x002a, 0x0022, 0x0024, 0x000a, 0x0064, 0x0030, 0x0062, 0x00a4, 0x01a4, 0x03a4 }; static const uint8_t vlc_tab_fft_tone_offset_1_huffbits[28] = { 11, 1, 6, 6, 5, 4, 3, 6, 6, 5, 6, 6, 7, 6, 6, 6, 6, 6, 6, 7, 8, 6, 7, 7, 7, 9, 10, 11 }; /* values in this table range from -1..31; adjust retrieved value by -1 */ static const uint16_t vlc_tab_fft_tone_offset_2_huffcodes[32] = { 0x1760, 0x0001, 0x0000, 0x0082, 0x000c, 0x0006, 0x0003, 0x0007, 0x0008, 0x0004, 0x0010, 0x0012, 0x0022, 0x001a, 0x0000, 0x0020, 0x000a, 0x0040, 0x004a, 0x006a, 0x002a, 0x0042, 0x0002, 0x0060, 0x00aa, 0x00e0, 0x00c2, 0x01c2, 0x0160, 0x0360, 0x0760, 0x0f60 }; static const uint8_t vlc_tab_fft_tone_offset_2_huffbits[32] = { 13, 2, 0, 8, 4, 3, 3, 3, 4, 4, 5, 5, 6, 5, 7, 7, 7, 7, 7, 7, 8, 8, 8, 9, 8, 8, 9, 9, 10, 11, 13, 12 }; /* values in this table range from -1..34; adjust retrieved value by -1 */ static const uint16_t vlc_tab_fft_tone_offset_3_huffcodes[35] = { 0x33ea, 0x0005, 0x0000, 0x000c, 
0x0000, 0x0006, 0x0003, 0x0008, 0x0002, 0x0001, 0x0004, 0x0007, 0x001a, 0x000f, 0x001c, 0x002c, 0x000a, 0x001d, 0x002d, 0x002a, 0x000d, 0x004c, 0x008c, 0x006a, 0x00cd, 0x004d, 0x00ea, 0x020c, 0x030c, 0x010c, 0x01ea, 0x07ea, 0x0bea, 0x03ea, 0x13ea }; static const uint8_t vlc_tab_fft_tone_offset_3_huffbits[35] = { 14, 4, 0, 10, 4, 3, 3, 4, 4, 3, 4, 4, 5, 4, 5, 6, 6, 5, 6, 7, 7, 7, 8, 8, 8, 8, 9, 10, 10, 10, 10, 11, 12, 13, 14 }; /* values in this table range from -1..37; adjust retrieved value by -1 */ static const uint16_t vlc_tab_fft_tone_offset_4_huffcodes[38] = { 0x5282, 0x0016, 0x0000, 0x0136, 0x0004, 0x0000, 0x0007, 0x000a, 0x000e, 0x0003, 0x0001, 0x000d, 0x0006, 0x0009, 0x0012, 0x0005, 0x0025, 0x0022, 0x0015, 0x0002, 0x0076, 0x0035, 0x0042, 0x00c2, 0x0182, 0x00b6, 0x0036, 0x03c2, 0x0482, 0x01c2, 0x0682, 0x0882, 0x0a82, 0x0082, 0x0282, 0x1282, 0x3282, 0x2282 }; static const uint8_t vlc_tab_fft_tone_offset_4_huffbits[38] = { 15, 6, 0, 9, 3, 3, 3, 4, 4, 3, 4, 4, 5, 4, 5, 6, 6, 6, 6, 8, 7, 6, 8, 9, 9, 8, 9, 10, 11, 10, 11, 12, 12, 12, 14, 15, 14, 14 }; /** FFT TABLES **/ /* values in this table range from -1..27; adjust retrieved value by -1 */ static const uint16_t fft_level_exp_alt_huffcodes[28] = { 0x1ec6, 0x0006, 0x00c2, 0x0142, 0x0242, 0x0246, 0x00c6, 0x0046, 0x0042, 0x0146, 0x00a2, 0x0062, 0x0026, 0x0016, 0x000e, 0x0005, 0x0004, 0x0003, 0x0000, 0x0001, 0x000a, 0x0012, 0x0002, 0x0022, 0x01c6, 0x02c6, 0x06c6, 0x0ec6 }; static const uint8_t fft_level_exp_alt_huffbits[28] = { 13, 7, 8, 9, 10, 10, 10, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 2, 3, 3, 4, 5, 7, 8, 9, 11, 12, 13 }; /* values in this table range from -1..19; adjust retrieved value by -1 */ static const uint16_t fft_level_exp_huffcodes[20] = { 0x0f24, 0x0001, 0x0002, 0x0000, 0x0006, 0x0005, 0x0007, 0x000c, 0x000b, 0x0014, 0x0013, 0x0004, 0x0003, 0x0023, 0x0064, 0x00a4, 0x0024, 0x0124, 0x0324, 0x0724 }; static const uint8_t fft_level_exp_huffbits[20] = { 12, 3, 3, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 10, 
11, 12 }; /* values in this table range from -1..6; adjust retrieved value by -1 */ static const uint8_t fft_stereo_exp_huffcodes[7] = { 0x3e, 0x01, 0x00, 0x02, 0x06, 0x0e, 0x1e }; static const uint8_t fft_stereo_exp_huffbits[7] = { 6, 1, 2, 3, 4, 5, 6 }; /* values in this table range from -1..8; adjust retrieved value by -1 */ static const uint8_t fft_stereo_phase_huffcodes[9] = { 0x35, 0x02, 0x00, 0x01, 0x0d, 0x15, 0x05, 0x09, 0x03 }; static const uint8_t fft_stereo_phase_huffbits[9] = { 6, 2, 2, 4, 4, 6, 5, 4, 2 }; static const int fft_cutoff_index_table[4][2] = { { 1, 2 }, {-1, 0 }, {-1,-2 }, { 0, 0 } }; static const int16_t fft_level_index_table[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, }; static const uint8_t last_coeff[3] = { 4, 7, 10 }; static const uint8_t coeff_per_sb_for_avg[3][30] = { { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, { 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9 } }; static const uint32_t dequant_table[3][10][30] = { { { 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 256, 256, 205, 
154, 102, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 51, 102, 154, 205, 256, 238, 219, 201, 183, 165, 146, 128, 110, 91, 73, 55, 37, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 18, 37, 55, 73, 91, 110, 128, 146, 165, 183, 201, 219, 238, 256, 228, 199, 171, 142, 114, 85, 57, 28 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 256, 171, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 85, 171, 256, 171, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 85, 171, 256, 219, 183, 146, 110, 73, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 73, 110, 146, 183, 219, 256, 228, 199, 171, 142, 114, 85, 57, 28, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 57, 85, 114, 142, 171, 199, 228, 256, 213, 171, 128, 85, 43 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 256, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 256, 171, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 85, 171, 256, 192, 128, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 128, 192, 256, 205, 154, 102, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 102, 154, 205, 256, 213, 171, 128, 85, 43, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 85, 128, 171, 213, 256, 213, 171, 128, 85, 43 } } }; static const uint8_t coeff_per_sb_for_dequant[3][30] = { { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, { 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6 }, { 0, 1, 2, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9 } }; /* first index is subband, 2nd index is 0, 1 or 3 (2 is unused) */ static const int8_t tone_level_idx_offset_table[30][4] = { { -50, -50, 0, -50 }, { -50, -50, 0, -50 }, { -50, -9, 0, -19 }, { -16, -6, 0, -12 }, { -11, -4, 0, -8 }, { -8, -3, 0, -6 }, { -7, -3, 0, -5 }, { -6, -2, 0, -4 }, { -5, -2, 0, -3 }, { -4, -1, 0, -3 }, { -4, -1, 0, -2 }, { -3, -1, 0, -2 }, { -3, -1, 0, -2 }, { -3, -1, 0, -2 }, { -2, -1, 0, -1 }, { -2, -1, 0, -1 }, { -2, -1, 0, -1 }, { -2, 0, 0, -1 }, { -2, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, -1 }, { -1, 0, 0, 0 }, { -1, 0, 0, 0 }, { -1, 0, 0, 0 }, { -1, 0, 0, 0 } }; /* all my 
samples have 1st index 0 or 1 */ /* second index is subband, only indexes 0-29 seem to be used */ static const int8_t coding_method_table[5][30] = { { 34, 30, 24, 24, 16, 16, 16, 16, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, { 34, 30, 24, 24, 16, 16, 16, 16, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, { 34, 30, 30, 30, 24, 24, 16, 16, 16, 16, 16, 16, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }, { 34, 34, 30, 30, 24, 24, 24, 24, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 10, 10, 10, 10, 10, 10, 10, 10 }, { 34, 34, 30, 30, 30, 30, 30, 30, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16 }, }; static const int vlc_stage3_values[60] = { 0, 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 36, 44, 52, 60, 76, 92, 108, 124, 156, 188, 220, 252, 316, 380, 444, 508, 636, 764, 892, 1020, 1276, 1532, 1788, 2044, 2556, 3068, 3580, 4092, 5116, 6140, 7164, 8188, 10236, 12284, 14332, 16380, 20476, 24572, 28668, 32764, 40956, 49148, 57340, 65532, 81916, 98300,114684 }; static const float fft_tone_sample_table[4][16][5] = { { { .0100000000f,-.0037037037f,-.0020000000f,-.0069444444f,-.0018416207f }, { .0416666667f, .0000000000f, .0000000000f,-.0208333333f,-.0123456791f }, { .1250000000f, .0558035709f, .0330687836f,-.0164473690f,-.0097465888f }, { .1562500000f, .0625000000f, .0370370370f,-.0062500000f,-.0037037037f }, { .1996007860f, .0781250000f, .0462962948f, .0022727272f, .0013468013f }, { .2000000000f, .0625000000f, .0370370373f, .0208333333f, .0074074073f }, { .2127659619f, .0555555556f, .0329218097f, .0208333333f, .0123456791f }, { .2173913121f, .0473484844f, .0280583613f, .0347222239f, .0205761325f }, { .2173913121f, .0347222239f, .0205761325f, .0473484844f, .0280583613f }, { .2127659619f, .0208333333f, .0123456791f, .0555555556f, .0329218097f }, { .2000000000f, .0208333333f, .0074074073f, .0625000000f, 
.0370370370f }, { .1996007860f, .0022727272f, .0013468013f, .0781250000f, .0462962948f }, { .1562500000f,-.0062500000f,-.0037037037f, .0625000000f, .0370370370f }, { .1250000000f,-.0164473690f,-.0097465888f, .0558035709f, .0330687836f }, { .0416666667f,-.0208333333f,-.0123456791f, .0000000000f, .0000000000f }, { .0100000000f,-.0069444444f,-.0018416207f,-.0037037037f,-.0020000000f } }, { { .0050000000f,-.0200000000f, .0125000000f,-.3030303030f, .0020000000f }, { .1041666642f, .0400000000f,-.0250000000f, .0333333333f,-.0200000000f }, { .1250000000f, .0100000000f, .0142857144f,-.0500000007f,-.0200000000f }, { .1562500000f,-.0006250000f,-.00049382716f,-.000625000f,-.00049382716f }, { .1562500000f,-.0006250000f,-.00049382716f,-.000625000f,-.00049382716f }, { .1250000000f,-.0500000000f,-.0200000000f, .0100000000f, .0142857144f }, { .1041666667f, .0333333333f,-.0200000000f, .0400000000f,-.0250000000f }, { .0050000000f,-.3030303030f, .0020000001f,-.0200000000f, .0125000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f } }, { { .1428571492f, .1250000000f,-.0285714287f,-.0357142873f, .0208333333f }, { .1818181818f, .0588235296f, .0333333333f, .0212765951f, .0100000000f }, { .1818181818f, .0212765951f, .0100000000f, .0588235296f, .0333333333f }, { .1428571492f,-.0357142873f, .0208333333f, .1250000000f,-.0285714287f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, 
.0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f } }, { { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f }, { .0000000000f, .0000000000f, .0000000000f, .0000000000f, .0000000000f } } }; static const float 
fft_tone_level_table[2][64] = { { /* pow ~ (i > 46) ? 0 : (((((i & 1) ? 431 : 304) << (i >> 1))) / 1024.0); */ 0.17677669f, 0.42677650f, 0.60355347f, 0.85355347f, 1.20710683f, 1.68359375f, 2.37500000f, 3.36718750f, 4.75000000f, 6.73437500f, 9.50000000f, 13.4687500f, 19.0000000f, 26.9375000f, 38.0000000f, 53.8750000f, 76.0000000f, 107.750000f, 152.000000f, 215.500000f, 304.000000f, 431.000000f, 608.000000f, 862.000000f, 1216.00000f, 1724.00000f, 2432.00000f, 3448.00000f, 4864.00000f, 6896.00000f, 9728.00000f, 13792.0000f, 19456.0000f, 27584.0000f, 38912.0000f, 55168.0000f, 77824.0000f, 110336.000f, 155648.000f, 220672.000f, 311296.000f, 441344.000f, 622592.000f, 882688.000f, 1245184.00f, 1765376.00f, 2490368.00f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, }, { /* pow = (i > 45) ? 0 : ((((i & 1) ? 431 : 304) << (i >> 1)) / 512.0); */ 0.59375000f, 0.84179688f, 1.18750000f, 1.68359375f, 2.37500000f, 3.36718750f, 4.75000000f, 6.73437500f, 9.50000000f, 13.4687500f, 19.0000000f, 26.9375000f, 38.0000000f, 53.8750000f, 76.0000000f, 107.750000f, 152.000000f, 215.500000f, 304.000000f, 431.000000f, 608.000000f, 862.000000f, 1216.00000f, 1724.00000f, 2432.00000f, 3448.00000f, 4864.00000f, 6896.00000f, 9728.00000f, 13792.0000f, 19456.0000f, 27584.0000f, 38912.0000f, 55168.0000f, 77824.0000f, 110336.000f, 155648.000f, 220672.000f, 311296.000f, 441344.000f, 622592.000f, 882688.000f, 1245184.00f, 1765376.00f, 2490368.00f, 3530752.00f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f, 0.00000000f } }; static const float fft_tone_envelope_table[4][31] = { { .009607375f, .038060248f, .084265202f, .146446645f, .222214907f, .308658302f, 
.402454883f, .500000060f, .597545207f, .691341758f, .777785182f, .853553414f, .915734828f, .961939812f, .990392685f, 1.00000000f, .990392625f, .961939752f, .915734768f, .853553295f, .777785063f, .691341639f, .597545087f, .500000000f, .402454853f, .308658272f, .222214878f, .146446615f, .084265172f, .038060218f, .009607345f }, { .038060248f, .146446645f, .308658302f, .500000060f, .691341758f, .853553414f, .961939812f, 1.00000000f, .961939752f, .853553295f, .691341639f, .500000000f, .308658272f, .146446615f, .038060218f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f }, { .146446645f, .500000060f, .853553414f, 1.00000000f, .853553295f, .500000000f, .146446615f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f }, { .500000060f, 1.00000000f, .500000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f, .000000000f } }; static const float sb_noise_attenuation[32] = { 0.0f, 0.0f, 0.3f, 0.4f, 0.5f, 0.7f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, }; static const uint8_t fft_subpackets[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0 }; /* first index is joined_stereo, second index is 0 or 2 (1 is 
unused) */ static const float dequant_1bit[2][3] = { {-0.920000f, 0.000000f, 0.920000f }, {-0.890000f, 0.000000f, 0.890000f } }; static const float type30_dequant[8] = { -1.0f,-0.625f,-0.291666656732559f,0.0f, 0.25f,0.5f,0.75f,1.0f, }; static const float type34_delta[10] = { // FIXME: covers 8 entries.. -1.0f,-0.60947573184967f,-0.333333343267441f,-0.138071194291115f,0.0f, 0.138071194291115f,0.333333343267441f,0.60947573184967f,1.0f,0.0f, }; #endif /* AVCODEC_QDM2DATA_H */
123linslouis-android-video-cutter
jni/libavcodec/qdm2data.h
C
asf20
26,201
/* * Generate a header file for hardcoded Parametric Stereo tables * * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #define CONFIG_HARDCODED_TABLES 0 #include "aacps_tablegen.h" #include "tableprint.h" void write_float_3d_array (const void *p, int b, int c, int d) { int i; const float *f = p; for (i = 0; i < b; i++) { printf("{\n"); write_float_2d_array(f, c, d); printf("},\n"); f += c * d; } } void write_float_4d_array (const void *p, int a, int b, int c, int d) { int i; const float *f = p; for (i = 0; i < a; i++) { printf("{\n"); write_float_3d_array(f, b, c, d); printf("},\n"); f += b * c * d; } } int main(void) { ps_tableinit(); write_fileheader(); printf("static const float pd_re_smooth[8*8*8] = {\n"); write_float_array(pd_re_smooth, 8*8*8); printf("};\n"); printf("static const float pd_im_smooth[8*8*8] = {\n"); write_float_array(pd_im_smooth, 8*8*8); printf("};\n"); printf("static const float HA[46][8][4] = {\n"); write_float_3d_array(HA, 46, 8, 4); printf("};\n"); printf("static const float HB[46][8][4] = {\n"); write_float_3d_array(HB, 46, 8, 4); printf("};\n"); printf("static const float f20_0_8[8][7][2] = {\n"); write_float_3d_array(f20_0_8, 8, 7, 2); printf("};\n"); printf("static const 
float f34_0_12[12][7][2] = {\n"); write_float_3d_array(f34_0_12, 12, 7, 2); printf("};\n"); printf("static const float f34_1_8[8][7][2] = {\n"); write_float_3d_array(f34_1_8, 8, 7, 2); printf("};\n"); printf("static const float f34_2_4[4][7][2] = {\n"); write_float_3d_array(f34_2_4, 4, 7, 2); printf("};\n"); printf("static const float Q_fract_allpass[2][50][3][2] = {\n"); write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2); printf("};\n"); printf("static const float phi_fract[2][50][2] = {\n"); write_float_3d_array(phi_fract, 2, 50, 2); printf("};\n"); return 0; }
123linslouis-android-video-cutter
jni/libavcodec/aacps_tablegen.c
C
asf20
2,825
/* * CamStudio decoder * Copyright (c) 2006 Reimar Doeffinger * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stdlib.h> #include "avcodec.h" #if CONFIG_ZLIB #include <zlib.h> #endif #include "libavutil/lzo.h" typedef struct { AVFrame pic; int linelen, height, bpp; unsigned int decomp_size; unsigned char* decomp_buf; } CamStudioContext; static void copy_frame_default(AVFrame *f, const uint8_t *src, int linelen, int height) { int i; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { memcpy(dst, src, linelen); src += linelen; dst -= f->linesize[0]; } } static void add_frame_default(AVFrame *f, const uint8_t *src, int linelen, int height) { int i, j; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { for (j = linelen; j; j--) *dst++ += *src++; dst -= f->linesize[0] + linelen; } } #if !HAVE_BIGENDIAN #define copy_frame_16 copy_frame_default #define copy_frame_32 copy_frame_default #define add_frame_16 add_frame_default #define add_frame_32 add_frame_default #else static void copy_frame_16(AVFrame *f, const uint8_t *src, int linelen, int height) { int i, j; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { for (j = linelen / 2; j; j--) { 
dst[0] = src[1]; dst[1] = src[0]; src += 2; dst += 2; } dst -= f->linesize[0] + linelen; } } static void copy_frame_32(AVFrame *f, const uint8_t *src, int linelen, int height) { int i, j; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { for (j = linelen / 4; j; j--) { dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; src += 4; dst += 4; } dst -= f->linesize[0] + linelen; } } static void add_frame_16(AVFrame *f, const uint8_t *src, int linelen, int height) { int i, j; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { for (j = linelen / 2; j; j--) { dst[0] += src[1]; dst[1] += src[0]; src += 2; dst += 2; } dst -= f->linesize[0] + linelen; } } static void add_frame_32(AVFrame *f, const uint8_t *src, int linelen, int height) { int i, j; uint8_t *dst = f->data[0]; dst += (height - 1) * f->linesize[0]; for (i = height; i; i--) { for (j = linelen / 4; j; j--) { dst[0] += src[3]; dst[1] += src[2]; dst[2] += src[1]; dst[3] += src[0]; src += 4; dst += 4; } dst -= f->linesize[0] + linelen; } } #endif static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CamStudioContext *c = avctx->priv_data; AVFrame *picture = data; if (buf_size < 2) { av_log(avctx, AV_LOG_ERROR, "coded frame too small\n"); return -1; } if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); c->pic.reference = 1; c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->get_buffer(avctx, &c->pic) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } // decompress data switch ((buf[0] >> 1) & 7) { case 0: { // lzo compression int outlen = c->decomp_size, inlen = buf_size - 2; if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen)) av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n"); break; 
} case 1: { // zlib compression #if CONFIG_ZLIB unsigned long dlen = c->decomp_size; if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK) av_log(avctx, AV_LOG_ERROR, "error during zlib decompression\n"); break; #else av_log(avctx, AV_LOG_ERROR, "compiled without zlib support\n"); return -1; #endif } default: av_log(avctx, AV_LOG_ERROR, "unknown compression\n"); return -1; } // flip upside down, add difference frame if (buf[0] & 1) { // keyframe c->pic.pict_type = FF_I_TYPE; c->pic.key_frame = 1; switch (c->bpp) { case 16: copy_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height); break; case 32: copy_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height); break; default: copy_frame_default(&c->pic, c->decomp_buf, c->linelen, c->height); } } else { c->pic.pict_type = FF_P_TYPE; c->pic.key_frame = 0; switch (c->bpp) { case 16: add_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height); break; case 32: add_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height); break; default: add_frame_default(&c->pic, c->decomp_buf, c->linelen, c->height); } } *picture = c->pic; *data_size = sizeof(AVFrame); return buf_size; } static av_cold int decode_init(AVCodecContext *avctx) { CamStudioContext *c = avctx->priv_data; switch (avctx->bits_per_coded_sample) { case 16: avctx->pix_fmt = PIX_FMT_RGB555; break; case 24: avctx->pix_fmt = PIX_FMT_BGR24; break; case 32: avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log(avctx, AV_LOG_ERROR, "CamStudio codec error: invalid depth %i bpp\n", avctx->bits_per_coded_sample); return 1; } c->bpp = avctx->bits_per_coded_sample; c->pic.data[0] = NULL; c->linelen = avctx->width * avctx->bits_per_coded_sample / 8; c->height = avctx->height; c->decomp_size = c->height * c->linelen; c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING); if (!c->decomp_buf) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return 1; } return 0; } static av_cold int decode_end(AVCodecContext *avctx) { 
CamStudioContext *c = avctx->priv_data; av_freep(&c->decomp_buf); if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); return 0; } AVCodec cscd_decoder = { "camstudio", AVMEDIA_TYPE_VIDEO, CODEC_ID_CSCD, sizeof(CamStudioContext), decode_init, NULL, decode_end, decode_frame, CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("CamStudio"), };
123linslouis-android-video-cutter
jni/libavcodec/cscd.c
C
asf20
7,875
/*
 * DCA parser
 * Copyright (C) 2004 Gildas Bazin
 * Copyright (C) 2004 Benjamin Zores
 * Copyright (C) 2006 Benjamin Larsson
 * Copyright (C) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "parser.h"
#include "dca.h"

/** Parser state kept across dca_parse() calls. */
typedef struct DCAParseContext {
    ParseContext pc;       ///< generic frame-combining state (libavcodec/parser.h)
    uint32_t lastmarker;   ///< sync marker first seen in this stream; later frames must match it
    int size;              ///< bytes counted since the current frame's start marker
    int framesize;         ///< frame size latched from the first complete frame, 0 until known
    int hd_pos;            ///< byte offset of the first DCA_HD_MARKER inside the frame, 0 if none
} DCAParseContext;

/*
 * True when the 32-bit shift register 'state' sits on a valid DCA sync point.
 * The 14-bit-per-word formats need two look-ahead bytes to confirm the
 * marker, hence the (i < buf_size-2) guards; the raw formats need none.
 */
#define IS_MARKER(state, i, buf, buf_size) \
 ((state == DCA_MARKER_14B_LE && (i < buf_size-2) && (buf[i+1] & 0xF0) == 0xF0 && buf[i+2] == 0x07) \
 || (state == DCA_MARKER_14B_BE && (i < buf_size-2) && buf[i+1] == 0x07 && (buf[i+2] & 0xF0) == 0xF0) \
 || state == DCA_MARKER_RAW_LE || state == DCA_MARKER_RAW_BE)

/**
 * finds the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or -1
 */
static int dca_find_frame_end(DCAParseContext * pc1, const uint8_t * buf,
                              int buf_size)
{
    int start_found, i;
    uint32_t state;
    ParseContext *pc = &pc1->pc;

    /* resume the byte-wise scan exactly where the previous call stopped:
     * 'state' holds the last 4 bytes seen, possibly spanning buffers */
    start_found = pc->frame_start_found;
    state = pc->state;

    i = 0;
    if (!start_found) {
        /* phase 1: look for the stream's sync marker; the first marker
         * ever seen is remembered in lastmarker so that later hits must
         * agree with it (guards against false sync patterns) */
        for (i = 0; i < buf_size; i++) {
            state = (state << 8) | buf[i];
            if (IS_MARKER(state, i, buf, buf_size)) {
                if (pc1->lastmarker && state == pc1->lastmarker) {
                    start_found = 1;
                    break;
                } else if (!pc1->lastmarker) {
                    start_found = 1;
                    pc1->lastmarker = state;
                    break;
                }
            }
        }
    }
    if (start_found) {
        /* phase 2: scan for the NEXT occurrence of the same marker, which
         * delimits the current frame */
        for (; i < buf_size; i++) {
            pc1->size++;
            state = (state << 8) | buf[i];
            /* note where a DTS-HD extension begins; the core frame ends there */
            if (state == DCA_HD_MARKER && !pc1->hd_pos)
                pc1->hd_pos = pc1->size;
            if (state == pc1->lastmarker && IS_MARKER(state, i, buf, buf_size)) {
                /* once framesize is known, skip markers that appear early
                 * (e.g. the HD extension repeating the sync word) */
                if (pc1->framesize > pc1->size)
                    continue;
                if (!pc1->framesize) {
                    /* first full frame: latch the frame size (core only,
                     * if an HD extension marker was seen) */
                    pc1->framesize = pc1->hd_pos ? pc1->hd_pos : pc1->size;
                }
                pc->frame_start_found = 0;
                pc->state = -1;
                pc1->size = 0;
                /* the marker's 4 bytes belong to the next frame */
                return i - 3;
            }
        }
    }
    /* frame end not in this buffer: save scan state for the next call */
    pc->frame_start_found = start_found;
    pc->state = state;
    return END_NOT_FOUND;
}

/** Initialize per-stream parser state. */
static av_cold int dca_parse_init(AVCodecParserContext * s)
{
    DCAParseContext *pc1 = s->priv_data;

    pc1->lastmarker = 0;
    return 0;
}

/**
 * Standard AVCodecParser callback: split the input into complete DCA
 * frames using dca_find_frame_end() + ff_combine_frame().
 */
static int dca_parse(AVCodecParserContext * s,
                     AVCodecContext * avctx,
                     const uint8_t ** poutbuf, int *poutbuf_size,
                     const uint8_t * buf, int buf_size)
{
    DCAParseContext *pc1 = s->priv_data;
    ParseContext *pc = &pc1->pc;
    int next;

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        /* the demuxer already delivers whole frames */
        next = buf_size;
    } else {
        next = dca_find_frame_end(pc1, buf, buf_size);

        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            /* frame still incomplete: consume input, output nothing yet */
            *poutbuf = NULL;
            *poutbuf_size = 0;
            return buf_size;
        }
    }
    *poutbuf = buf;
    *poutbuf_size = buf_size;
    return next;
}

AVCodecParser dca_parser = {
    {CODEC_ID_DTS},
    sizeof(DCAParseContext),
    dca_parse_init,
    dca_parse,
    ff_parse_close,
};
123linslouis-android-video-cutter
jni/libavcodec/dca_parser.c
C
asf20
4,078
/** * @file * Common code for Vorbis I encoder and decoder * @author Denes Balatoni ( dbalatoni programozo hu ) * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #undef V_DEBUG //#define V_DEBUG #define ALT_BITSTREAM_READER_LE #include "avcodec.h" #include "get_bits.h" #include "vorbis.h" /* Helper functions */ // x^(1/n) unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n) { unsigned int ret = 0, i, j; do { ++ret; for (i = 0, j = ret; i < n - 1; i++) j *= ret; } while (j <= x); return ret - 1; } // Generate vlc codes from vorbis huffman code lengths // the two bits[p] > 32 checks should be redundant, all calling code should // already ensure that, but since it allows overwriting the stack it seems // reasonable to check redundantly. int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, uint_fast32_t num) { uint_fast32_t exit_at_level[33] = { 404, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; uint_fast8_t i, j; uint_fast32_t code, p; #ifdef V_DEBUG GetBitContext gb; #endif for (p = 0; (bits[p] == 0) && (p < num); ++p) ; if (p == num) { // av_log(vc->avccontext, AV_LOG_INFO, "An empty codebook. Heh?! 
\n"); return 0; } codes[p] = 0; if (bits[p] > 32) return 1; for (i = 0; i < bits[p]; ++i) exit_at_level[i+1] = 1 << i; #ifdef V_DEBUG av_log(NULL, AV_LOG_INFO, " %d. of %d code len %d code %d - ", p, num, bits[p], codes[p]); init_get_bits(&gb, (uint_fast8_t *)&codes[p], bits[p]); for (i = 0; i < bits[p]; ++i) av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0"); av_log(NULL, AV_LOG_INFO, "\n"); #endif ++p; for (; p < num; ++p) { if (bits[p] > 32) return 1; if (bits[p] == 0) continue; // find corresponding exit(node which the tree can grow further from) for (i = bits[p]; i > 0; --i) if (exit_at_level[i]) break; if (!i) // overspecified tree return 1; code = exit_at_level[i]; exit_at_level[i] = 0; // construct code (append 0s to end) and introduce new exits for (j = i + 1 ;j <= bits[p]; ++j) exit_at_level[j] = code + (1 << (j - 1)); codes[p] = code; #ifdef V_DEBUG av_log(NULL, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]); init_get_bits(&gb, (uint_fast8_t *)&codes[p], bits[p]); for (i = 0; i < bits[p]; ++i) av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0"); av_log(NULL, AV_LOG_INFO, "\n"); #endif } //no exits should be left (underspecified tree - ie. 
unused valid vlcs - not allowed by SPEC) for (p = 1; p < 33; p++) if (exit_at_level[p]) return 1; return 0; } void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values) { int i; list[0].sort = 0; list[1].sort = 1; for (i = 2; i < values; i++) { int j; list[i].low = 0; list[i].high = 1; list[i].sort = i; for (j = 2; j < i; j++) { int tmp = list[j].x; if (tmp < list[i].x) { if (tmp > list[list[i].low].x) list[i].low = j; } else { if (tmp < list[list[i].high].x) list[i].high = j; } } } for (i = 0; i < values - 1; i++) { int j; for (j = i + 1; j < values; j++) { if (list[list[i].sort].x > list[list[j].sort].x) { int tmp = list[i].sort; list[i].sort = list[j].sort; list[j].sort = tmp; } } } } static inline void render_line_unrolled(intptr_t x, intptr_t y, int x1, intptr_t sy, int ady, int adx, float *buf) { int err = -adx; x -= x1 - 1; buf += x1 - 1; while (++x < 0) { err += ady; if (err >= 0) { err += ady - adx; y += sy; buf[x++] = ff_vorbis_floor1_inverse_db_table[y]; } buf[x] = ff_vorbis_floor1_inverse_db_table[y]; } if (x <= 0) { if (err + ady >= 0) y += sy; buf[x] = ff_vorbis_floor1_inverse_db_table[y]; } } static void render_line(int x0, int y0, int x1, int y1, float *buf) { int dy = y1 - y0; int adx = x1 - x0; int ady = FFABS(dy); int sy = dy < 0 ? 
-1 : 1; buf[x0] = ff_vorbis_floor1_inverse_db_table[y0]; if (ady*2 <= adx) { // optimized common case render_line_unrolled(x0, y0, x1, sy, ady, adx, buf); } else { int base = dy / adx; int x = x0; int y = y0; int err = -adx; ady -= FFABS(base) * adx; while (++x < x1) { y += base; err += ady; if (err >= 0) { err -= adx; y += sy; } buf[x] = ff_vorbis_floor1_inverse_db_table[y]; } } } void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values, uint_fast16_t *y_list, int *flag, int multiplier, float *out, int samples) { int lx, ly, i; lx = 0; ly = y_list[0] * multiplier; for (i = 1; i < values; i++) { int pos = list[i].sort; if (flag[pos]) { int x1 = list[pos].x; int y1 = y_list[pos] * multiplier; if (lx < samples) render_line(lx, ly, FFMIN(x1,samples), y1, out); lx = x1; ly = y1; } if (lx >= samples) break; } if (lx < samples) render_line(lx, ly, samples, ly, out); }
123linslouis-android-video-cutter
jni/libavcodec/vorbis.c
C
asf20
6,634
/*
 * reference discrete cosine transform (double precision)
 * Copyright (C) 2009 Dylan Yudaken
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * reference discrete cosine transform (double precision)
 *
 * @author Dylan Yudaken (dyudaken at gmail)
 *
 * @note This file could be optimized a lot, but is for
 * reference and so readability is better.
 */

#include "libavutil/mathematics.h"
#include "dctref.h"

/* 8x8 transform matrix, stored row-major; filled by ff_ref_dct_init().
 * Row 0 is the constant sqrt(1/8); the remaining rows are scaled cosine
 * basis functions. */
static double coefficients[8 * 8];

/**
 * Initialize the double precision discrete cosine transform
 * functions fdct & idct.
 */
av_cold void ff_ref_dct_init(void)
{
    unsigned int row, col;

    for (row = 0; row < 64; row += 8) {
        for (col = 0; col < 8; ++col)
            coefficients[row + col] =
                row == 0 ? sqrt(0.125)
                         : 0.5 * cos(row * (col + 0.5) * M_PI / 64.0);
    }
}

/**
 * Transform 8x8 block of data with a double precision forward DCT <br>
 * This is a reference implementation.
 *
 * @param block pointer to 8x8 block of data to transform
 */
void ff_ref_fdct(short *block)
{
    /* implements block = coefficients * block * coefficients' */
    unsigned int row, col, k;
    double out[8 * 8];

    /* left multiply: out = coefficients * block (scaled by 8) */
    for (row = 0; row < 64; row += 8) {
        for (col = 0; col < 8; ++col) {
            double acc = 0;
            for (k = 0; k < 8; ++k)
                acc += coefficients[row + k] * block[k * 8 + col];
            out[row + col] = acc * 8;
        }
    }

    /* right multiply by the transpose, rounding with a slight negative
     * bias (0.499999999999) as the reference rounding rule */
    for (row = 0; row < 64; row += 8) {
        for (col = 0; col < 8; ++col) {
            double acc = 0;
            for (k = 0; k < 8; ++k)
                acc += out[row + k] * coefficients[col * 8 + k];
            block[row + col] = floor(acc + 0.499999999999);
        }
    }
}

/**
 * Transform 8x8 block of data with a double precision inverse DCT <br>
 * This is a reference implementation.
 *
 * @param block pointer to 8x8 block of data to transform
 */
void ff_ref_idct(short *block)
{
    /* implements block = (coefficients') * block * coefficients */
    unsigned int row, col, k;
    double out[8 * 8];

    /* right multiply: out = block * coefficients */
    for (row = 0; row < 64; row += 8) {
        for (col = 0; col < 8; ++col) {
            double acc = 0;
            for (k = 0; k < 8; ++k)
                acc += block[row + k] * coefficients[k * 8 + col];
            out[row + col] = acc;
        }
    }

    /* left multiply by the transpose, rounding to nearest */
    for (row = 0; row < 8; ++row) {
        for (col = 0; col < 8; ++col) {
            double acc = 0;
            for (k = 0; k < 64; k += 8)
                acc += coefficients[k + row] * out[k + col];
            block[row * 8 + col] = floor(acc + 0.5);
        }
    }
}
123linslouis-android-video-cutter
jni/libavcodec/dctref.c
C
asf20
3,453
/* * QCELP decoder * Copyright (c) 2007 Reynaldo H. Verdejo Pinochet * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_QCELPDATA_H #define AVCODEC_QCELPDATA_H /** * @file * Data tables for the QCELP decoder * @author Reynaldo H. Verdejo Pinochet * @remark FFmpeg merging spearheaded by Kenan Gillet * @remark Development mentored by Benjamin Larson */ #include <stddef.h> #include <stdint.h> #include "libavutil/common.h" /** * QCELP unpacked data frame */ typedef struct { /// @defgroup qcelp_codebook_parameters QCELP excitation codebook parameters /// @{ uint8_t cbsign[16]; ///!< sign of the codebook gain for each codebook subframe uint8_t cbgain[16]; ///!< unsigned codebook gain for each codebook subframe uint8_t cindex[16]; ///!< codebook index for each codebook subframe /// @} /// @defgroup qcelp_pitch_parameters QCELP pitch prediction parameters /// @{ uint8_t plag[4]; ///!< pitch lag for each pitch subframe uint8_t pfrac[4]; ///!< fractional pitch lag for each pitch subframe uint8_t pgain[4]; ///!< pitch gain for each pitch subframe /// @} /** * line spectral pair frequencies (LSP) for RATE_OCTAVE, * line spectral pair frequencies grouped into five vectors * of dimension two (LSPV) for other rates */ uint8_t lspv[10]; /** * reserved bits only present in bitrate 1, 
1/4 and 1/8 packets */ uint8_t reserved; } QCELPFrame; /** * pre-calculated table for hammsinc function * Only half of the table is needed because of symmetry. * * TIA/EIA/IS-733 2.4.5.2-2/3 */ static const float qcelp_hammsinc_table[4] = { -0.006822, 0.041249, -0.143459, 0.588863}; typedef struct { uint8_t index; /*!< index into the QCELPContext structure */ uint8_t bitpos; /*!< position of the lowest bit in the value's byte */ uint8_t bitlen; /*!< number of bits to read */ } QCELPBitmap; #define QCELP_OF(variable, bit, len) {offsetof(QCELPFrame, variable), bit, len} /** * bitmap unpacking tables for RATE_FULL * * TIA/EIA/IS-733 Table 2.4.7.1-1 */ static const QCELPBitmap qcelp_rate_full_bitmap[] = { // start on bit QCELP_OF(lspv [ 2], 0, 3), // 265 QCELP_OF(lspv [ 1], 0, 7), // 262 QCELP_OF(lspv [ 0], 0, 6), // 255 QCELP_OF(lspv [ 4], 0, 6), // 249 QCELP_OF(lspv [ 3], 0, 6), // 243 QCELP_OF(lspv [ 2], 3, 4), // 237 QCELP_OF(cbsign[ 0], 0, 1), // 233 QCELP_OF(cbgain[ 0], 0, 4), // 232 QCELP_OF(pfrac [ 0], 0, 1), // 228 QCELP_OF(plag [ 0], 0, 7), // 227 QCELP_OF(pgain [ 0], 0, 3), // 220 QCELP_OF(cindex[ 1], 0, 4), // 217 QCELP_OF(cbsign[ 1], 0, 1), // 213 QCELP_OF(cbgain[ 1], 0, 4), // 212 QCELP_OF(cindex[ 0], 0, 7), // 208 QCELP_OF(cbgain[ 3], 0, 1), // 201 QCELP_OF(cindex[ 2], 0, 7), // 200 QCELP_OF(cbsign[ 2], 0, 1), // 193 QCELP_OF(cbgain[ 2], 0, 4), // 192 QCELP_OF(cindex[ 1], 4, 3), // 188 QCELP_OF(plag [ 1], 0, 3), // 185 QCELP_OF(pgain [ 1], 0, 3), // 182 QCELP_OF(cindex[ 3], 0, 7), // 179 QCELP_OF(cbsign[ 3], 0, 1), // 172 QCELP_OF(cbgain[ 3], 1, 2), // 171 QCELP_OF(cindex[ 4], 0, 6), // 169 QCELP_OF(cbsign[ 4], 0, 1), // 163 QCELP_OF(cbgain[ 4], 0, 4), // 162 QCELP_OF(pfrac [ 1], 0, 1), // 158 QCELP_OF(plag [ 1], 3, 4), // 157 QCELP_OF(cbgain[ 6], 0, 3), // 153 QCELP_OF(cindex[ 5], 0, 7), // 150 QCELP_OF(cbsign[ 5], 0, 1), // 143 QCELP_OF(cbgain[ 5], 0, 4), // 142 QCELP_OF(cindex[ 4], 6, 1), // 138 QCELP_OF(cindex[ 7], 0, 3), // 137 QCELP_OF(cbsign[ 7], 
0, 1), // 134 QCELP_OF(cbgain[ 7], 0, 3), // 133 QCELP_OF(cindex[ 6], 0, 7), // 130 QCELP_OF(cbsign[ 6], 0, 1), // 123 QCELP_OF(cbgain[ 6], 3, 1), // 122 QCELP_OF(cbgain[ 8], 0, 1), // 121 QCELP_OF(pfrac [ 2], 0, 1), // 120 QCELP_OF(plag [ 2], 0, 7), // 119 QCELP_OF(pgain [ 2], 0, 3), // 112 QCELP_OF(cindex[ 7], 3, 4), // 109 QCELP_OF(cbsign[ 9], 0, 1), // 105 QCELP_OF(cbgain[ 9], 0, 4), // 104 QCELP_OF(cindex[ 8], 0, 7), // 100 QCELP_OF(cbsign[ 8], 0, 1), // 93 QCELP_OF(cbgain[ 8], 1, 3), // 92 QCELP_OF(cindex[10], 0, 4), // 89 QCELP_OF(cbsign[10], 0, 1), // 85 QCELP_OF(cbgain[10], 0, 4), // 84 QCELP_OF(cindex[ 9], 0, 7), // 80 QCELP_OF(pgain [ 3], 0, 2), // 73 QCELP_OF(cindex[11], 0, 7), // 71 QCELP_OF(cbsign[11], 0, 1), // 64 QCELP_OF(cbgain[11], 0, 3), // 63 QCELP_OF(cindex[10], 4, 3), // 60 QCELP_OF(cindex[12], 0, 2), // 57 QCELP_OF(cbsign[12], 0, 1), // 55 QCELP_OF(cbgain[12], 0, 4), // 54 QCELP_OF(pfrac [ 3], 0, 1), // 50 QCELP_OF(plag [ 3], 0, 7), // 49 QCELP_OF(pgain [ 3], 2, 1), // 42 QCELP_OF(cindex[13], 0, 6), // 41 QCELP_OF(cbsign[13], 0, 1), // 35 QCELP_OF(cbgain[13], 0, 4), // 34 QCELP_OF(cindex[12], 2, 5), // 30 QCELP_OF(cbgain[15], 0, 3), // 25 QCELP_OF(cindex[14], 0, 7), // 22 QCELP_OF(cbsign[14], 0, 1), // 15 QCELP_OF(cbgain[14], 0, 4), // 14 QCELP_OF(cindex[13], 6, 1), // 10 QCELP_OF(reserved, 0, 2), // 9 QCELP_OF(cindex[15], 0, 7), // 7 QCELP_OF(cbsign[15], 0, 1) // 0 }; /** * bitmap unpacking tables for RATE_HALF * * TIA/EIA/IS-733 Table 2.4.7.2-1 */ static const QCELPBitmap qcelp_rate_half_bitmap[] = { // start on bit QCELP_OF(lspv [2], 0, 3), // 123 QCELP_OF(lspv [1], 0, 7), // 120 QCELP_OF(lspv [0], 0, 6), // 113 QCELP_OF(lspv [4], 0, 6), // 107 QCELP_OF(lspv [3], 0, 6), // 101 QCELP_OF(lspv [2], 3, 4), // 95 QCELP_OF(cbsign[0], 0, 1), // 91 QCELP_OF(cbgain[0], 0, 4), // 90 QCELP_OF(pfrac [0], 0, 1), // 86 QCELP_OF(plag [0], 0, 7), // 85 QCELP_OF(pgain [0], 0, 3), // 78 QCELP_OF(plag [1], 0, 6), // 75 QCELP_OF(pgain [1], 0, 3), // 69 
QCELP_OF(cindex[0], 0, 7), // 66 QCELP_OF(pgain [2], 0, 2), // 59 QCELP_OF(cindex[1], 0, 7), // 57 QCELP_OF(cbsign[1], 0, 1), // 50 QCELP_OF(cbgain[1], 0, 4), // 49 QCELP_OF(pfrac [1], 0, 1), // 45 QCELP_OF(plag [1], 6, 1), // 44 QCELP_OF(cindex[2], 0, 2), // 43 QCELP_OF(cbsign[2], 0, 1), // 41 QCELP_OF(cbgain[2], 0, 4), // 40 QCELP_OF(pfrac [2], 0, 1), // 36 QCELP_OF(plag [2], 0, 7), // 35 QCELP_OF(pgain [2], 2, 1), // 28 QCELP_OF(pfrac [3], 0, 1), // 27 QCELP_OF(plag [3], 0, 7), // 26 QCELP_OF(pgain [3], 0, 3), // 19 QCELP_OF(cindex[2], 2, 5), // 16 QCELP_OF(cindex[3], 0, 7), // 11 QCELP_OF(cbsign[3], 0, 1), // 4 QCELP_OF(cbgain[3], 0, 4) // 3 }; /** * bitmap unpacking tables for RATE_QUARTER * * TIA/EIA/IS-733 Table 2.4.7.3-1 */ static const QCELPBitmap qcelp_rate_quarter_bitmap[] = { // start on bit QCELP_OF(lspv [2], 0, 3), // 53 QCELP_OF(lspv [1], 0, 7), // 50 QCELP_OF(lspv [0], 0, 6), // 43 QCELP_OF(lspv [4], 0, 6), // 37 QCELP_OF(lspv [3], 0, 6), // 31 QCELP_OF(lspv [2], 3, 4), // 25 QCELP_OF(cbgain[3], 0, 4), // 21 QCELP_OF(cbgain[2], 0, 4), // 17 QCELP_OF(cbgain[1], 0, 4), // 13 QCELP_OF(cbgain[0], 0, 4), // 9 QCELP_OF(reserved, 0, 2), // 5 QCELP_OF(cbgain[4], 0, 4) // 3 }; /** * bitmap unpacking tables for RATE_OCTAVE * * trick: CBSEED is written into QCELPContext.cbsign[15], * which is not used for RATE_OCTAVE. * CBSEED is only used to ensure the occurrence of random bit * patterns in the 16 first bits that are used as the seed. 
* * TIA/EIA/IS-733 Table 2.4.7.4-1 */ static const QCELPBitmap qcelp_rate_octave_bitmap[] = { // start on bit QCELP_OF(cbsign[15], 3, 1), // 19 QCELP_OF(lspv [0], 0, 1), // 18 QCELP_OF(lspv [1], 0, 1), // 17 QCELP_OF(lspv [2], 0, 1), // 16 QCELP_OF(cbsign[15], 2, 1), // 15 QCELP_OF(lspv [3], 0, 1), // 14 QCELP_OF(lspv [4], 0, 1), // 13 QCELP_OF(lspv [5], 0, 1), // 12 QCELP_OF(cbsign[15], 1, 1), // 11 QCELP_OF(lspv [6], 0, 1), // 10 QCELP_OF(lspv [7], 0, 1), // 9 QCELP_OF(lspv [8], 0, 1), // 8 QCELP_OF(cbsign[15], 0, 1), // 7 QCELP_OF(lspv [9], 0, 1), // 6 QCELP_OF(cbgain [0], 0, 2), // 7 QCELP_OF(reserved, 0, 4) // 3 }; /** * position of the bitmapping data for each packet type in * the QCELPContext */ static const QCELPBitmap * const qcelp_unpacking_bitmaps_per_rate[5] = { NULL, ///!< for SILENCE rate qcelp_rate_octave_bitmap, qcelp_rate_quarter_bitmap, qcelp_rate_half_bitmap, qcelp_rate_full_bitmap, }; static const uint16_t qcelp_unpacking_bitmaps_lengths[5] = { 0, ///!< for SILENCE rate FF_ARRAY_ELEMS(qcelp_rate_octave_bitmap), FF_ARRAY_ELEMS(qcelp_rate_quarter_bitmap), FF_ARRAY_ELEMS(qcelp_rate_half_bitmap), FF_ARRAY_ELEMS(qcelp_rate_full_bitmap), }; typedef uint16_t qcelp_vector[2]; /** * LSP vector quantization tables in x*10000 form * * TIA/EIA/IS-733 tables 2.4.3.2.6.3-1 through 2.4.3.2.6.3-5 */ static const qcelp_vector qcelp_lspvq1[64]= { { 327, 118},{ 919, 111},{ 427, 440},{1327, 185}, { 469, 50},{1272, 91},{ 892, 59},{1771, 193}, { 222, 158},{1100, 127},{ 827, 55},{ 978, 791}, { 665, 47},{ 700,1401},{ 670, 859},{1913,1048}, { 471, 215},{1046, 125},{ 645, 298},{1599, 160}, { 593, 39},{1187, 462},{ 749, 341},{1520, 511}, { 290, 792},{ 909, 362},{ 753, 81},{1111,1058}, { 519, 253},{ 828, 839},{ 685, 541},{1421,1258}, { 386, 130},{ 962, 119},{ 542, 387},{1431, 185}, { 526, 51},{1175, 260},{ 831, 167},{1728, 510}, { 273, 437},{1172, 113},{ 771, 144},{1122, 751}, { 619, 119},{ 492,1276},{ 658, 695},{1882, 615}, { 415, 200},{1018, 88},{ 681, 339},{1436, 325}, 
{ 555, 122},{1042, 485},{ 826, 345},{1374, 743}, { 383,1018},{1005, 358},{ 704, 86},{1301, 586}, { 597, 241},{ 832, 621},{ 555, 573},{1504, 839}}; static const qcelp_vector qcelp_lspvq2[128]= { { 255, 293},{ 904, 219},{ 151,1211},{1447, 498}, { 470, 253},{1559, 177},{1547, 994},{2394, 242}, { 91, 813},{ 857, 590},{ 934,1326},{1889, 282}, { 813, 472},{1057,1494},{ 450,3315},{2163,1895}, { 538, 532},{1399, 218},{ 146,1552},{1755, 626}, { 822, 202},{1299, 663},{ 706,1732},{2656, 401}, { 418, 745},{ 762,1038},{ 583,1748},{1746,1285}, { 527,1169},{1314, 830},{ 556,2116},{1073,2321}, { 297, 570},{ 981, 403},{ 468,1103},{1740, 243}, { 725, 179},{1255, 474},{1374,1362},{1922, 912}, { 285, 947},{ 930, 700},{ 593,1372},{1909, 576}, { 588, 916},{1110,1116},{ 224,2719},{1633,2220}, { 402, 520},{1061, 448},{ 402,1352},{1499, 775}, { 664, 589},{1081, 727},{ 801,2206},{2165,1157}, { 566, 802},{ 911,1116},{ 306,1703},{1792, 836}, { 655, 999},{1061,1038},{ 298,2089},{1110,1753}, { 361, 311},{ 970, 239},{ 265,1231},{1495, 573}, { 566, 262},{1569, 293},{1341,1144},{2271, 544}, { 214, 877},{ 847, 719},{ 794,1384},{2067, 274}, { 703, 688},{1099,1306},{ 391,2947},{2024,1670}, { 471, 525},{1245, 290},{ 264,1557},{1568, 807}, { 718, 399},{1193, 685},{ 883,1594},{2729, 764}, { 500, 754},{ 809,1108},{ 541,1648},{1523,1385}, { 614,1196},{1209, 847},{ 345,2242},{1442,1747}, { 199, 560},{1092, 194},{ 349,1253},{1653, 507}, { 625, 354},{1376, 431},{1187,1465},{2164, 872}, { 360, 974},{1008, 698},{ 704,1346},{2114, 452}, { 720, 816},{1240,1089},{ 439,2475},{1498,2040}, { 336, 718},{1213, 187},{ 451,1450},{1368, 885}, { 592, 578},{1131, 531},{ 861,1855},{1764,1500}, { 444, 970},{ 935, 903},{ 424,1687},{1633,1102}, { 793, 897},{1060, 897},{ 185,2011},{1205,1855}}; static const qcelp_vector qcelp_lspvq3[128]= { { 225, 283},{1296, 355},{ 543, 343},{2073, 274}, { 204,1099},{1562, 523},{1388, 161},{2784, 274}, { 112, 849},{1870, 175},{1189, 160},{1490,1088}, { 969,1115},{ 
659,3322},{1158,1073},{3183,1363}, { 517, 223},{1740, 223},{ 704, 387},{2637, 234}, { 692,1005},{1287,1610},{ 952, 532},{2393, 646}, { 490, 552},{1619, 657},{ 845, 670},{1784,2280}, { 191,1775},{ 272,2868},{ 942, 952},{2628,1479}, { 278, 579},{1565, 218},{ 814, 180},{2379, 187}, { 276,1444},{1199,1223},{1200, 349},{3009, 307}, { 312, 844},{1898, 306},{ 863, 470},{1685,1241}, { 513,1727},{ 711,2233},{1085, 864},{3398, 527}, { 414, 440},{1356, 612},{ 964, 147},{2173, 738}, { 465,1292},{ 877,1749},{1104, 689},{2105,1311}, { 580, 864},{1895, 752},{ 652, 609},{1485,1699}, { 514,1400},{ 386,2131},{ 933, 798},{2473, 986}, { 334, 360},{1375, 398},{ 621, 276},{2183, 280}, { 311,1114},{1382, 807},{1284, 175},{2605, 636}, { 230, 816},{1739, 408},{1074, 176},{1619,1120}, { 784,1371},{ 448,3050},{1189, 880},{3039,1165}, { 424, 241},{1672, 186},{ 815, 333},{2432, 324}, { 584,1029},{1137,1546},{1015, 585},{2198, 995}, { 574, 581},{1746, 647},{ 733, 740},{1938,1737}, { 347,1710},{ 373,2429},{ 787,1061},{2439,1438}, { 185, 536},{1489, 178},{ 703, 216},{2178, 487}, { 154,1421},{1414, 994},{1103, 352},{3072, 473}, { 408, 819},{2055, 168},{ 998, 354},{1917,1140}, { 665,1799},{ 993,2213},{1234, 631},{3003, 762}, { 373, 620},{1518, 425},{ 913, 300},{1966, 836}, { 402,1185},{ 948,1385},{1121, 555},{1802,1509}, { 474, 886},{1888, 610},{ 739, 585},{1231,2379}, { 661,1335},{ 205,2211},{ 823, 822},{2480,1179}}; static const qcelp_vector qcelp_lspvq4[64]= { { 348, 311},{ 812,1145},{ 552, 461},{1826, 263}, { 601, 675},{1730, 172},{1523, 193},{2449, 277}, { 334, 668},{ 805,1441},{1319, 207},{1684, 910}, { 582,1318},{1403,1098},{ 979, 832},{2700,1359}, { 624, 228},{1292, 979},{ 800, 195},{2226, 285}, { 730, 862},{1537, 601},{1115, 509},{2720, 354}, { 218,1167},{1212,1538},{1074, 247},{1674,1710}, { 322,2142},{1263, 777},{ 981, 556},{2119,1710}, { 193, 596},{1035, 957},{ 694, 397},{1997, 253}, { 743, 603},{1584, 321},{1346, 346},{2221, 708}, { 451, 732},{1040,1415},{1184, 230},{1853, 919}, { 
310,1661},{1625, 706},{ 856, 843},{2902, 702}, { 467, 348},{1108,1048},{ 859, 306},{1964, 463}, { 560,1013},{1425, 533},{1142, 634},{2391, 879}, { 397,1084},{1345,1700},{ 976, 248},{1887,1189}, { 644,2087},{1262, 603},{ 877, 550},{2203,1307}}; static const qcelp_vector qcelp_lspvq5[64]= { { 360, 222},{ 820,1097},{ 601, 319},{1656, 198}, { 604, 513},{1552, 141},{1391, 155},{2474, 261}, { 269, 785},{1463, 646},{1123, 191},{2015, 223}, { 785, 844},{1202,1011},{ 980, 807},{3014, 793}, { 570, 180},{1135,1382},{ 778, 256},{1901, 179}, { 807, 622},{1461, 458},{1231, 178},{2028, 821}, { 387, 927},{1496,1004},{ 888, 392},{2246, 341}, { 295,1462},{1156, 694},{1022, 473},{2226,1364}, { 210, 478},{1029,1020},{ 722, 181},{1730, 251}, { 730, 488},{1465, 293},{1303, 326},{2595, 387}, { 458, 584},{1569, 742},{1029, 173},{1910, 495}, { 605,1159},{1268, 719},{ 973, 646},{2872, 428}, { 443, 334},{ 835,1465},{ 912, 138},{1716, 442}, { 620, 778},{1316, 450},{1186, 335},{1446,1665}, { 486,1050},{1675,1019},{ 880, 278},{2214, 202}, { 539,1564},{1142, 533},{ 984, 391},{2130,1089}}; static const qcelp_vector * const qcelp_lspvq[5] = { qcelp_lspvq1, qcelp_lspvq2, qcelp_lspvq3, qcelp_lspvq4, qcelp_lspvq5 }; /** * the final gain scalefactor before clipping into a usable output float */ #define QCELP_SCALE 8192. 
/** * table for computing Ga (decoded linear codebook gain magnitude) * * @note The table could fit in int16_t in x*8 form, but it seems * to be slower on x86 * * TIA/EIA/IS-733 2.4.6.2.1-3 */ static const float qcelp_g12ga[61] = { 1.000/QCELP_SCALE, 1.125/QCELP_SCALE, 1.250/QCELP_SCALE, 1.375/QCELP_SCALE, 1.625/QCELP_SCALE, 1.750/QCELP_SCALE, 2.000/QCELP_SCALE, 2.250/QCELP_SCALE, 2.500/QCELP_SCALE, 2.875/QCELP_SCALE, 3.125/QCELP_SCALE, 3.500/QCELP_SCALE, 4.000/QCELP_SCALE, 4.500/QCELP_SCALE, 5.000/QCELP_SCALE, 5.625/QCELP_SCALE, 6.250/QCELP_SCALE, 7.125/QCELP_SCALE, 8.000/QCELP_SCALE, 8.875/QCELP_SCALE, 10.000/QCELP_SCALE, 11.250/QCELP_SCALE, 12.625/QCELP_SCALE, 14.125/QCELP_SCALE, 15.875/QCELP_SCALE, 17.750/QCELP_SCALE, 20.000/QCELP_SCALE, 22.375/QCELP_SCALE, 25.125/QCELP_SCALE, 28.125/QCELP_SCALE, 31.625/QCELP_SCALE, 35.500/QCELP_SCALE, 39.750/QCELP_SCALE, 44.625/QCELP_SCALE, 50.125/QCELP_SCALE, 56.250/QCELP_SCALE, 63.125/QCELP_SCALE, 70.750/QCELP_SCALE, 79.375/QCELP_SCALE, 89.125/QCELP_SCALE, 100.000/QCELP_SCALE, 112.250/QCELP_SCALE, 125.875/QCELP_SCALE, 141.250/QCELP_SCALE, 158.500/QCELP_SCALE, 177.875/QCELP_SCALE, 199.500/QCELP_SCALE, 223.875/QCELP_SCALE, 251.250/QCELP_SCALE, 281.875/QCELP_SCALE, 316.250/QCELP_SCALE, 354.875/QCELP_SCALE, 398.125/QCELP_SCALE, 446.625/QCELP_SCALE, 501.125/QCELP_SCALE, 562.375/QCELP_SCALE, 631.000/QCELP_SCALE, 708.000/QCELP_SCALE, 794.375/QCELP_SCALE, 891.250/QCELP_SCALE, 1000.000/QCELP_SCALE}; /** * circular codebook for rate 1 frames in x*100 form * * TIA/EIA/IS-733 2.4.6.1-2 */ static const int16_t qcelp_rate_full_codebook[128] = { 10, -65, -59, 12, 110, 34, -134, 157, 104, -84, -34, -115, 23, -101, 3, 45, -101, -16, -59, 28, -45, 134, -67, 22, 61, -29, 226, -26, -55, -179, 157, -51, -220, -93, -37, 60, 118, 74, -48, -95, -181, 111, 36, -52, -215, 78, -112, 39, -17, -47, -223, 19, 12, -98, -142, 130, 54, -127, 21, -12, 39, -48, 12, 128, 6, -167, 82, -102, -79, 55, -44, 48, -20, -53, 8, -61, 11, -70, -157, -168, 20, -56, -74, 
78, 33, -63, -173, -2, -75, -53, -146, 77, 66, -29, 9, -75, 65, 119, -43, 76, 233, 98, 125, -156, -27, 78, -9, 170, 176, 143, -148, -7, 27, -136, 5, 27, 18, 139, 204, 7, -184, -197, 52, -3, 78, -189, 8, -65 }; #define QCELP_RATE_FULL_CODEBOOK_RATIO .01 /** * circular codebook for rate 1/2 frames in x*2 form * * TIA/EIA/IS-733 2.4.6.1-1 */ static const int8_t qcelp_rate_half_codebook[128] = { 0, -4, 0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 4, 0, 0, 3, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, -3, 3, 0, 0, -2, 0, 3, 0, 0, 0, 0, 0, 0, 0, -5, 0, 0, 0, 0, 3, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 6, -3, -4, 0, -3, -3, 3, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #define QCELP_RATE_HALF_CODEBOOK_RATIO 0.5 /** * sqrt(1.887) is the maximum of the pseudorandom * white sequence used to generate the scaled codebook * vector for bitrate 1/4. * * TIA/EIA/IS-733 2.4.8.1.2 */ #define QCELP_SQRT1887 1.373681186 /** * table for impulse response of BPF used to filter * the white excitation for bitrate 1/4 synthesis * * Only half the tables are needed because of symmetry. * * TIA/EIA/IS-733 2.4.8.1.2-1.1 */ static const double qcelp_rnd_fir_coefs[11] = { -1.344519e-1, 1.735384e-2, -6.905826e-2, 2.434368e-2, -8.210701e-2, 3.041388e-2, -9.251384e-2, 3.501983e-2, -9.918777e-2, 3.749518e-2, 8.985137e-1 }; /** * This spread factor is used, for bitrate 1/8 and I_F_Q, * to force the LSP frequencies to be at least 80 Hz apart. * * TIA/EIA/IS-733 2.4.3.3.2 */ #define QCELP_LSP_SPREAD_FACTOR 0.02 /** * predictor coefficient for the conversion of LSP codes * to LSP frequencies for 1/8 and I_F_Q * * TIA/EIA/IS-733 2.4.3.2.7-2 */ #define QCELP_LSP_OCTAVE_PREDICTOR 29.0/32 /** * initial coefficient to perform bandwidth expansion on LPC * * @note: 0.9883 looks like an approximation of 253/256. 
* * TIA/EIA/IS-733 2.4.3.3.6 6 */ #define QCELP_BANDWIDTH_EXPANSION_COEFF 0.9883 #endif /* AVCODEC_QCELPDATA_H */
123linslouis-android-video-cutter
jni/libavcodec/qcelpdata.h
C
asf20
20,381
/* * audio conversion * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * audio conversion * @author Michael Niedermayer <michaelni@gmx.at> */ #include "libavutil/avstring.h" #include "libavutil/libm.h" #include "avcodec.h" #include "audioconvert.h" typedef struct SampleFmtInfo { const char *name; int bits; } SampleFmtInfo; /** this table gives more information about formats */ static const SampleFmtInfo sample_fmt_info[SAMPLE_FMT_NB] = { [SAMPLE_FMT_U8] = { .name = "u8", .bits = 8 }, [SAMPLE_FMT_S16] = { .name = "s16", .bits = 16 }, [SAMPLE_FMT_S32] = { .name = "s32", .bits = 32 }, [SAMPLE_FMT_FLT] = { .name = "flt", .bits = 32 }, [SAMPLE_FMT_DBL] = { .name = "dbl", .bits = 64 }, }; const char *avcodec_get_sample_fmt_name(int sample_fmt) { if (sample_fmt < 0 || sample_fmt >= SAMPLE_FMT_NB) return NULL; return sample_fmt_info[sample_fmt].name; } enum SampleFormat avcodec_get_sample_fmt(const char* name) { int i; for (i=0; i < SAMPLE_FMT_NB; i++) if (!strcmp(sample_fmt_info[i].name, name)) return i; return SAMPLE_FMT_NONE; } void avcodec_sample_fmt_string (char *buf, int buf_size, int sample_fmt) { /* print header */ if (sample_fmt < 0) snprintf (buf, buf_size, "name " " depth"); else if (sample_fmt < 
SAMPLE_FMT_NB) { SampleFmtInfo info= sample_fmt_info[sample_fmt]; snprintf (buf, buf_size, "%-6s" " %2d ", info.name, info.bits); } } static const char* const channel_names[]={ "FL", "FR", "FC", "LFE", "BL", "BR", "FLC", "FRC", "BC", "SL", "SR", "TC", "TFL", "TFC", "TFR", "TBL", "TBC", "TBR", [29] = "DL", [30] = "DR", }; static const char *get_channel_name(int channel_id) { if (channel_id<0 || channel_id>=FF_ARRAY_ELEMS(channel_names)) return NULL; return channel_names[channel_id]; } int64_t avcodec_guess_channel_layout(int nb_channels, enum CodecID codec_id, const char *fmt_name) { switch(nb_channels) { case 1: return CH_LAYOUT_MONO; case 2: return CH_LAYOUT_STEREO; case 3: return CH_LAYOUT_SURROUND; case 4: return CH_LAYOUT_QUAD; case 5: return CH_LAYOUT_5POINT0; case 6: return CH_LAYOUT_5POINT1; case 8: return CH_LAYOUT_7POINT1; default: return 0; } } static const struct { const char *name; int nb_channels; int64_t layout; } channel_layout_map[] = { { "mono", 1, CH_LAYOUT_MONO }, { "stereo", 2, CH_LAYOUT_STEREO }, { "4.0", 4, CH_LAYOUT_4POINT0 }, { "quad", 4, CH_LAYOUT_QUAD }, { "5.0", 5, CH_LAYOUT_5POINT0 }, { "5.0", 5, CH_LAYOUT_5POINT0_BACK }, { "5.1", 6, CH_LAYOUT_5POINT1 }, { "5.1", 6, CH_LAYOUT_5POINT1_BACK }, { "5.1+downmix", 8, CH_LAYOUT_5POINT1|CH_LAYOUT_STEREO_DOWNMIX, }, { "7.1", 8, CH_LAYOUT_7POINT1 }, { "7.1(wide)", 8, CH_LAYOUT_7POINT1_WIDE }, { "7.1+downmix", 10, CH_LAYOUT_7POINT1|CH_LAYOUT_STEREO_DOWNMIX, }, { 0 } }; void avcodec_get_channel_layout_string(char *buf, int buf_size, int nb_channels, int64_t channel_layout) { int i; for (i=0; channel_layout_map[i].name; i++) if (nb_channels == channel_layout_map[i].nb_channels && channel_layout == channel_layout_map[i].layout) { av_strlcpy(buf, channel_layout_map[i].name, buf_size); return; } snprintf(buf, buf_size, "%d channels", nb_channels); if (channel_layout) { int i,ch; av_strlcat(buf, " (", buf_size); for(i=0,ch=0; i<64; i++) { if ((channel_layout & (1L<<i))) { const char *name = 
get_channel_name(i); if (name) { if (ch>0) av_strlcat(buf, "|", buf_size); av_strlcat(buf, name, buf_size); } ch++; } } av_strlcat(buf, ")", buf_size); } } int avcodec_channel_layout_num_channels(int64_t channel_layout) { int count; uint64_t x = channel_layout; for (count = 0; x; count++) x &= x-1; // unset lowest set bit return count; } struct AVAudioConvert { int in_channels, out_channels; int fmt_pair; }; AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels, enum SampleFormat in_fmt, int in_channels, const float *matrix, int flags) { AVAudioConvert *ctx; if (in_channels!=out_channels) return NULL; /* FIXME: not supported */ ctx = av_malloc(sizeof(AVAudioConvert)); if (!ctx) return NULL; ctx->in_channels = in_channels; ctx->out_channels = out_channels; ctx->fmt_pair = out_fmt + SAMPLE_FMT_NB*in_fmt; return ctx; } void av_audio_convert_free(AVAudioConvert *ctx) { av_free(ctx); } int av_audio_convert(AVAudioConvert *ctx, void * const out[6], const int out_stride[6], const void * const in[6], const int in_stride[6], int len) { int ch; //FIXME optimize common cases for(ch=0; ch<ctx->out_channels; ch++){ const int is= in_stride[ch]; const int os= out_stride[ch]; const uint8_t *pi= in[ch]; uint8_t *po= out[ch]; uint8_t *end= po + os*len; if(!out[ch]) continue; #define CONV(ofmt, otype, ifmt, expr)\ if(ctx->fmt_pair == ofmt + SAMPLE_FMT_NB*ifmt){\ do{\ *(otype*)po = expr; pi += is; po += os;\ }while(po < end);\ } //FIXME put things below under ifdefs so we do not waste space for cases no codec will need //FIXME rounding ? 
CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_U8 , *(const uint8_t*)pi) else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)<<8) else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)<<24) else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0 / (1<<7))) else CONV(SAMPLE_FMT_DBL, double , SAMPLE_FMT_U8 , (*(const uint8_t*)pi - 0x80)*(1.0 / (1<<7))) else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_S16, (*(const int16_t*)pi>>8) + 0x80) else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_S16, *(const int16_t*)pi) else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_S16, *(const int16_t*)pi<<16) else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_S16, *(const int16_t*)pi*(1.0 / (1<<15))) else CONV(SAMPLE_FMT_DBL, double , SAMPLE_FMT_S16, *(const int16_t*)pi*(1.0 / (1<<15))) else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_S32, (*(const int32_t*)pi>>24) + 0x80) else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_S32, *(const int32_t*)pi>>16) else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_S32, *(const int32_t*)pi) else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_S32, *(const int32_t*)pi*(1.0 / (1<<31))) else CONV(SAMPLE_FMT_DBL, double , SAMPLE_FMT_S32, *(const int32_t*)pi*(1.0 / (1<<31))) else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_FLT, av_clip_uint8( lrintf(*(const float*)pi * (1<<7)) + 0x80)) else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_FLT, av_clip_int16( lrintf(*(const float*)pi * (1<<15)))) else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float*)pi * (1U<<31)))) else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_FLT, *(const float*)pi) else CONV(SAMPLE_FMT_DBL, double , SAMPLE_FMT_FLT, *(const float*)pi) else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_DBL, av_clip_uint8( lrint(*(const double*)pi * (1<<7)) + 0x80)) else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_DBL, av_clip_int16( lrint(*(const double*)pi * (1<<15)))) else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const 
double*)pi * (1U<<31)))) else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_DBL, *(const double*)pi) else CONV(SAMPLE_FMT_DBL, double , SAMPLE_FMT_DBL, *(const double*)pi) else return -1; } return 0; }
123linslouis-android-video-cutter
jni/libavcodec/audioconvert.c
C
asf20
8,928
/* * AAC coefficients encoder * Copyright (C) 2008-2009 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * AAC coefficients encoder */ /*********************************** * TODOs: * speedup quantizer selection * add sane pulse detection ***********************************/ #include "avcodec.h" #include "put_bits.h" #include "aac.h" #include "aacenc.h" #include "aactab.h" /** bits needed to code codebook run value for long windows */ static const uint8_t run_value_bits_long[64] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 15 }; /** bits needed to code codebook run value for short windows */ static const uint8_t run_value_bits_short[16] = { 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 9 }; static const uint8_t *run_value_bits[2] = { run_value_bits_long, run_value_bits_short }; /** * Quantize one coefficient. 
* @return absolute value of the quantized coefficient * @see 3GPP TS26.403 5.6.2 "Scalefactor determination" */ static av_always_inline int quant(float coef, const float Q) { float a = coef * Q; return sqrtf(a * sqrtf(a)) + 0.4054; } static void quantize_bands(int (*out)[2], const float *in, const float *scaled, int size, float Q34, int is_signed, int maxval) { int i; double qc; for (i = 0; i < size; i++) { qc = scaled[i] * Q34; out[i][0] = (int)FFMIN(qc, (double)maxval); out[i][1] = (int)FFMIN(qc + 0.4054, (double)maxval); if (is_signed && in[i] < 0.0f) { out[i][0] = -out[i][0]; out[i][1] = -out[i][1]; } } } static void abs_pow34_v(float *out, const float *in, const int size) { #ifndef USE_REALLY_FULL_SEARCH int i; for (i = 0; i < size; i++) { float a = fabsf(in[i]); out[i] = sqrtf(a * sqrtf(a)); } #endif /* USE_REALLY_FULL_SEARCH */ } static const uint8_t aac_cb_range [12] = {0, 3, 3, 3, 3, 9, 9, 8, 8, 13, 13, 17}; static const uint8_t aac_cb_maxval[12] = {0, 1, 1, 2, 2, 4, 4, 7, 7, 12, 12, 16}; /** * Calculate rate distortion cost for quantizing with given codebook * * @return quantization distortion */ static float quantize_and_encode_band_cost(struct AACEncContext *s, PutBitContext *pb, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits) { const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float CLIPPED_ESCAPE = 165140.0f*IQ; int i, j, k; float cost = 0; const int dim = cb < FIRST_PAIR_BT ? 
4 : 2; int resbits = 0; #ifndef USE_REALLY_FULL_SEARCH const float Q34 = sqrtf(Q * sqrtf(Q)); const int range = aac_cb_range[cb]; const int maxval = aac_cb_maxval[cb]; int offs[4]; #endif /* USE_REALLY_FULL_SEARCH */ if (!cb) { for (i = 0; i < size; i++) cost += in[i]*in[i]; if (bits) *bits = 0; return cost * lambda; } #ifndef USE_REALLY_FULL_SEARCH offs[0] = 1; for (i = 1; i < dim; i++) offs[i] = offs[i-1]*range; if (!scaled) { abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; } quantize_bands(s->qcoefs, in, scaled, size, Q34, !IS_CODEBOOK_UNSIGNED(cb), maxval); #endif /* USE_REALLY_FULL_SEARCH */ for (i = 0; i < size; i += dim) { float mincost; int minidx = 0; int minbits = 0; const float *vec; #ifndef USE_REALLY_FULL_SEARCH int (*quants)[2] = &s->qcoefs[i]; mincost = 0.0f; for (j = 0; j < dim; j++) mincost += in[i+j]*in[i+j]; minidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40; minbits = ff_aac_spectral_bits[cb-1][minidx]; mincost = mincost * lambda + minbits; for (j = 0; j < (1<<dim); j++) { float rd = 0.0f; int curbits; int curidx = IS_CODEBOOK_UNSIGNED(cb) ? 
0 : 40; int same = 0; for (k = 0; k < dim; k++) { if ((j & (1 << k)) && quants[k][0] == quants[k][1]) { same = 1; break; } } if (same) continue; for (k = 0; k < dim; k++) curidx += quants[k][!!(j & (1 << k))] * offs[dim - 1 - k]; curbits = ff_aac_spectral_bits[cb-1][curidx]; vec = &ff_aac_codebook_vectors[cb-1][curidx*dim]; #else mincost = INFINITY; vec = ff_aac_codebook_vectors[cb-1]; for (j = 0; j < ff_aac_spectral_sizes[cb-1]; j++, vec += dim) { float rd = 0.0f; int curbits = ff_aac_spectral_bits[cb-1][j]; int curidx = j; #endif /* USE_REALLY_FULL_SEARCH */ if (IS_CODEBOOK_UNSIGNED(cb)) { for (k = 0; k < dim; k++) { float t = fabsf(in[i+k]); float di; if (vec[k] == 64.0f) { //FIXME: slow //do not code with escape sequence small values if (t < 39.0f*IQ) { rd = INFINITY; break; } if (t >= CLIPPED_ESCAPE) { di = t - CLIPPED_ESCAPE; curbits += 21; } else { int c = av_clip(quant(t, Q), 0, 8191); di = t - c*cbrtf(c)*IQ; curbits += av_log2(c)*2 - 4 + 1; } } else { di = t - vec[k]*IQ; } if (vec[k] != 0.0f) curbits++; rd += di*di; } } else { for (k = 0; k < dim; k++) { float di = in[i+k] - vec[k]*IQ; rd += di*di; } } rd = rd * lambda + curbits; if (rd < mincost) { mincost = rd; minidx = curidx; minbits = curbits; } } cost += mincost; resbits += minbits; if (cost >= uplim) return uplim; if (pb) { put_bits(pb, ff_aac_spectral_bits[cb-1][minidx], ff_aac_spectral_codes[cb-1][minidx]); if (IS_CODEBOOK_UNSIGNED(cb)) for (j = 0; j < dim; j++) if (ff_aac_codebook_vectors[cb-1][minidx*dim+j] != 0.0f) put_bits(pb, 1, in[i+j] < 0.0f); if (cb == ESC_BT) { for (j = 0; j < 2; j++) { if (ff_aac_codebook_vectors[cb-1][minidx*2+j] == 64.0f) { int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191); int len = av_log2(coef); put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2); put_bits(pb, len, coef & ((1 << len) - 1)); } } } } } if (bits) *bits = resbits; return cost; } static float quantize_band_cost(struct AACEncContext *s, const float *in, const float *scaled, int size, int scale_idx, 
int cb, const float lambda, const float uplim, int *bits) { return quantize_and_encode_band_cost(s, NULL, in, scaled, size, scale_idx, cb, lambda, uplim, bits); } static void quantize_and_encode_band(struct AACEncContext *s, PutBitContext *pb, const float *in, int size, int scale_idx, int cb, const float lambda) { quantize_and_encode_band_cost(s, pb, in, NULL, size, scale_idx, cb, lambda, INFINITY, NULL); } /** * structure used in optimal codebook search */ typedef struct BandCodingPath { int prev_idx; ///< pointer to the previous path point float cost; ///< path cost int run; } BandCodingPath; /** * Encode band info for single window group bands. */ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce, int win, int group_len, const float lambda) { BandCodingPath path[120][12]; int w, swb, cb, start, start2, size; int i, j; const int max_sfb = sce->ics.max_sfb; const int run_bits = sce->ics.num_windows == 1 ? 5 : 3; const int run_esc = (1 << run_bits) - 1; int idx, ppos, count; int stackrun[120], stackcb[120], stack_len; float next_minrd = INFINITY; int next_mincb = 0; abs_pow34_v(s->scoefs, sce->coeffs, 1024); start = win*128; for (cb = 0; cb < 12; cb++) { path[0][cb].cost = 0.0f; path[0][cb].prev_idx = -1; path[0][cb].run = 0; } for (swb = 0; swb < max_sfb; swb++) { start2 = start; size = sce->ics.swb_sizes[swb]; if (sce->zeroes[win*16 + swb]) { for (cb = 0; cb < 12; cb++) { path[swb+1][cb].prev_idx = cb; path[swb+1][cb].cost = path[swb][cb].cost; path[swb+1][cb].run = path[swb][cb].run + 1; } } else { float minrd = next_minrd; int mincb = next_mincb; next_minrd = INFINITY; next_mincb = 0; for (cb = 0; cb < 12; cb++) { float cost_stay_here, cost_get_here; float rd = 0.0f; for (w = 0; w < group_len; w++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(win+w)*16+swb]; rd += quantize_band_cost(s, sce->coeffs + start + w*128, s->scoefs + start + w*128, size, sce->sf_idx[(win+w)*16+swb], cb, lambda / band->threshold, 
INFINITY, NULL); } cost_stay_here = path[swb][cb].cost + rd; cost_get_here = minrd + rd + run_bits + 4; if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run] != run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1]) cost_stay_here += run_bits; if (cost_get_here < cost_stay_here) { path[swb+1][cb].prev_idx = mincb; path[swb+1][cb].cost = cost_get_here; path[swb+1][cb].run = 1; } else { path[swb+1][cb].prev_idx = cb; path[swb+1][cb].cost = cost_stay_here; path[swb+1][cb].run = path[swb][cb].run + 1; } if (path[swb+1][cb].cost < next_minrd) { next_minrd = path[swb+1][cb].cost; next_mincb = cb; } } } start += sce->ics.swb_sizes[swb]; } //convert resulting path from backward-linked list stack_len = 0; idx = 0; for (cb = 1; cb < 12; cb++) if (path[max_sfb][cb].cost < path[max_sfb][idx].cost) idx = cb; ppos = max_sfb; while (ppos > 0) { cb = idx; stackrun[stack_len] = path[ppos][cb].run; stackcb [stack_len] = cb; idx = path[ppos-path[ppos][cb].run+1][cb].prev_idx; ppos -= path[ppos][cb].run; stack_len++; } //perform actual band info encoding start = 0; for (i = stack_len - 1; i >= 0; i--) { put_bits(&s->pb, 4, stackcb[i]); count = stackrun[i]; memset(sce->zeroes + win*16 + start, !stackcb[i], count); //XXX: memset when band_type is also uint8_t for (j = 0; j < count; j++) { sce->band_type[win*16 + start] = stackcb[i]; start++; } while (count >= run_esc) { put_bits(&s->pb, run_bits, run_esc); count -= run_esc; } put_bits(&s->pb, run_bits, count); } } typedef struct TrellisPath { float cost; int prev; int min_val; int max_val; } TrellisPath; #define TRELLIS_STAGES 121 #define TRELLIS_STATES 256 static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; for (i = 0; i < TRELLIS_STATES; i++) { paths[0][i].cost = 0.0f; paths[0][i].prev = 
-1; paths[0][i].min_val = i; paths[0][i].max_val = i; } for (j = 1; j < TRELLIS_STAGES; j++) { for (i = 0; i < TRELLIS_STATES; i++) { paths[j][i].cost = INFINITY; paths[j][i].prev = -2; paths[j][i].min_val = INT_MAX; paths[j][i].max_val = 0; } } idx = 1; abs_pow34_v(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = sce->coeffs + start; float qmin, qmax; int nz = 0; bandaddr[idx] = w * 16 + g; qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } sce->zeroes[(w+w2)*16+g] = 0; nz = 1; for (i = 0; i < sce->ics.swb_sizes[g]; i++) { float t = fabsf(coefs[w2*128+i]); if (t > 0.0f) qmin = FFMIN(qmin, t); qmax = FFMAX(qmax, t); } } if (nz) { int minscale, maxscale; float minrd = INFINITY; //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped minscale = av_clip_uint8(log2(qmin)*4 - 69 + SCALE_ONE_POS - SCALE_DIV_512); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero maxscale = av_clip_uint8(log2(qmax)*4 + 6 + SCALE_ONE_POS - SCALE_DIV_512); for (q = minscale; q < maxscale; q++) { float dists[12], dist; memset(dists, 0, sizeof(dists)); for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; int cb; for (cb = 0; cb <= ESC_BT; cb++) dists[cb] += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g], q, cb, lambda / band->threshold, INFINITY, NULL); } dist = dists[0]; for (i = 1; i <= ESC_BT; i++) dist = FFMIN(dist, dists[i]); minrd = FFMIN(minrd, dist); for (i = FFMAX(q - SCALE_MAX_DIFF, 0); i < FFMIN(q + SCALE_MAX_DIFF, TRELLIS_STATES); i++) { float cost; int 
minv, maxv; if (isinf(paths[idx - 1][i].cost)) continue; cost = paths[idx - 1][i].cost + dist + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; minv = FFMIN(paths[idx - 1][i].min_val, q); maxv = FFMAX(paths[idx - 1][i].max_val, q); if (cost < paths[idx][q].cost && maxv-minv < SCALE_MAX_DIFF) { paths[idx][q].cost = cost; paths[idx][q].prev = i; paths[idx][q].min_val = minv; paths[idx][q].max_val = maxv; } } } } else { for (q = 0; q < TRELLIS_STATES; q++) { if (!isinf(paths[idx - 1][q].cost)) { paths[idx][q].cost = paths[idx - 1][q].cost + 1; paths[idx][q].prev = q; paths[idx][q].min_val = FFMIN(paths[idx - 1][q].min_val, q); paths[idx][q].max_val = FFMAX(paths[idx - 1][q].max_val, q); continue; } for (i = FFMAX(q - SCALE_MAX_DIFF, 0); i < FFMIN(q + SCALE_MAX_DIFF, TRELLIS_STATES); i++) { float cost; int minv, maxv; if (isinf(paths[idx - 1][i].cost)) continue; cost = paths[idx - 1][i].cost + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; minv = FFMIN(paths[idx - 1][i].min_val, q); maxv = FFMAX(paths[idx - 1][i].max_val, q); if (cost < paths[idx][q].cost && maxv-minv < SCALE_MAX_DIFF) { paths[idx][q].cost = cost; paths[idx][q].prev = i; paths[idx][q].min_val = minv; paths[idx][q].max_val = maxv; } } } } sce->zeroes[w*16+g] = !nz; start += sce->ics.swb_sizes[g]; idx++; } } idx--; mincost = paths[idx][0].cost; minq = 0; for (i = 1; i < TRELLIS_STATES; i++) { if (paths[idx][i].cost < mincost) { mincost = paths[idx][i].cost; minq = i; } } while (idx) { sce->sf_idx[bandaddr[idx]] = minq; minq = paths[idx][minq].prev; idx--; } //set the same quantizers inside window groups for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) for (g = 0; g < sce->ics.num_swb; g++) for (w2 = 1; w2 < sce->ics.group_len[w]; w2++) sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g]; } /** * two-loop quantizers search taken from ISO 13818-7 Appendix C */ static void search_for_quantizers_twoloop(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { 
int start = 0, i, w, w2, g; int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels; float dists[128], uplims[128]; int fflag, minscaler; int its = 0; int allz = 0; float minthr = INFINITY; //XXX: some heuristic to determine initial quantizers will reduce search time memset(dists, 0, sizeof(dists)); //determine zero bands and upper limits for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (g = 0; g < sce->ics.num_swb; g++) { int nz = 0; float uplim = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; uplim += band->threshold; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } nz = 1; } uplims[w*16+g] = uplim *512; sce->zeroes[w*16+g] = !nz; if (nz) minthr = FFMIN(minthr, uplim); allz = FFMAX(allz, nz); } } for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (g = 0; g < sce->ics.num_swb; g++) { if (sce->zeroes[w*16+g]) { sce->sf_idx[w*16+g] = SCALE_ONE_POS; continue; } sce->sf_idx[w*16+g] = SCALE_ONE_POS + FFMIN(log2(uplims[w*16+g]/minthr)*4,59); } } if (!allz) return; abs_pow34_v(s->scoefs, sce->coeffs, 1024); //perform two-loop search //outer loop - improve quality do { int tbits, qstep; minscaler = sce->sf_idx[0]; //inner loop - quantize spectrum to fit into given number of bits qstep = its ? 
1 : 32; do { int prev = -1; tbits = 0; fflag = 0; for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = sce->coeffs + start; const float *scaled = s->scoefs + start; int bits = 0; int cb; float mindist = INFINITY; int minbits = 0; if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) { start += sce->ics.swb_sizes[g]; continue; } minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]); for (cb = 0; cb <= ESC_BT; cb++) { float dist = 0.0f; int bb = 0; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { int b; dist += quantize_band_cost(s, coefs + w2*128, scaled + w2*128, sce->ics.swb_sizes[g], sce->sf_idx[w*16+g], cb, lambda, INFINITY, &b); bb += b; } if (dist < mindist) { mindist = dist; minbits = bb; } } dists[w*16+g] = (mindist - minbits) / lambda; bits = minbits; if (prev != -1) { bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO]; } tbits += bits; start += sce->ics.swb_sizes[g]; prev = sce->sf_idx[w*16+g]; } } if (tbits > destbits) { for (i = 0; i < 128; i++) if (sce->sf_idx[i] < 218 - qstep) sce->sf_idx[i] += qstep; } else { for (i = 0; i < 128; i++) if (sce->sf_idx[i] > 60 - qstep) sce->sf_idx[i] -= qstep; } qstep >>= 1; if (!qstep && tbits > destbits*1.02) qstep = 1; if (sce->sf_idx[0] >= 217) break; } while (qstep); fflag = 0; minscaler = av_clip(minscaler, 60, 255 - SCALE_MAX_DIFF); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { int prevsc = sce->sf_idx[w*16+g]; if (dists[w*16+g] > uplims[w*16+g] && sce->sf_idx[w*16+g] > 60) sce->sf_idx[w*16+g]--; sce->sf_idx[w*16+g] = av_clip(sce->sf_idx[w*16+g], minscaler, minscaler + SCALE_MAX_DIFF); sce->sf_idx[w*16+g] = FFMIN(sce->sf_idx[w*16+g], 219); if (sce->sf_idx[w*16+g] != prevsc) fflag = 1; } } its++; } while (fflag && its < 10); } static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s, 
SingleChannelElement *sce, const float lambda) { int start = 0, i, w, w2, g; float uplim[128], maxq[128]; int minq, maxsf; float distfact = ((sce->ics.num_windows > 1) ? 85.80 : 147.84) / lambda; int last = 0, lastband = 0, curband = 0; float avg_energy = 0.0; if (sce->ics.num_windows == 1) { start = 0; for (i = 0; i < 1024; i++) { if (i - start >= sce->ics.swb_sizes[curband]) { start += sce->ics.swb_sizes[curband]; curband++; } if (sce->coeffs[i]) { avg_energy += sce->coeffs[i] * sce->coeffs[i]; last = i; lastband = curband; } } } else { for (w = 0; w < 8; w++) { const float *coeffs = sce->coeffs + w*128; start = 0; for (i = 0; i < 128; i++) { if (i - start >= sce->ics.swb_sizes[curband]) { start += sce->ics.swb_sizes[curband]; curband++; } if (coeffs[i]) { avg_energy += coeffs[i] * coeffs[i]; last = FFMAX(last, i); lastband = FFMAX(lastband, curband); } } } } last++; avg_energy /= last; if (avg_energy == 0.0f) { for (i = 0; i < FF_ARRAY_ELEMS(sce->sf_idx); i++) sce->sf_idx[i] = SCALE_ONE_POS; return; } for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { float *coefs = sce->coeffs + start; const int size = sce->ics.swb_sizes[g]; int start2 = start, end2 = start + size, peakpos = start; float maxval = -1, thr = 0.0f, t; maxq[w*16+g] = 0.0f; if (g > lastband) { maxq[w*16+g] = 0.0f; start += size; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) memset(coefs + w2*128, 0, sizeof(coefs[0])*size); continue; } for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { for (i = 0; i < size; i++) { float t = coefs[w2*128+i]*coefs[w2*128+i]; maxq[w*16+g] = FFMAX(maxq[w*16+g], fabsf(coefs[w2*128 + i])); thr += t; if (sce->ics.num_windows == 1 && maxval < t) { maxval = t; peakpos = start+i; } } } if (sce->ics.num_windows == 1) { start2 = FFMAX(peakpos - 2, start2); end2 = FFMIN(peakpos + 3, end2); } else { start2 -= start; end2 -= start; } start += size; thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 
0.1*(lastband - g) / lastband); t = 1.0 - (1.0 * start2 / last); uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075); } } memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); abs_pow34_v(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = sce->coeffs + start; const float *scaled = s->scoefs + start; const int size = sce->ics.swb_sizes[g]; int scf, prev_scf, step; int min_scf = -1, max_scf = 256; float curdiff; if (maxq[w*16+g] < 21.544) { sce->zeroes[w*16+g] = 1; start += size; continue; } sce->zeroes[w*16+g] = 0; scf = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2(1/maxq[w*16+g])*16/3, 60, 218); step = 16; for (;;) { float dist = 0.0f; int quant_max; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { int b; dist += quantize_band_cost(s, coefs + w2*128, scaled + w2*128, sce->ics.swb_sizes[g], scf, ESC_BT, lambda, INFINITY, &b); dist -= b; } dist *= 1.0f / 512.0f / lambda; quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[200 - scf + SCALE_ONE_POS - SCALE_DIV_512]); if (quant_max >= 8191) { // too much, return to the previous quantizer sce->sf_idx[w*16+g] = prev_scf; break; } prev_scf = scf; curdiff = fabsf(dist - uplim[w*16+g]); if (curdiff <= 1.0f) step = 0; else step = log2(curdiff); if (dist > uplim[w*16+g]) step = -step; scf += step; scf = av_clip_uint8(scf); step = scf - prev_scf; if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) { sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf); break; } if (step > 0) min_scf = prev_scf; else max_scf = prev_scf; } start += size; } } minq = sce->sf_idx[0] ? 
sce->sf_idx[0] : INT_MAX; for (i = 1; i < 128; i++) { if (!sce->sf_idx[i]) sce->sf_idx[i] = sce->sf_idx[i-1]; else minq = FFMIN(minq, sce->sf_idx[i]); } if (minq == INT_MAX) minq = 0; minq = FFMIN(minq, SCALE_MAX_POS); maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS); for (i = 126; i >= 0; i--) { if (!sce->sf_idx[i]) sce->sf_idx[i] = sce->sf_idx[i+1]; sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf); } } static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int start = 0, i, w, w2, g; int minq = 255; memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; if (band->energy <= band->threshold) { sce->sf_idx[(w+w2)*16+g] = 218; sce->zeroes[(w+w2)*16+g] = 1; } else { sce->sf_idx[(w+w2)*16+g] = av_clip(SCALE_ONE_POS - SCALE_DIV_512 + log2(band->threshold), 80, 218); sce->zeroes[(w+w2)*16+g] = 0; } minq = FFMIN(minq, sce->sf_idx[(w+w2)*16+g]); } } } for (i = 0; i < 128; i++) { sce->sf_idx[i] = 140; //av_clip(sce->sf_idx[i], minq, minq + SCALE_MAX_DIFF - 1); } //set the same quantizers inside window groups for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) for (g = 0; g < sce->ics.num_swb; g++) for (w2 = 1; w2 < sce->ics.group_len[w]; w2++) sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g]; } static void search_for_ms(AACEncContext *s, ChannelElement *cpe, const float lambda) { int start = 0, i, w, w2, g; float M[128], S[128]; float *L34 = s->scoefs, *R34 = s->scoefs + 128, *M34 = s->scoefs + 128*2, *S34 = s->scoefs + 128*3; SingleChannelElement *sce0 = &cpe->ch[0]; SingleChannelElement *sce1 = &cpe->ch[1]; if (!cpe->common_window) return; for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) { for (g = 0; g < sce0->ics.num_swb; 
g++) { if (!cpe->ch[0].zeroes[w*16+g] && !cpe->ch[1].zeroes[w*16+g]) { float dist1 = 0.0f, dist2 = 0.0f; for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { FFPsyBand *band0 = &s->psy.psy_bands[(s->cur_channel+0)*PSY_MAX_BANDS+(w+w2)*16+g]; FFPsyBand *band1 = &s->psy.psy_bands[(s->cur_channel+1)*PSY_MAX_BANDS+(w+w2)*16+g]; float minthr = FFMIN(band0->threshold, band1->threshold); float maxthr = FFMAX(band0->threshold, band1->threshold); for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { M[i] = (sce0->coeffs[start+w2*128+i] + sce1->coeffs[start+w2*128+i]) * 0.5; S[i] = sce0->coeffs[start+w2*128+i] - sce1->coeffs[start+w2*128+i]; } abs_pow34_v(L34, sce0->coeffs+start+w2*128, sce0->ics.swb_sizes[g]); abs_pow34_v(R34, sce1->coeffs+start+w2*128, sce0->ics.swb_sizes[g]); abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]); abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]); dist1 += quantize_band_cost(s, sce0->coeffs + start + w2*128, L34, sce0->ics.swb_sizes[g], sce0->sf_idx[(w+w2)*16+g], sce0->band_type[(w+w2)*16+g], lambda / band0->threshold, INFINITY, NULL); dist1 += quantize_band_cost(s, sce1->coeffs + start + w2*128, R34, sce1->ics.swb_sizes[g], sce1->sf_idx[(w+w2)*16+g], sce1->band_type[(w+w2)*16+g], lambda / band1->threshold, INFINITY, NULL); dist2 += quantize_band_cost(s, M, M34, sce0->ics.swb_sizes[g], sce0->sf_idx[(w+w2)*16+g], sce0->band_type[(w+w2)*16+g], lambda / maxthr, INFINITY, NULL); dist2 += quantize_band_cost(s, S, S34, sce1->ics.swb_sizes[g], sce1->sf_idx[(w+w2)*16+g], sce1->band_type[(w+w2)*16+g], lambda / minthr, INFINITY, NULL); } cpe->ms_mask[w*16+g] = dist2 < dist1; } start += sce0->ics.swb_sizes[g]; } } } AACCoefficientsEncoder ff_aac_coders[] = { { search_for_quantizers_faac, encode_window_bands_info, quantize_and_encode_band, search_for_ms, }, { search_for_quantizers_anmr, encode_window_bands_info, quantize_and_encode_band, search_for_ms, }, { search_for_quantizers_twoloop, encode_window_bands_info, quantize_and_encode_band, search_for_ms, }, { 
search_for_quantizers_fast, encode_window_bands_info, quantize_and_encode_band, search_for_ms, }, };
123linslouis-android-video-cutter
jni/libavcodec/aaccoder.c
C
asf20
37,998
/* * internals for BMP codecs * Copyright (c) 2005 Mans Rullgard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_BMP_H #define AVCODEC_BMP_H #include "avcodec.h" typedef struct BMPContext { AVFrame picture; } BMPContext; typedef enum { BMP_RGB =0, BMP_RLE8 =1, BMP_RLE4 =2, BMP_BITFIELDS =3, } BiCompression; #endif /* AVCODEC_BMP_H */
123linslouis-android-video-cutter
jni/libavcodec/bmp.h
C
asf20
1,118
/* * LCL (LossLess Codec Library) Codec * Copyright (c) 2002-2004 Roberto Togni * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * LCL (LossLess Codec Library) Video Codec * Decoder for MSZH and ZLIB codecs * Experimental encoder for ZLIB RGB24 * * Fourcc: MSZH, ZLIB * * Original Win32 dll: * Ver2.23 By Kenji Oshima 2000.09.20 * avimszh.dll, avizlib.dll * * A description of the decoding algorithm can be found here: * http://www.pcisys.net/~melanson/codecs * * Supports: BGR24 (RGB 24bpp) * */ #include <stdio.h> #include <stdlib.h> #include "avcodec.h" #include "lcl.h" #include <zlib.h> /* * Decoder context */ typedef struct LclEncContext { AVCodecContext *avctx; AVFrame pic; // Image type int imgtype; // Compression type int compression; // Flags int flags; z_stream zstream; } LclEncContext; /* * * Encode a frame * */ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ LclEncContext *c = avctx->priv_data; AVFrame *pict = data; AVFrame * const p = &c->pic; int i; int zret; // Zlib return code *p = *pict; p->pict_type= FF_I_TYPE; p->key_frame= 1; if(avctx->pix_fmt != PIX_FMT_BGR24){ av_log(avctx, AV_LOG_ERROR, "Format not supported!\n"); return -1; } zret = deflateReset(&c->zstream); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, 
"Deflate reset error: %d\n", zret); return -1; } c->zstream.next_out = buf; c->zstream.avail_out = buf_size; for(i = avctx->height - 1; i >= 0; i--) { c->zstream.next_in = p->data[0]+p->linesize[0]*i; c->zstream.avail_in = avctx->width*3; zret = deflate(&c->zstream, Z_NO_FLUSH); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "Deflate error: %d\n", zret); return -1; } } zret = deflate(&c->zstream, Z_FINISH); if (zret != Z_STREAM_END) { av_log(avctx, AV_LOG_ERROR, "Deflate error: %d\n", zret); return -1; } return c->zstream.total_out; } /* * * Init lcl encoder * */ static av_cold int encode_init(AVCodecContext *avctx) { LclEncContext *c = avctx->priv_data; int zret; // Zlib return code c->avctx= avctx; assert(avctx->width && avctx->height); avctx->extradata= av_mallocz(8); avctx->coded_frame= &c->pic; // Will be user settable someday c->compression = 6; c->flags = 0; switch(avctx->pix_fmt){ case PIX_FMT_BGR24: c->imgtype = IMGTYPE_RGB24; avctx->bits_per_coded_sample= 24; break; default: av_log(avctx, AV_LOG_ERROR, "Input pixel format %s not supported\n", avcodec_get_pix_fmt_name(avctx->pix_fmt)); return -1; } avctx->extradata[0]= 4; avctx->extradata[1]= 0; avctx->extradata[2]= 0; avctx->extradata[3]= 0; avctx->extradata[4]= c->imgtype; avctx->extradata[5]= c->compression; avctx->extradata[6]= c->flags; avctx->extradata[7]= CODEC_ZLIB; c->avctx->extradata_size= 8; c->zstream.zalloc = Z_NULL; c->zstream.zfree = Z_NULL; c->zstream.opaque = Z_NULL; zret = deflateInit(&c->zstream, c->compression); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "Deflate init error: %d\n", zret); return 1; } return 0; } /* * * Uninit lcl encoder * */ static av_cold int encode_end(AVCodecContext *avctx) { LclEncContext *c = avctx->priv_data; av_freep(&avctx->extradata); deflateEnd(&c->zstream); return 0; } AVCodec zlib_encoder = { "zlib", AVMEDIA_TYPE_VIDEO, CODEC_ID_ZLIB, sizeof(LclEncContext), encode_init, encode_frame, encode_end, .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec 
Library) ZLIB"), };
123linslouis-android-video-cutter
jni/libavcodec/lclenc.c
C
asf20
4,575
/*
 * S3 Texture Compression (S3TC) decoding functions
 * Copyright (c) 2007 by Ivo van Poorten
 *
 * see also: http://wiki.multimedia.cx/index.php?title=S3TC
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "s3tc.h"

/**
 * Decode one 4x4 DXT color block into 32 bpp output.
 *
 * @param s       8-byte color block: two little-endian RGB565 endpoint
 *                colors followed by 32 bits of 2-bit per-pixel indices
 * @param d       top-left output pixel of the block (32-bit pixels)
 * @param qstride output stride in 32-bit pixels
 * @param flag    0 for DXT1 (opaque / 1-bit punch-through alpha),
 *                nonzero for DXT3 (per-pixel alpha taken from 'alpha')
 * @param alpha   sixteen 4-bit alpha values (DXT3 only); the low nibble
 *                belongs to the first pixel, consumed 4 bits per pixel
 */
static inline void dxt1_decode_pixels(const uint8_t *s, uint32_t *d,
                                      unsigned int qstride, unsigned int flag,
                                      uint64_t alpha) {
    // In DXT1 mode (flag == 0) the palette entries get a fully opaque
    // alpha byte here; in DXT3 mode alpha is added per pixel below instead.
    unsigned int x, y, c0, c1, a = (!flag * 255) << 24;
    unsigned int rb0, rb1, rb2, rb3, g0, g1, g2, g3;
    uint32_t colors[4], pixels;

    // the two RGB565 endpoint colors of the block
    c0 = AV_RL16(s);
    c1 = AV_RL16(s+2);

    // Expand the 5-bit red/blue fields to 8 bits, packed as 0x00RR00BB:
    // shift into place, then replicate the top bits into the low bits.
    rb0 = (c0<<3 | c0<<8) & 0xf800f8;
    rb1 = (c1<<3 | c1<<8) & 0xf800f8;
    rb0 += (rb0>>5) & 0x070007;
    rb1 += (rb1>>5) & 0x070007;

    // same expansion for the 6-bit green field, kept as 0x0000GG00
    g0 = (c0 <<5) & 0x00fc00;
    g1 = (c1 <<5) & 0x00fc00;
    g0 += (g0 >>6) & 0x000300;
    g1 += (g1 >>6) & 0x000300;

    colors[0] = rb0 + g0 + a;
    colors[1] = rb1 + g1 + a;

    if (c0 > c1 || flag) {
        // 4-color mode: the two intermediate entries are the 2:1 blends of
        // the endpoints; * 21 >> 6 approximates division by 3 per channel,
        // operating on both packed bytes of rb at once.
        rb2 = (((2*rb0+rb1) * 21) >> 6) & 0xff00ff;
        rb3 = (((2*rb1+rb0) * 21) >> 6) & 0xff00ff;
        g2 = (((2*g0 +g1 ) * 21) >> 6) & 0x00ff00;
        g3 = (((2*g1 +g0 ) * 21) >> 6) & 0x00ff00;
        colors[3] = rb3 + g3 + a;
    } else {
        // 3-color mode (DXT1, c0 <= c1): entry 2 is the midpoint and
        // entry 3 is transparent black (the 1-bit punch-through alpha)
        rb2 = ((rb0+rb1) >> 1) & 0xff00ff;
        g2 = ((g0 +g1 ) >> 1) & 0x00ff00;
        colors[3] = 0;
    }
    colors[2] = rb2 + g2 + a;

    // 16 x 2-bit palette indices, least significant pair first
    pixels = AV_RL32(s+4);
    for (y=0; y<4; y++) {
        for (x=0; x<4; x++) {
            // expand the pixel's 4-bit alpha nibble to 8 bits in the top
            // byte (a += a>>4 replicates the nibble); 0 in DXT1 mode, where
            // the palette entries already carry their alpha
            a = (alpha & 0x0f) << 28;
            a += a >> 4;
            d[x] = a + colors[pixels&3];
            pixels >>= 2;
            alpha >>= 4;
        }
        d += qstride;   // next output row inside the 4x4 block
    }
}

/**
 * Decode a DXT1-compressed image into 32 bpp output.
 * Assumes w and h are multiples of 4 and stride is a multiple of 4 bytes
 * — TODO confirm callers guarantee this.
 */
void ff_decode_dxt1(const uint8_t *s, uint8_t *dst,
                    const unsigned int w, const unsigned int h,
                    const unsigned int stride) {
    unsigned int bx, by, qstride = stride/4;
    uint32_t *d = (uint32_t *) dst;

    // After a row of blocks, d has advanced w pixels; adding stride-w
    // (numerically 4*qstride - w 32-bit pixels, since stride is counted in
    // bytes) skips the remaining three rows of the just-decoded block row.
    for (by=0; by < h/4; by++, d += stride-w)
        for (bx=0; bx < w/4; bx++, s+=8, d+=4)
            dxt1_decode_pixels(s, d, qstride, 0, 0LL);
}

/**
 * Decode a DXT3-compressed image into 32 bpp output.
 * Each 16-byte block is 8 bytes of 4-bit alpha values followed by a
 * DXT1-style color block. Same size assumptions as ff_decode_dxt1().
 */
void ff_decode_dxt3(const uint8_t *s, uint8_t *dst,
                    const unsigned int w, const unsigned int h,
                    const unsigned int stride) {
    unsigned int bx, by, qstride = stride/4;
    uint32_t *d = (uint32_t *) dst;

    for (by=0; by < h/4; by++, d += stride-w)
        for (bx=0; bx < w/4; bx++, s+=16, d+=4)
            dxt1_decode_pixels(s+8, d, qstride, 1, AV_RL64(s));
}
123linslouis-android-video-cutter
jni/libavcodec/s3tc.c
C
asf20
3,295
/* * copyright (c) 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef AVCODEC_AVCODEC_H #define AVCODEC_AVCODEC_H /** * @file * external API header */ #include <errno.h> #include "libavutil/avutil.h" #define LIBAVCODEC_VERSION_MAJOR 52 #define LIBAVCODEC_VERSION_MINOR 72 #define LIBAVCODEC_VERSION_MICRO 2 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ LIBAVCODEC_VERSION_MICRO) #define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ LIBAVCODEC_VERSION_MICRO) #define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT #define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) #define AV_NOPTS_VALUE INT64_C(0x8000000000000000) #define AV_TIME_BASE 1000000 #define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE} /** * Identifies the syntax and semantics of the bitstream. * The principle is roughly: * Two decoders with the same ID can decode the same streams. * Two encoders with the same ID can encode compatible streams. * There may be slight deviations from the principle due to implementation * details. * * If you add a codec ID to this list, add it so that * 1. no value of a existing codec ID changes (that would break ABI), * 2. it is as close as possible to similar codecs. 
*/ enum CodecID { CODEC_ID_NONE, /* video codecs */ CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding CODEC_ID_MPEG2VIDEO_XVMC, CODEC_ID_H261, CODEC_ID_H263, CODEC_ID_RV10, CODEC_ID_RV20, CODEC_ID_MJPEG, CODEC_ID_MJPEGB, CODEC_ID_LJPEG, CODEC_ID_SP5X, CODEC_ID_JPEGLS, CODEC_ID_MPEG4, CODEC_ID_RAWVIDEO, CODEC_ID_MSMPEG4V1, CODEC_ID_MSMPEG4V2, CODEC_ID_MSMPEG4V3, CODEC_ID_WMV1, CODEC_ID_WMV2, CODEC_ID_H263P, CODEC_ID_H263I, CODEC_ID_FLV1, CODEC_ID_SVQ1, CODEC_ID_SVQ3, CODEC_ID_DVVIDEO, CODEC_ID_HUFFYUV, CODEC_ID_CYUV, CODEC_ID_H264, CODEC_ID_INDEO3, CODEC_ID_VP3, CODEC_ID_THEORA, CODEC_ID_ASV1, CODEC_ID_ASV2, CODEC_ID_FFV1, CODEC_ID_4XM, CODEC_ID_VCR1, CODEC_ID_CLJR, CODEC_ID_MDEC, CODEC_ID_ROQ, CODEC_ID_INTERPLAY_VIDEO, CODEC_ID_XAN_WC3, CODEC_ID_XAN_WC4, CODEC_ID_RPZA, CODEC_ID_CINEPAK, CODEC_ID_WS_VQA, CODEC_ID_MSRLE, CODEC_ID_MSVIDEO1, CODEC_ID_IDCIN, CODEC_ID_8BPS, CODEC_ID_SMC, CODEC_ID_FLIC, CODEC_ID_TRUEMOTION1, CODEC_ID_VMDVIDEO, CODEC_ID_MSZH, CODEC_ID_ZLIB, CODEC_ID_QTRLE, CODEC_ID_SNOW, CODEC_ID_TSCC, CODEC_ID_ULTI, CODEC_ID_QDRAW, CODEC_ID_VIXL, CODEC_ID_QPEG, #if LIBAVCODEC_VERSION_MAJOR < 53 CODEC_ID_XVID, #endif CODEC_ID_PNG, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PAM, CODEC_ID_FFVHUFF, CODEC_ID_RV30, CODEC_ID_RV40, CODEC_ID_VC1, CODEC_ID_WMV3, CODEC_ID_LOCO, CODEC_ID_WNV1, CODEC_ID_AASC, CODEC_ID_INDEO2, CODEC_ID_FRAPS, CODEC_ID_TRUEMOTION2, CODEC_ID_BMP, CODEC_ID_CSCD, CODEC_ID_MMVIDEO, CODEC_ID_ZMBV, CODEC_ID_AVS, CODEC_ID_SMACKVIDEO, CODEC_ID_NUV, CODEC_ID_KMVC, CODEC_ID_FLASHSV, CODEC_ID_CAVS, CODEC_ID_JPEG2000, CODEC_ID_VMNC, CODEC_ID_VP5, CODEC_ID_VP6, CODEC_ID_VP6F, CODEC_ID_TARGA, CODEC_ID_DSICINVIDEO, CODEC_ID_TIERTEXSEQVIDEO, CODEC_ID_TIFF, CODEC_ID_GIF, CODEC_ID_FFH264, CODEC_ID_DXA, CODEC_ID_DNXHD, CODEC_ID_THP, CODEC_ID_SGI, CODEC_ID_C93, CODEC_ID_BETHSOFTVID, CODEC_ID_PTX, CODEC_ID_TXD, CODEC_ID_VP6A, CODEC_ID_AMV, CODEC_ID_VB, CODEC_ID_PCX, CODEC_ID_SUNRAST, 
CODEC_ID_INDEO4, CODEC_ID_INDEO5, CODEC_ID_MIMIC, CODEC_ID_RL2, CODEC_ID_8SVX_EXP, CODEC_ID_8SVX_FIB, CODEC_ID_ESCAPE124, CODEC_ID_DIRAC, CODEC_ID_BFI, CODEC_ID_CMV, CODEC_ID_MOTIONPIXELS, CODEC_ID_TGV, CODEC_ID_TGQ, CODEC_ID_TQI, CODEC_ID_AURA, CODEC_ID_AURA2, CODEC_ID_V210X, CODEC_ID_TMV, CODEC_ID_V210, CODEC_ID_DPX, CODEC_ID_MAD, CODEC_ID_FRWU, CODEC_ID_FLASHSV2, CODEC_ID_CDGRAPHICS, CODEC_ID_R210, CODEC_ID_ANM, CODEC_ID_BINKVIDEO, CODEC_ID_IFF_ILBM, CODEC_ID_IFF_BYTERUN1, CODEC_ID_KGV1, CODEC_ID_YOP, CODEC_ID_VP8, /* various PCM "codecs" */ CODEC_ID_PCM_S16LE= 0x10000, CODEC_ID_PCM_S16BE, CODEC_ID_PCM_U16LE, CODEC_ID_PCM_U16BE, CODEC_ID_PCM_S8, CODEC_ID_PCM_U8, CODEC_ID_PCM_MULAW, CODEC_ID_PCM_ALAW, CODEC_ID_PCM_S32LE, CODEC_ID_PCM_S32BE, CODEC_ID_PCM_U32LE, CODEC_ID_PCM_U32BE, CODEC_ID_PCM_S24LE, CODEC_ID_PCM_S24BE, CODEC_ID_PCM_U24LE, CODEC_ID_PCM_U24BE, CODEC_ID_PCM_S24DAUD, CODEC_ID_PCM_ZORK, CODEC_ID_PCM_S16LE_PLANAR, CODEC_ID_PCM_DVD, CODEC_ID_PCM_F32BE, CODEC_ID_PCM_F32LE, CODEC_ID_PCM_F64BE, CODEC_ID_PCM_F64LE, CODEC_ID_PCM_BLURAY, /* various ADPCM codecs */ CODEC_ID_ADPCM_IMA_QT= 0x11000, CODEC_ID_ADPCM_IMA_WAV, CODEC_ID_ADPCM_IMA_DK3, CODEC_ID_ADPCM_IMA_DK4, CODEC_ID_ADPCM_IMA_WS, CODEC_ID_ADPCM_IMA_SMJPEG, CODEC_ID_ADPCM_MS, CODEC_ID_ADPCM_4XM, CODEC_ID_ADPCM_XA, CODEC_ID_ADPCM_ADX, CODEC_ID_ADPCM_EA, CODEC_ID_ADPCM_G726, CODEC_ID_ADPCM_CT, CODEC_ID_ADPCM_SWF, CODEC_ID_ADPCM_YAMAHA, CODEC_ID_ADPCM_SBPRO_4, CODEC_ID_ADPCM_SBPRO_3, CODEC_ID_ADPCM_SBPRO_2, CODEC_ID_ADPCM_THP, CODEC_ID_ADPCM_IMA_AMV, CODEC_ID_ADPCM_EA_R1, CODEC_ID_ADPCM_EA_R3, CODEC_ID_ADPCM_EA_R2, CODEC_ID_ADPCM_IMA_EA_SEAD, CODEC_ID_ADPCM_IMA_EA_EACS, CODEC_ID_ADPCM_EA_XAS, CODEC_ID_ADPCM_EA_MAXIS_XA, CODEC_ID_ADPCM_IMA_ISS, /* AMR */ CODEC_ID_AMR_NB= 0x12000, CODEC_ID_AMR_WB, /* RealAudio codecs*/ CODEC_ID_RA_144= 0x13000, CODEC_ID_RA_288, /* various DPCM codecs */ CODEC_ID_ROQ_DPCM= 0x14000, CODEC_ID_INTERPLAY_DPCM, CODEC_ID_XAN_DPCM, CODEC_ID_SOL_DPCM, /* audio codecs */ 
CODEC_ID_MP2= 0x15000, CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 CODEC_ID_AAC, CODEC_ID_AC3, CODEC_ID_DTS, CODEC_ID_VORBIS, CODEC_ID_DVAUDIO, CODEC_ID_WMAV1, CODEC_ID_WMAV2, CODEC_ID_MACE3, CODEC_ID_MACE6, CODEC_ID_VMDAUDIO, CODEC_ID_SONIC, CODEC_ID_SONIC_LS, CODEC_ID_FLAC, CODEC_ID_MP3ADU, CODEC_ID_MP3ON4, CODEC_ID_SHORTEN, CODEC_ID_ALAC, CODEC_ID_WESTWOOD_SND1, CODEC_ID_GSM, ///< as in Berlin toast format CODEC_ID_QDM2, CODEC_ID_COOK, CODEC_ID_TRUESPEECH, CODEC_ID_TTA, CODEC_ID_SMACKAUDIO, CODEC_ID_QCELP, CODEC_ID_WAVPACK, CODEC_ID_DSICINAUDIO, CODEC_ID_IMC, CODEC_ID_MUSEPACK7, CODEC_ID_MLP, CODEC_ID_GSM_MS, /* as found in WAV */ CODEC_ID_ATRAC3, CODEC_ID_VOXWARE, CODEC_ID_APE, CODEC_ID_NELLYMOSER, CODEC_ID_MUSEPACK8, CODEC_ID_SPEEX, CODEC_ID_WMAVOICE, CODEC_ID_WMAPRO, CODEC_ID_WMALOSSLESS, CODEC_ID_ATRAC3P, CODEC_ID_EAC3, CODEC_ID_SIPR, CODEC_ID_MP1, CODEC_ID_TWINVQ, CODEC_ID_TRUEHD, CODEC_ID_MP4ALS, CODEC_ID_ATRAC1, CODEC_ID_BINKAUDIO_RDFT, CODEC_ID_BINKAUDIO_DCT, /* subtitle codecs */ CODEC_ID_DVD_SUBTITLE= 0x17000, CODEC_ID_DVB_SUBTITLE, CODEC_ID_TEXT, ///< raw UTF-8 text CODEC_ID_XSUB, CODEC_ID_SSA, CODEC_ID_MOV_TEXT, CODEC_ID_HDMV_PGS_SUBTITLE, CODEC_ID_DVB_TELETEXT, /* other specific kind of codecs (generally used for attachments) */ CODEC_ID_TTF= 0x18000, CODEC_ID_PROBE= 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it CODEC_ID_MPEG2TS= 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS * stream (only used by libavformat) */ }; #if LIBAVCODEC_VERSION_MAJOR < 53 #define CodecType AVMediaType #define CODEC_TYPE_UNKNOWN AVMEDIA_TYPE_UNKNOWN #define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO #define CODEC_TYPE_AUDIO AVMEDIA_TYPE_AUDIO #define CODEC_TYPE_DATA AVMEDIA_TYPE_DATA #define CODEC_TYPE_SUBTITLE AVMEDIA_TYPE_SUBTITLE #define CODEC_TYPE_ATTACHMENT AVMEDIA_TYPE_ATTACHMENT #define CODEC_TYPE_NB AVMEDIA_TYPE_NB #endif /** * all in native-endian format */ enum SampleFormat { 
SAMPLE_FMT_NONE = -1, SAMPLE_FMT_U8, ///< unsigned 8 bits SAMPLE_FMT_S16, ///< signed 16 bits SAMPLE_FMT_S32, ///< signed 32 bits SAMPLE_FMT_FLT, ///< float SAMPLE_FMT_DBL, ///< double SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if dynamically linking to libavcodec }; /* Audio channel masks */ #define CH_FRONT_LEFT 0x00000001 #define CH_FRONT_RIGHT 0x00000002 #define CH_FRONT_CENTER 0x00000004 #define CH_LOW_FREQUENCY 0x00000008 #define CH_BACK_LEFT 0x00000010 #define CH_BACK_RIGHT 0x00000020 #define CH_FRONT_LEFT_OF_CENTER 0x00000040 #define CH_FRONT_RIGHT_OF_CENTER 0x00000080 #define CH_BACK_CENTER 0x00000100 #define CH_SIDE_LEFT 0x00000200 #define CH_SIDE_RIGHT 0x00000400 #define CH_TOP_CENTER 0x00000800 #define CH_TOP_FRONT_LEFT 0x00001000 #define CH_TOP_FRONT_CENTER 0x00002000 #define CH_TOP_FRONT_RIGHT 0x00004000 #define CH_TOP_BACK_LEFT 0x00008000 #define CH_TOP_BACK_CENTER 0x00010000 #define CH_TOP_BACK_RIGHT 0x00020000 #define CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. #define CH_STEREO_RIGHT 0x40000000 ///< See CH_STEREO_LEFT. /** Channel mask value used for AVCodecContext.request_channel_layout to indicate that the user requests the channel order of the decoder output to be the native codec channel order. 
*/ #define CH_LAYOUT_NATIVE 0x8000000000000000LL /* Audio channel convenience macros */ #define CH_LAYOUT_MONO (CH_FRONT_CENTER) #define CH_LAYOUT_STEREO (CH_FRONT_LEFT|CH_FRONT_RIGHT) #define CH_LAYOUT_2_1 (CH_LAYOUT_STEREO|CH_BACK_CENTER) #define CH_LAYOUT_SURROUND (CH_LAYOUT_STEREO|CH_FRONT_CENTER) #define CH_LAYOUT_4POINT0 (CH_LAYOUT_SURROUND|CH_BACK_CENTER) #define CH_LAYOUT_2_2 (CH_LAYOUT_STEREO|CH_SIDE_LEFT|CH_SIDE_RIGHT) #define CH_LAYOUT_QUAD (CH_LAYOUT_STEREO|CH_BACK_LEFT|CH_BACK_RIGHT) #define CH_LAYOUT_5POINT0 (CH_LAYOUT_SURROUND|CH_SIDE_LEFT|CH_SIDE_RIGHT) #define CH_LAYOUT_5POINT1 (CH_LAYOUT_5POINT0|CH_LOW_FREQUENCY) #define CH_LAYOUT_5POINT0_BACK (CH_LAYOUT_SURROUND|CH_BACK_LEFT|CH_BACK_RIGHT) #define CH_LAYOUT_5POINT1_BACK (CH_LAYOUT_5POINT0_BACK|CH_LOW_FREQUENCY) #define CH_LAYOUT_7POINT0 (CH_LAYOUT_5POINT0|CH_BACK_LEFT|CH_BACK_RIGHT) #define CH_LAYOUT_7POINT1 (CH_LAYOUT_5POINT1|CH_BACK_LEFT|CH_BACK_RIGHT) #define CH_LAYOUT_7POINT1_WIDE (CH_LAYOUT_5POINT1_BACK|\ CH_FRONT_LEFT_OF_CENTER|CH_FRONT_RIGHT_OF_CENTER) #define CH_LAYOUT_STEREO_DOWNMIX (CH_STEREO_LEFT|CH_STEREO_RIGHT) /* in bytes */ #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio /** * Required number of additionally allocated bytes at the end of the input bitstream for decoding. * This is mainly needed because some optimized bitstream readers read * 32 or 64 bit at once and could read over the end.<br> * Note: If the first 23 bits of the additional bytes are not 0, then damaged * MPEG bitstreams could cause overread and segfault. */ #define FF_INPUT_BUFFER_PADDING_SIZE 8 /** * minimum encoding buffer size * Used to avoid some checks during header writing. */ #define FF_MIN_BUFFER_SIZE 16384 /** * motion estimation type. 
*/ enum Motion_Est_ID { ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed ME_FULL, ME_LOG, ME_PHODS, ME_EPZS, ///< enhanced predictive zonal search ME_X1, ///< reserved for experiments ME_HEX, ///< hexagon based search ME_UMH, ///< uneven multi-hexagon search ME_ITER, ///< iterative search ME_TESA, ///< transformed exhaustive search algorithm }; enum AVDiscard{ /* We leave some space between them for extensions (drop some * keyframes for intra-only or drop just some bidir frames). */ AVDISCARD_NONE =-16, ///< discard nothing AVDISCARD_DEFAULT= 0, ///< discard useless packets like 0 size packets in avi AVDISCARD_NONREF = 8, ///< discard all non reference AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes AVDISCARD_ALL = 48, ///< discard all }; enum AVColorPrimaries{ AVCOL_PRI_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B AVCOL_PRI_UNSPECIFIED=2, AVCOL_PRI_BT470M =4, AVCOL_PRI_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM AVCOL_PRI_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC AVCOL_PRI_SMPTE240M =7, ///< functionally identical to above AVCOL_PRI_FILM =8, AVCOL_PRI_NB , ///< Not part of ABI }; enum AVColorTransferCharacteristic{ AVCOL_TRC_BT709 =1, ///< also ITU-R BT1361 AVCOL_TRC_UNSPECIFIED=2, AVCOL_TRC_GAMMA22 =4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM AVCOL_TRC_GAMMA28 =5, ///< also ITU-R BT470BG AVCOL_TRC_NB , ///< Not part of ABI }; enum AVColorSpace{ AVCOL_SPC_RGB =0, AVCOL_SPC_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B AVCOL_SPC_UNSPECIFIED=2, AVCOL_SPC_FCC =4, AVCOL_SPC_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 AVCOL_SPC_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above 
AVCOL_SPC_SMPTE240M =7, AVCOL_SPC_NB , ///< Not part of ABI }; enum AVColorRange{ AVCOL_RANGE_UNSPECIFIED=0, AVCOL_RANGE_MPEG =1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges AVCOL_RANGE_JPEG =2, ///< the normal 2^n-1 "JPEG" YUV ranges AVCOL_RANGE_NB , ///< Not part of ABI }; /** * X X 3 4 X X are luma samples, * 1 2 1-6 are possible chroma positions * X X 5 6 X 0 is undefined/unknown position */ enum AVChromaLocation{ AVCHROMA_LOC_UNSPECIFIED=0, AVCHROMA_LOC_LEFT =1, ///< mpeg2/4, h264 default AVCHROMA_LOC_CENTER =2, ///< mpeg1, jpeg, h263 AVCHROMA_LOC_TOPLEFT =3, ///< DV AVCHROMA_LOC_TOP =4, AVCHROMA_LOC_BOTTOMLEFT =5, AVCHROMA_LOC_BOTTOM =6, AVCHROMA_LOC_NB , ///< Not part of ABI }; typedef struct RcOverride{ int start_frame; int end_frame; int qscale; // If this is 0 then quality_factor will be used instead. float quality_factor; } RcOverride; #define FF_MAX_B_FRAMES 16 /* encoding support These flags can be passed in AVCodecContext.flags before initialization. Note: Not everything is supported yet. */ #define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. #define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. #define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. #define CODEC_FLAG_GMC 0x0020 ///< Use GMC. #define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. #define CODEC_FLAG_PART 0x0080 ///< Use data partitioning. /** * The parent program guarantees that the input for B-frames containing * streams is not written to for at least s->max_b_frames+1 frames, if * this is not set the input will be copied. */ #define CODEC_FLAG_INPUT_PRESERVED 0x0100 #define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. #define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. #define CODEC_FLAG_EXTERN_HUFF 0x1000 ///< Use external Huffman table (for MJPEG). #define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. 
#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. #define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. #define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random location instead of only at frame boundaries. */ #define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization. #define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT. #define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay. #define CODEC_FLAG_ALT_SCAN 0x00100000 ///< Use alternate scan. #define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe. #define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT). /* Fx : Flag for h263+ extra options */ #define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction #define CODEC_FLAG_H263P_UMV 0x02000000 ///< unlimited motion vector #define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp. #define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selectioon. #define CODEC_FLAG_H263P_AIV 0x00000008 ///< H.263 alternative inter VLC #define CODEC_FLAG_OBMC 0x00000001 ///< OBMC #define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter #define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000 #define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation #define CODEC_FLAG_SVCD_SCAN_OFFSET 0x40000000 ///< Will reserve space for SVCD scan offset user data. #define CODEC_FLAG_CLOSED_GOP 0x80000000 #define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks. #define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size. #define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding. #define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata. 
#define CODEC_FLAG2_BPYRAMID 0x00000010 ///< H.264 allow B-frames to be used as references. #define CODEC_FLAG2_WPRED 0x00000020 ///< H.264 weighted biprediction for B-frames #define CODEC_FLAG2_MIXED_REFS 0x00000040 ///< H.264 one reference per partition, as opposed to one reference per macroblock #define CODEC_FLAG2_8X8DCT 0x00000080 ///< H.264 high profile 8x8 transform #define CODEC_FLAG2_FASTPSKIP 0x00000100 ///< H.264 fast pskip #define CODEC_FLAG2_AUD 0x00000200 ///< H.264 access unit delimiters #define CODEC_FLAG2_BRDO 0x00000400 ///< B-frame rate-distortion optimization #define CODEC_FLAG2_INTRA_VLC 0x00000800 ///< Use MPEG-2 intra VLC table. #define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC). #define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. #define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping #define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries. #define CODEC_FLAG2_NON_LINEAR_QUANT 0x00010000 ///< Use MPEG-2 nonlinear quantizer. #define CODEC_FLAG2_BIT_RESERVOIR 0x00020000 ///< Use a bit reservoir when encoding if possible #define CODEC_FLAG2_MBTREE 0x00040000 ///< Use macroblock tree ratecontrol (x264 only) #define CODEC_FLAG2_PSY 0x00080000 ///< Use psycho visual optimizations. #define CODEC_FLAG2_SSIM 0x00100000 ///< Compute SSIM during encoding, error[] values are undefined. /* Unsupported options : * Syntax Arithmetic coding (SAC) * Reference Picture Selection * Independent Segment Decoding */ /* /Fx */ /* codec capabilities */ #define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback. /** * Codec uses get_buffer() for allocating buffers and supports custom allocators. * If not set, it might not use get_buffer() at all or use operations that * assume the buffer was allocated by avcodec_default_get_buffer. 
 */
#define CODEC_CAP_DR1             0x0002
/* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
#define CODEC_CAP_PARSE_ONLY      0x0004
#define CODEC_CAP_TRUNCATED       0x0008
/* Codec can export data for HW decoding (XvMC). */
#define CODEC_CAP_HWACCEL         0x0010
/**
 * Codec has a nonzero delay and needs to be fed with NULL at the end to get the delayed data.
 * If this is not set, the codec is guaranteed to never be fed with NULL data.
 */
#define CODEC_CAP_DELAY           0x0020
/**
 * Codec can be fed a final frame with a smaller size.
 * This can be used to prevent truncation of the last audio samples.
 */
#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
/**
 * Codec can export data for HW decoding (VDPAU).
 */
#define CODEC_CAP_HWACCEL_VDPAU    0x0080
/**
 * Codec can output multiple frames per AVPacket.
 * Normally demuxers return one frame at a time, demuxers which do not do
 * so are connected to a parser to split what they return into proper frames.
 * This flag is reserved to the very rare category of codecs which have a
 * bitstream that cannot be split into frames without time-consuming
 * operations like full decoding. Demuxers carrying such bitstreams thus
 * may return multiple frames in a packet. This has many disadvantages like
 * prohibiting stream copy in many cases thus it should only be considered
 * as a last resort.
 */
#define CODEC_CAP_SUBFRAMES        0x0100
/**
 * Codec is experimental and is thus avoided in favor of non experimental
 * encoders
 */
#define CODEC_CAP_EXPERIMENTAL     0x0200

//The following defines may change, don't expect compatibility if you use them.
#define MB_TYPE_INTRA4x4   0x0001
#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
#define MB_TYPE_INTRA_PCM  0x0004 //FIXME H.264-specific
#define MB_TYPE_16x16      0x0008
#define MB_TYPE_16x8       0x0010
#define MB_TYPE_8x16       0x0020
#define MB_TYPE_8x8        0x0040
#define MB_TYPE_INTERLACED 0x0080
#define MB_TYPE_DIRECT2    0x0100 //FIXME
#define MB_TYPE_ACPRED     0x0200
#define MB_TYPE_GMC        0x0400
#define MB_TYPE_SKIP       0x0800
#define MB_TYPE_P0L0       0x1000
#define MB_TYPE_P1L0       0x2000
#define MB_TYPE_P0L1       0x4000
#define MB_TYPE_P1L1       0x8000
#define MB_TYPE_L0         (MB_TYPE_P0L0 | MB_TYPE_P1L0)
#define MB_TYPE_L1         (MB_TYPE_P0L1 | MB_TYPE_P1L1)
#define MB_TYPE_L0L1       (MB_TYPE_L0 | MB_TYPE_L1)
#define MB_TYPE_QUANT      0x00010000
#define MB_TYPE_CBP        0x00020000
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)

/**
 * Pan Scan area.
 * This specifies the area which should be displayed.
 * Note there may be multiple such areas for one frame.
 */
typedef struct AVPanScan{
    /**
     * id
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int id;

    /**
     * width and height in 1/16 pel
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int width;
    int height;

    /**
     * position of the top left corner in 1/16 pel for up to 3 fields/frames
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int16_t position[3][2];
}AVPanScan;

/* NOTE: every line of this macro body ends in a backslash so the member
 * list can be spliced into a struct definition (see AVFrame below);
 * keep the line continuations intact when editing. */
#define FF_COMMON_FRAME \
    /**\
     * pointer to the picture planes.\
     * This might be different from the first allocated byte\
     * - encoding: \
     * - decoding: \
     */\
    uint8_t *data[4];\
    int linesize[4];\
    /**\
     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.\
     * This isn't used by libavcodec unless the default get/release_buffer() is used.\
     * - encoding: \
     * - decoding: \
     */\
    uint8_t *base[4];\
    /**\
     * 1 -> keyframe, 0-> not\
     * - encoding: Set by libavcodec.\
     * - decoding: Set by libavcodec.\
     */\
    int key_frame;\
\
    /**\
     * Picture type of the frame, see ?_TYPE below.\
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
     * - decoding: Set by libavcodec.\
     */\
    int pict_type;\
\
    /**\
     * presentation timestamp in time_base units (time when frame should be shown to user)\
     * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.\
     * - encoding: MUST be set by user.\
     * - decoding: Set by libavcodec.\
     */\
    int64_t pts;\
\
    /**\
     * picture number in bitstream order\
     * - encoding: set by\
     * - decoding: Set by libavcodec.\
     */\
    int coded_picture_number;\
    /**\
     * picture number in display order\
     * - encoding: set by\
     * - decoding: Set by libavcodec.\
     */\
    int display_picture_number;\
\
    /**\
     * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) \
     * - encoding: Set by libavcodec. for coded_picture (and set by user for input).\
     * - decoding: Set by libavcodec.\
     */\
    int quality; \
\
    /**\
     * buffer age (1->was last buffer and didn't change, 2->..., ...).\
     * Set to INT_MAX if the buffer has not been used yet.\
     * - encoding: unused\
     * - decoding: MUST be set by get_buffer().\
     */\
    int age;\
\
    /**\
     * is this picture used as reference\
     * The values for this are the same as the MpegEncContext.picture_structure\
     * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.\
     * Set to 4 for delayed, non-reference frames.\
     * - encoding: unused\
     * - decoding: Set by libavcodec. (before get_buffer() call)).\
     */\
    int reference;\
\
    /**\
     * QP table\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    int8_t *qscale_table;\
    /**\
     * QP store stride\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    int qstride;\
\
    /**\
     * mbskip_table[mb]>=1 if MB didn't change\
     * stride= mb_width = (width+15)>>4\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    uint8_t *mbskip_table;\
\
    /**\
     * motion vector table\
     * @code\
     * example:\
     * int mv_sample_log2= 4 - motion_subsample_log2;\
     * int mb_width= (width+15)>>4;\
     * int mv_stride= (mb_width << mv_sample_log2) + 1;\
     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\
     * @endcode\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec.\
     */\
    int16_t (*motion_val[2])[2];\
\
    /**\
     * macroblock type table\
     * mb_type_base + mb_width + 2\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec.\
     */\
    uint32_t *mb_type;\
\
    /**\
     * log2 of the size of the block which a single vector in motion_val represents: \
     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    uint8_t motion_subsample_log2;\
\
    /**\
     * for some private data of the user\
     * - encoding: unused\
     * - decoding: Set by user.\
     */\
    void *opaque;\
\
    /**\
     * error\
     * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.\
     * - decoding: unused\
     */\
    uint64_t error[4];\
\
    /**\
     * type of the buffer (to keep track of who has to deallocate data[*])\
     * - encoding: Set by the one who allocates it.\
     * - decoding: Set by the one who allocates it.\
     * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.\
     */\
    int type;\
\
    /**\
     * When decoding, this signals how much the picture must be delayed.\
     * extra_delay = repeat_pict / (2*fps)\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    int repeat_pict;\
\
    /**\
     * \
     */\
    int qscale_type;\
\
    /**\
     * The content of the picture is interlaced.\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec. (default 0)\
     */\
    int interlaced_frame;\
\
    /**\
     * If the content is interlaced, is top field displayed first.\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec.\
     */\
    int top_field_first;\
\
    /**\
     * Pan scan.\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec.\
     */\
    AVPanScan *pan_scan;\
\
    /**\
     * Tell user application that palette has changed from previous frame.\
     * - encoding: ??? (no palette-enabled encoder yet)\
     * - decoding: Set by libavcodec. (default 0).\
     */\
    int palette_has_changed;\
\
    /**\
     * codec suggestion on buffer type if != 0\
     * - encoding: unused\
     * - decoding: Set by libavcodec. (before get_buffer() call)).\
     */\
    int buffer_hints;\
\
    /**\
     * DCT coefficients\
     * - encoding: unused\
     * - decoding: Set by libavcodec.\
     */\
    short *dct_coeff;\
\
    /**\
     * motion reference frame index\
     * the order in which these are stored can depend on the codec.\
     * - encoding: Set by user.\
     * - decoding: Set by libavcodec.\
     */\
    int8_t *ref_index[2];\
\
    /**\
     * reordered opaque 64bit number (generally a PTS) from AVCodecContext.reordered_opaque\
     * output in AVFrame.reordered_opaque\
     * - encoding: unused\
     * - decoding: Read by user.\
     */\
    int64_t reordered_opaque;\
\
    /**\
     * hardware accelerator private data (FFmpeg allocated)\
     * - encoding: unused\
     * - decoding: Set by libavcodec\
     */\
    void *hwaccel_picture_private;\


#define FF_QSCALE_TYPE_MPEG1 0
#define FF_QSCALE_TYPE_MPEG2 1
#define FF_QSCALE_TYPE_H264  2
#define FF_QSCALE_TYPE_VP56  3

#define FF_BUFFER_TYPE_INTERNAL 1
#define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)
#define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
#define FF_BUFFER_TYPE_COPY     8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
#define FF_I_TYPE  1 ///< Intra
#define FF_P_TYPE  2 ///< Predicted
#define FF_B_TYPE  3 ///< Bi-dir predicted
#define FF_S_TYPE  4 ///< S(GMC)-VOP MPEG4
#define FF_SI_TYPE 5 ///< Switching Intra
#define FF_SP_TYPE 6 ///< Switching Predicted
#define FF_BI_TYPE 7

#define FF_BUFFER_HINTS_VALID    0x01 // Buffer hints value is meaningful (if 0 ignore).
#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).

typedef struct AVPacket {
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;
    uint8_t *data;
    int   size;
    int   stream_index;
    int   flags;
    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int   duration;
    void  (*destruct)(struct AVPacket *);
    void  *priv;
    int64_t pos;                            ///< byte position in stream, -1 if unknown

    /**
     * Time difference in AVStream->time_base units from the pts of this
     * packet to the point at which the output from the decoder has converged
     * independent from the availability of previous frames. That is, the
     * frames are virtually identical no matter if decoding started from
     * the very first frame or from this keyframe.
     * Is AV_NOPTS_VALUE if unknown.
     * This field is not the display duration of the current packet.
     *
     * The purpose of this field is to allow seeking in streams that have no
     * keyframes in the conventional sense. It corresponds to the
     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
     * essential for some types of subtitle streams to ensure that all
     * subtitles are correctly displayed after seeking.
     */
    int64_t convergence_duration;
} AVPacket;
#define AV_PKT_FLAG_KEY   0x0001
#if LIBAVCODEC_VERSION_MAJOR < 53
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif

/**
 * Audio Video Frame.
 * New fields can be added to the end of FF_COMMON_FRAME with minor version
 * bumps.
 * Removal, reordering and changes to existing fields require a major
 * version bump. No fields should be added into AVFrame before or after
 * FF_COMMON_FRAME!
 * sizeof(AVFrame) must not be used outside libav*.
 */
typedef struct AVFrame {
    FF_COMMON_FRAME
} AVFrame;

/**
 * main external API structure.
 * New fields can be added to the end with minor version bumps.
 * Removal, reordering and changes to existing fields require a major
 * version bump.
 * sizeof(AVCodecContext) must not be used outside libav*.
 */
typedef struct AVCodecContext {
    /**
     * information on struct for av_log
     * - set by avcodec_alloc_context
     */
    const AVClass *av_class;
    /**
     * the average bitrate
     * - encoding: Set by user; unused for constant quantizer encoding.
     * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
     */
    int bit_rate;

    /**
     * number of bits the bitstream is allowed to diverge from the reference.
     *           the reference can be CBR (for CBR pass1) or VBR (for pass2)
     * - encoding: Set by user; unused for constant quantizer encoding.
     * - decoding: unused
     */
    int bit_rate_tolerance;

    /**
     * CODEC_FLAG_*.
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int flags;

    /**
     * Some codecs need additional format info. It is stored here.
     * If any muxer uses this then ALL demuxers/parsers AND encoders for the
     * specific codec MUST set it correctly otherwise stream copy breaks.
     * In general use of this field by muxers is not recommended.
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec. (FIXME: Is this OK?)
     */
    int sub_id;

    /**
     * Motion estimation algorithm used for video coding.
     * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
     * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
     * - encoding: MUST be set by user.
     * - decoding: unused
     */
    int me_method;

    /**
     * some codecs need / can use extradata like Huffman tables.
     * mjpeg: Huffman tables
     * rv10: additional flags
     * mpeg4: global headers (they can be in the bitstream or here)
     * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
     * than extradata_size to avoid problems if it is read with the bitstream reader.
     * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
     * - encoding: Set/allocated/freed by libavcodec.
     * - decoding: Set/allocated/freed by user.
     */
    uint8_t *extradata;
    int extradata_size;

    /**
     * This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identically 1.
     * - encoding: MUST be set by user.
     * - decoding: Set by libavcodec.
     */
    AVRational time_base;

    /* video only */
    /**
     * picture width / height.
     * - encoding: MUST be set by user.
     * - decoding: Set by libavcodec.
     * Note: For compatibility it is possible to set this instead of
     * coded_width/height before decoding.
     */
    int width, height;

#define FF_ASPECT_EXTENDED 15

    /**
     * the number of pictures in a group of pictures, or 0 for intra_only
     * - encoding: Set by user.
     * - decoding: unused
     */
    int gop_size;

    /**
     * Pixel format, see PIX_FMT_xxx.
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    enum PixelFormat pix_fmt;

    /**
     * Frame rate emulation. If not zero, the lower layer (i.e. format handler)
     * has to read frames at native frame rate.
     * - encoding: Set by user.
     * - decoding: unused
     */
    int rate_emu;

    /**
     * If non NULL, 'draw_horiz_band' is called by the libavcodec
     * decoder to draw a horizontal band. It improves cache usage. Not
     * all codecs can do that. You must check the codec capabilities
     * beforehand.
     * The function is also used by hardware acceleration APIs.
     * It is called at least once during frame decoding to pass
     * the data needed for hardware render.
     * In that mode instead of pixel data, AVFrame points to
     * a structure specific to the acceleration API. The application
     * reads the structure and can change some fields to indicate progress
     * or mark state.
     * - encoding: unused
     * - decoding: Set by user.
     * @param height the height of the slice
     * @param y the y position of the slice
     * @param type 1->top field, 2->bottom field, 3->frame
     * @param offset offset into the AVFrame.data from which the slice should be read
     */
    void (*draw_horiz_band)(struct AVCodecContext *s,
                            const AVFrame *src, int offset[4],
                            int y, int type, int height);

    /* audio only */
    int sample_rate; ///< samples per second
    int channels;    ///< number of audio channels

    /**
     * audio sample format
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    enum SampleFormat sample_fmt;  ///< sample format

    /* The following data should not be initialized. */

    /**
     * Samples per packet, initialized when calling 'init'.
     */
    int frame_size;
    int frame_number;   ///< audio or video frame number
#if LIBAVCODEC_VERSION_MAJOR < 53
    int real_pict_num;  ///< Returns the real picture number of previous encoded frame.
#endif

    /**
     * Number of frames the decoded output will be delayed relative to
     * the encoded input.
     * - encoding: Set by libavcodec.
     * - decoding: unused
     */
    int delay;

    /* - encoding parameters */
    float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)
    float qblur;      ///< amount of qscale smoothing over time (0.0-1.0)

    /**
     * minimum quantizer
     * - encoding: Set by user.
     * - decoding: unused
     */
    int qmin;

    /**
     * maximum quantizer
     * - encoding: Set by user.
     * - decoding: unused
     */
    int qmax;

    /**
     * maximum quantizer difference between frames
     * - encoding: Set by user.
     * - decoding: unused
     */
    int max_qdiff;

    /**
     * maximum number of B-frames between non-B-frames
     * Note: The output will be delayed by max_b_frames+1 relative to the input.
     * - encoding: Set by user.
     * - decoding: unused
     */
    int max_b_frames;

    /**
     * qscale factor between IP and B-frames
     * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
     * - encoding: Set by user.
     * - decoding: unused
     */
    float b_quant_factor;

    /** obsolete FIXME remove */
    int rc_strategy;
#define FF_RC_STRATEGY_XVID 1

    int b_frame_strategy;

    /**
     * hurry up amount
     * - encoding: unused
     * - decoding: Set by user. 1-> Skip B-frames, 2-> Skip IDCT/dequant too, 5-> Skip everything except header
     * @deprecated Deprecated in favor of skip_idct and skip_frame.
     */
    int hurry_up;

    struct AVCodec *codec;

    void *priv_data;

    int rtp_payload_size;   /* The size of the RTP payload: the coder will  */
                            /* do its best to deliver a chunk with size     */
                            /* below rtp_payload_size, the chunk will start */
                            /* with a start code on some codecs like H.263. */
                            /* This doesn't take account of any particular  */
                            /* headers inside the transmitted RTP payload.  */


    /* The RTP callback: This function is called    */
    /* every time the encoder has a packet to send. */
    /* It depends on the encoder if the data starts */
    /* with a Start Code (it should). H.263 does.   */
    /* mb_nb contains the number of macroblocks     */
    /* encoded in the RTP payload.                  */
    void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);

    /* statistics, used for 2-pass encoding */
    int mv_bits;
    int header_bits;
    int i_tex_bits;
    int p_tex_bits;
    int i_count;
    int p_count;
    int skip_count;
    int misc_bits;

    /**
     * number of bits used for the previously encoded frame
     * - encoding: Set by libavcodec.
     * - decoding: unused
     */
    int frame_bits;

    /**
     * Private data of the user, can be used to carry app specific stuff.
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    void *opaque;

    char codec_name[32];
    enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
    enum CodecID codec_id; /* see CODEC_ID_xxx */

    /**
     * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
     * This is used to work around some encoder bugs.
     * A demuxer should set this to what is stored in the field used to identify the codec.
     * If there are multiple such fields in a container then the demuxer should choose the one
     * which maximizes the information about the used codec.
     * If the codec tag field in a container is larger than 32 bits then the demuxer should
     * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
     * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
     * first.
     * - encoding: Set by user, if not then the default based on codec_id will be used.
     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
     */
    unsigned int codec_tag;

    /**
     * Work around bugs in encoders which sometimes cannot be detected automatically.
     * - encoding: Set by user
     * - decoding: Set by user
     */
    int workaround_bugs;
#define FF_BUG_AUTODETECT       1  ///< autodetection
#define FF_BUG_OLD_MSMPEG4      2
#define FF_BUG_XVID_ILACE       4
#define FF_BUG_UMP4             8
#define FF_BUG_NO_PADDING       16
#define FF_BUG_AMV              32
#define FF_BUG_AC_VLC           0  ///< Will be removed, libavcodec can now handle these non-compliant files by default.
#define FF_BUG_QPEL_CHROMA 64 #define FF_BUG_STD_QPEL 128 #define FF_BUG_QPEL_CHROMA2 256 #define FF_BUG_DIRECT_BLOCKSIZE 512 #define FF_BUG_EDGE 1024 #define FF_BUG_HPEL_CHROMA 2048 #define FF_BUG_DC_CLIP 4096 #define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. #define FF_BUG_TRUNCATED 16384 //#define FF_BUG_FAKE_SCALABILITY 16 //Autodetection should work 100%. /** * luma single coefficient elimination threshold * - encoding: Set by user. * - decoding: unused */ int luma_elim_threshold; /** * chroma single coeff elimination threshold * - encoding: Set by user. * - decoding: unused */ int chroma_elim_threshold; /** * strictly follow the standard (MPEG4, ...). * - encoding: Set by user. * - decoding: Set by user. * Setting this to STRICT or higher means the encoder and decoder will * generally do stupid things. While setting it to inofficial or lower * will mean the encoder might use things that are not supported by all * spec compliant decoders. Decoders make no difference between normal, * inofficial and experimental, that is they always try to decode things * when they can unless they are explicitly asked to behave stupid * (=strictly conform to the specs) */ int strict_std_compliance; #define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to a older more strict version of the spec or reference software. #define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. #define FF_COMPLIANCE_NORMAL 0 #define FF_COMPLIANCE_INOFFICIAL -1 ///< Allow inofficial extensions. #define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. /** * qscale offset between IP and B-frames * - encoding: Set by user. * - decoding: unused */ float b_quant_offset; /** * Error recognization; higher values will detect more errors but may * misdetect some more or less valid parts as errors. * - encoding: unused * - decoding: Set by user. 
*/ int error_recognition; #define FF_ER_CAREFUL 1 #define FF_ER_COMPLIANT 2 #define FF_ER_AGGRESSIVE 3 #define FF_ER_VERY_AGGRESSIVE 4 /** * Called at the beginning of each frame to get a buffer for it. * If pic.reference is set then the frame will be read later by libavcodec. * avcodec_align_dimensions2() should be used to find the required width and * height, as they normally need to be rounded up to the next multiple of 16. * if CODEC_CAP_DR1 is not set then get_buffer() must call * avcodec_default_get_buffer() instead of providing buffers allocated by * some other means. * - encoding: unused * - decoding: Set by libavcodec., user can override. */ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); /** * Called to release buffers which were allocated with get_buffer. * A released buffer can be reused in get_buffer(). * pic.data[*] must be set to NULL. * - encoding: unused * - decoding: Set by libavcodec., user can override. */ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); /** * Size of the frame reordering buffer in the decoder. * For MPEG-2 it is 1 IPB or 0 low delay IP. * - encoding: Set by libavcodec. * - decoding: Set by libavcodec. */ int has_b_frames; /** * number of bytes per packet if constant and known or 0 * Used by some WAV based audio codecs. */ int block_align; int parse_only; /* - decoding only: If true, only parsing is done (function avcodec_parse_frame()). The frame data is returned. Only MPEG codecs support this now. */ /** * 0-> h263 quant 1-> mpeg quant * - encoding: Set by user. * - decoding: unused */ int mpeg_quant; /** * pass1 encoding statistics output buffer * - encoding: Set by libavcodec. * - decoding: unused */ char *stats_out; /** * pass2 encoding statistics input buffer * Concatenated stuff from stats_out of pass1 should be placed here. * - encoding: Allocated/set/freed by user. 
     * - decoding: unused
     */
    char *stats_in;

    /**
     * ratecontrol qmin qmax limiting method
     * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
     * - encoding: Set by user.
     * - decoding: unused
     */
    float rc_qsquish;

    float rc_qmod_amp;
    int rc_qmod_freq;

    /**
     * ratecontrol override, see RcOverride
     * - encoding: Allocated/set/freed by user.
     * - decoding: unused
     */
    RcOverride *rc_override;
    int rc_override_count;

    /**
     * rate control equation
     * - encoding: Set by user
     * - decoding: unused
     */
    const char *rc_eq;

    /**
     * maximum bitrate
     * - encoding: Set by user.
     * - decoding: unused
     */
    int rc_max_rate;

    /**
     * minimum bitrate
     * - encoding: Set by user.
     * - decoding: unused
     */
    int rc_min_rate;

    /**
     * decoder bitstream buffer size
     * - encoding: Set by user.
     * - decoding: unused
     */
    int rc_buffer_size;
    float rc_buffer_aggressivity;

    /**
     * qscale factor between P and I-frames
     * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
     * - encoding: Set by user.
     * - decoding: unused
     */
    float i_quant_factor;

    /**
     * qscale offset between P and I-frames
     * - encoding: Set by user.
     * - decoding: unused
     */
    float i_quant_offset;

    /**
     * initial complexity for pass1 ratecontrol
     * - encoding: Set by user.
     * - decoding: unused
     */
    float rc_initial_cplx;

    /**
     * DCT algorithm, see FF_DCT_* below
     * - encoding: Set by user.
     * - decoding: unused
     */
    int dct_algo;
#define FF_DCT_AUTO    0
#define FF_DCT_FASTINT 1
#define FF_DCT_INT     2
#define FF_DCT_MMX     3
#define FF_DCT_MLIB    4
#define FF_DCT_ALTIVEC 5
#define FF_DCT_FAAN    6

    /**
     * luminance masking (0-> disabled)
     * - encoding: Set by user.
     * - decoding: unused
     */
    float lumi_masking;

    /**
     * temporary complexity masking (0-> disabled)
     * - encoding: Set by user.
     * - decoding: unused
     */
    float temporal_cplx_masking;

    /**
     * spatial complexity masking (0-> disabled)
     * - encoding: Set by user.
* - decoding: unused */ float spatial_cplx_masking; /** * p block masking (0-> disabled) * - encoding: Set by user. * - decoding: unused */ float p_masking; /** * darkness masking (0-> disabled) * - encoding: Set by user. * - decoding: unused */ float dark_masking; /** * IDCT algorithm, see FF_IDCT_* below. * - encoding: Set by user. * - decoding: Set by user. */ int idct_algo; #define FF_IDCT_AUTO 0 #define FF_IDCT_INT 1 #define FF_IDCT_SIMPLE 2 #define FF_IDCT_SIMPLEMMX 3 #define FF_IDCT_LIBMPEG2MMX 4 #define FF_IDCT_PS2 5 #define FF_IDCT_MLIB 6 #define FF_IDCT_ARM 7 #define FF_IDCT_ALTIVEC 8 #define FF_IDCT_SH4 9 #define FF_IDCT_SIMPLEARM 10 #define FF_IDCT_H264 11 #define FF_IDCT_VP3 12 #define FF_IDCT_IPP 13 #define FF_IDCT_XVIDMMX 14 #define FF_IDCT_CAVS 15 #define FF_IDCT_SIMPLEARMV5TE 16 #define FF_IDCT_SIMPLEARMV6 17 #define FF_IDCT_SIMPLEVIS 18 #define FF_IDCT_WMV2 19 #define FF_IDCT_FAAN 20 #define FF_IDCT_EA 21 #define FF_IDCT_SIMPLENEON 22 #define FF_IDCT_SIMPLEALPHA 23 #define FF_IDCT_BINK 24 /** * slice count * - encoding: Set by libavcodec. * - decoding: Set by user (or 0). */ int slice_count; /** * slice offsets in the frame in bytes * - encoding: Set/allocated by libavcodec. * - decoding: Set/allocated by user (or NULL). */ int *slice_offset; /** * error concealment flags * - encoding: unused * - decoding: Set by user. */ int error_concealment; #define FF_EC_GUESS_MVS 1 #define FF_EC_DEBLOCK 2 /** * dsp_mask could be add used to disable unwanted CPU features * CPU features (i.e. MMX, SSE. ...) * * With the FORCE flag you may instead enable given CPU features. * (Dangerous: Usable in case of misdetection, improper usage however will * result into program crash.) 
*/ unsigned dsp_mask; #define FF_MM_FORCE 0x80000000 /* Force usage of selected flags (OR) */ /* lower 16 bits - CPU features */ #define FF_MM_MMX 0x0001 ///< standard MMX #define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW #if LIBAVCODEC_VERSION_MAJOR < 53 #define FF_MM_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext #endif #define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext #define FF_MM_SSE 0x0008 ///< SSE functions #define FF_MM_SSE2 0x0010 ///< PIV SSE2 functions #define FF_MM_3DNOWEXT 0x0020 ///< AMD 3DNowExt #define FF_MM_SSE3 0x0040 ///< Prescott SSE3 functions #define FF_MM_SSSE3 0x0080 ///< Conroe SSSE3 functions #define FF_MM_SSE4 0x0100 ///< Penryn SSE4.1 functions #define FF_MM_SSE42 0x0200 ///< Nehalem SSE4.2 functions #define FF_MM_IWMMXT 0x0100 ///< XScale IWMMXT #define FF_MM_ALTIVEC 0x0001 ///< standard AltiVec /** * bits per sample/pixel from the demuxer (needed for huffyuv). * - encoding: Set by libavcodec. * - decoding: Set by user. */ int bits_per_coded_sample; /** * prediction method (needed for huffyuv) * - encoding: Set by user. * - decoding: unused */ int prediction_method; #define FF_PRED_LEFT 0 #define FF_PRED_PLANE 1 #define FF_PRED_MEDIAN 2 /** * sample aspect ratio (0 if unknown) * That is the width of a pixel divided by the height of the pixel. * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. * - encoding: Set by user. * - decoding: Set by libavcodec. */ AVRational sample_aspect_ratio; /** * the picture in the bitstream * - encoding: Set by libavcodec. * - decoding: Set by libavcodec. */ AVFrame *coded_frame; /** * debug * - encoding: Set by user. * - decoding: Set by user. 
*/ int debug; #define FF_DEBUG_PICT_INFO 1 #define FF_DEBUG_RC 2 #define FF_DEBUG_BITSTREAM 4 #define FF_DEBUG_MB_TYPE 8 #define FF_DEBUG_QP 16 #define FF_DEBUG_MV 32 #define FF_DEBUG_DCT_COEFF 0x00000040 #define FF_DEBUG_SKIP 0x00000080 #define FF_DEBUG_STARTCODE 0x00000100 #define FF_DEBUG_PTS 0x00000200 #define FF_DEBUG_ER 0x00000400 #define FF_DEBUG_MMCO 0x00000800 #define FF_DEBUG_BUGS 0x00001000 #define FF_DEBUG_VIS_QP 0x00002000 #define FF_DEBUG_VIS_MB_TYPE 0x00004000 #define FF_DEBUG_BUFFERS 0x00008000 /** * debug * - encoding: Set by user. * - decoding: Set by user. */ int debug_mv; #define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames #define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames #define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames /** * error * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. * - decoding: unused */ uint64_t error[4]; /** * minimum MB quantizer * - encoding: unused * - decoding: unused */ int mb_qmin; /** * maximum MB quantizer * - encoding: unused * - decoding: unused */ int mb_qmax; /** * motion estimation comparison function * - encoding: Set by user. * - decoding: unused */ int me_cmp; /** * subpixel motion estimation comparison function * - encoding: Set by user. * - decoding: unused */ int me_sub_cmp; /** * macroblock comparison function (not supported yet) * - encoding: Set by user. * - decoding: unused */ int mb_cmp; /** * interlaced DCT comparison function * - encoding: Set by user. 
* - decoding: unused */ int ildct_cmp; #define FF_CMP_SAD 0 #define FF_CMP_SSE 1 #define FF_CMP_SATD 2 #define FF_CMP_DCT 3 #define FF_CMP_PSNR 4 #define FF_CMP_BIT 5 #define FF_CMP_RD 6 #define FF_CMP_ZERO 7 #define FF_CMP_VSAD 8 #define FF_CMP_VSSE 9 #define FF_CMP_NSSE 10 #define FF_CMP_W53 11 #define FF_CMP_W97 12 #define FF_CMP_DCTMAX 13 #define FF_CMP_DCT264 14 #define FF_CMP_CHROMA 256 /** * ME diamond size & shape * - encoding: Set by user. * - decoding: unused */ int dia_size; /** * amount of previous MV predictors (2a+1 x 2a+1 square) * - encoding: Set by user. * - decoding: unused */ int last_predictor_count; /** * prepass for motion estimation * - encoding: Set by user. * - decoding: unused */ int pre_me; /** * motion estimation prepass comparison function * - encoding: Set by user. * - decoding: unused */ int me_pre_cmp; /** * ME prepass diamond size & shape * - encoding: Set by user. * - decoding: unused */ int pre_dia_size; /** * subpel ME quality * - encoding: Set by user. * - decoding: unused */ int me_subpel_quality; /** * callback to negotiate the pixelFormat * @param fmt is the list of formats which are supported by the codec, * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. * The first is always the native one. * @return the chosen format * - encoding: unused * - decoding: Set by user, if not set the native format will be chosen. */ enum PixelFormat (*get_format)(struct AVCodecContext *s, const enum PixelFormat * fmt); /** * DTG active format information (additional aspect ratio * information only used in DVB MPEG-2 transport streams) * 0 if not set. * * - encoding: unused * - decoding: Set by decoder. */ int dtg_active_format; #define FF_DTG_AFD_SAME 8 #define FF_DTG_AFD_4_3 9 #define FF_DTG_AFD_16_9 10 #define FF_DTG_AFD_14_9 11 #define FF_DTG_AFD_4_3_SP_14_9 13 #define FF_DTG_AFD_16_9_SP_14_9 14 #define FF_DTG_AFD_SP_4_3 15 /** * maximum motion estimation search range in subpel units * If 0 then no limit. 
* * - encoding: Set by user. * - decoding: unused */ int me_range; /** * intra quantizer bias * - encoding: Set by user. * - decoding: unused */ int intra_quant_bias; #define FF_DEFAULT_QUANT_BIAS 999999 /** * inter quantizer bias * - encoding: Set by user. * - decoding: unused */ int inter_quant_bias; /** * color table ID * - encoding: unused * - decoding: Which clrtable should be used for 8bit RGB images. * Tables have to be stored somewhere. FIXME */ int color_table_id; /** * internal_buffer count * Don't touch, used by libavcodec default_get_buffer(). */ int internal_buffer_count; /** * internal_buffers * Don't touch, used by libavcodec default_get_buffer(). */ void *internal_buffer; #define FF_LAMBDA_SHIFT 7 #define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT) #define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda #define FF_LAMBDA_MAX (256*128-1) #define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove /** * Global quality for codecs which cannot change it per frame. * This should be proportional to MPEG-1/2/4 qscale. * - encoding: Set by user. * - decoding: unused */ int global_quality; #define FF_CODER_TYPE_VLC 0 #define FF_CODER_TYPE_AC 1 #define FF_CODER_TYPE_RAW 2 #define FF_CODER_TYPE_RLE 3 #define FF_CODER_TYPE_DEFLATE 4 /** * coder type * - encoding: Set by user. * - decoding: unused */ int coder_type; /** * context model * - encoding: Set by user. * - decoding: unused */ int context_model; #if 0 /** * * - encoding: unused * - decoding: Set by user. */ uint8_t * (*realloc)(struct AVCodecContext *s, uint8_t *buf, int buf_size); #endif /** * slice flags * - encoding: unused * - decoding: Set by user. 
     */
    int slice_flags;
#define SLICE_FLAG_CODED_ORDER    0x0001 ///< draw_horiz_band() is called in coded order instead of display
#define SLICE_FLAG_ALLOW_FIELD    0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
#define SLICE_FLAG_ALLOW_PLANE    0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)

    /**
     * XVideo Motion Acceleration
     * - encoding: forbidden
     * - decoding: set by decoder
     */
    int xvmc_acceleration;

    /**
     * macroblock decision mode
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_decision;
#define FF_MB_DECISION_SIMPLE 0        ///< uses mb_cmp
#define FF_MB_DECISION_BITS   1        ///< chooses the one which needs the fewest bits
#define FF_MB_DECISION_RD     2        ///< rate distortion

    /**
     * custom intra quantization matrix
     * - encoding: Set by user, can be NULL.
     * - decoding: Set by libavcodec.
     */
    uint16_t *intra_matrix;

    /**
     * custom inter quantization matrix
     * - encoding: Set by user, can be NULL.
     * - decoding: Set by libavcodec.
     */
    uint16_t *inter_matrix;

    /**
     * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
     * This is used to work around some encoder bugs.
     * - encoding: unused
     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
     */
    unsigned int stream_codec_tag;

    /**
     * scene change detection threshold
     * 0 is default, larger means fewer detected scene changes.
     * - encoding: Set by user.
     * - decoding: unused
     */
    int scenechange_threshold;

    /**
     * minimum Lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int lmin;

    /**
     * maximum Lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int lmax;

    /**
     * palette control structure
     * - encoding: ??? (no palette-enabled encoder yet)
     * - decoding: Set by user.
     */
    struct AVPaletteControl *palctrl;

    /**
     * noise reduction strength
     * - encoding: Set by user.
     * - decoding: unused
     */
    int noise_reduction;

    /**
     * Called at the beginning of a frame to get cr buffer for it.
* Buffer type (size, hints) must be the same. libavcodec won't check it. * libavcodec will pass previous buffer in pic, function should return * same buffer or new buffer with old frame "painted" into it. * If pic.data[0] == NULL must behave like get_buffer(). * if CODEC_CAP_DR1 is not set then reget_buffer() must call * avcodec_default_reget_buffer() instead of providing buffers allocated by * some other means. * - encoding: unused * - decoding: Set by libavcodec., user can override */ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic); /** * Number of bits which should be loaded into the rc buffer before decoding starts. * - encoding: Set by user. * - decoding: unused */ int rc_initial_buffer_occupancy; /** * * - encoding: Set by user. * - decoding: unused */ int inter_threshold; /** * CODEC_FLAG2_* * - encoding: Set by user. * - decoding: Set by user. */ int flags2; /** * Simulates errors in the bitstream to test error concealment. * - encoding: Set by user. * - decoding: unused */ int error_rate; /** * MP3 antialias algorithm, see FF_AA_* below. * - encoding: unused * - decoding: Set by user. */ int antialias_algo; #define FF_AA_AUTO 0 #define FF_AA_FASTINT 1 //not implemented yet #define FF_AA_INT 2 #define FF_AA_FLOAT 3 /** * quantizer noise shaping * - encoding: Set by user. * - decoding: unused */ int quantizer_noise_shaping; /** * thread count * is used to decide how many independent tasks should be passed to execute() * - encoding: Set by user. * - decoding: Set by user. */ int thread_count; /** * The codec may call this to execute several independent things. * It will return only after finishing all tasks. * The user may replace this with some multithreaded implementation, * the default implementation will execute the parts serially. * @param count the number of things to execute * - encoding: Set by libavcodec, user can override. * - decoding: Set by libavcodec, user can override. 
     */
    int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);

    /**
     * thread opaque
     * Can be used by execute() to store some per AVCodecContext stuff.
     * - encoding: set by execute()
     * - decoding: set by execute()
     */
    void *thread_opaque;

    /**
     * Motion estimation threshold below which no motion estimation is
     * performed, but instead the user specified motion vectors are used.
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int me_threshold;

    /**
     * Macroblock threshold below which the user specified macroblock types will be used.
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_threshold;

    /**
     * precision of the intra DC coefficient - 8
     * - encoding: Set by user.
     * - decoding: unused
     */
    int intra_dc_precision;

    /**
     * noise vs. sse weight for the nsse comparison function
     * - encoding: Set by user.
     * - decoding: unused
     */
    int nsse_weight;

    /**
     * Number of macroblock rows at the top which are skipped.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int skip_top;

    /**
     * Number of macroblock rows at the bottom which are skipped.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int skip_bottom;

    /**
     * profile
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int profile;
#define FF_PROFILE_UNKNOWN -99
#define FF_PROFILE_AAC_MAIN 0
#define FF_PROFILE_AAC_LOW  1
#define FF_PROFILE_AAC_SSR  2
#define FF_PROFILE_AAC_LTP  3
#define FF_PROFILE_H264_BASELINE 66
#define FF_PROFILE_H264_MAIN     77
#define FF_PROFILE_H264_EXTENDED 88
#define FF_PROFILE_H264_HIGH     100
#define FF_PROFILE_H264_HIGH_10  110
#define FF_PROFILE_H264_HIGH_422 122
#define FF_PROFILE_H264_HIGH_444 244
#define FF_PROFILE_H264_CAVLC_444 44

    /**
     * level
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    int level;
#define FF_LEVEL_UNKNOWN -99

    /**
     * low resolution decoding, 1-> 1/2 size, 2->1/4 size
     * - encoding: unused
     * - decoding: Set by user.
     */
    int lowres;

    /**
     * Bitstream width / height, may be different from width/height if lowres
     * or other things are used.
     * - encoding: unused
     * - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
     */
    int coded_width, coded_height;

    /**
     * frame skip threshold
     * - encoding: Set by user.
     * - decoding: unused
     */
    int frame_skip_threshold;

    /**
     * frame skip factor
     * - encoding: Set by user.
     * - decoding: unused
     */
    int frame_skip_factor;

    /**
     * frame skip exponent
     * - encoding: Set by user.
     * - decoding: unused
     */
    int frame_skip_exp;

    /**
     * frame skip comparison function
     * - encoding: Set by user.
     * - decoding: unused
     */
    int frame_skip_cmp;

    /**
     * Border processing masking, raises the quantizer for mbs on the borders
     * of the picture.
     * - encoding: Set by user.
     * - decoding: unused
     */
    float border_masking;

    /**
     * minimum MB lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_lmin;

    /**
     * maximum MB lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_lmax;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int me_penalty_compensation;

    /**
     *
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_loop_filter;

    /**
     *
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_idct;

    /**
     *
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_frame;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int bidir_refine;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int brd_scale;

    /**
     * constant rate factor - quality-based VBR - values ~correspond to qps
     * - encoding: Set by user.
     * - decoding: unused
     */
    float crf;

    /**
     * constant quantization parameter rate control method
     * - encoding: Set by user.
     * - decoding: unused
     */
    int cqp;

    /**
     * minimum GOP size
     * - encoding: Set by user.
     * - decoding: unused
     */
    int keyint_min;

    /**
     * number of reference frames
     * - encoding: Set by user.
     * - decoding: Set by lavc.
*/ int refs; /** * chroma qp offset from luma * - encoding: Set by user. * - decoding: unused */ int chromaoffset; /** * Influences how often B-frames are used. * - encoding: Set by user. * - decoding: unused */ int bframebias; /** * trellis RD quantization * - encoding: Set by user. * - decoding: unused */ int trellis; /** * Reduce fluctuations in qp (before curve compression). * - encoding: Set by user. * - decoding: unused */ float complexityblur; /** * in-loop deblocking filter alphac0 parameter * alpha is in the range -6...6 * - encoding: Set by user. * - decoding: unused */ int deblockalpha; /** * in-loop deblocking filter beta parameter * beta is in the range -6...6 * - encoding: Set by user. * - decoding: unused */ int deblockbeta; /** * macroblock subpartition sizes to consider - p8x8, p4x4, b8x8, i8x8, i4x4 * - encoding: Set by user. * - decoding: unused */ int partitions; #define X264_PART_I4X4 0x001 /* Analyze i4x4 */ #define X264_PART_I8X8 0x002 /* Analyze i8x8 (requires 8x8 transform) */ #define X264_PART_P8X8 0x010 /* Analyze p16x8, p8x16 and p8x8 */ #define X264_PART_P4X4 0x020 /* Analyze p8x4, p4x8, p4x4 */ #define X264_PART_B8X8 0x100 /* Analyze b16x8, b8x16 and b8x8 */ /** * direct MV prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto) * - encoding: Set by user. * - decoding: unused */ int directpred; /** * Audio cutoff bandwidth (0 means "automatic") * - encoding: Set by user. * - decoding: unused */ int cutoff; /** * Multiplied by qscale for each frame and added to scene_change_score. * - encoding: Set by user. * - decoding: unused */ int scenechange_factor; /** * * Note: Value depends upon the compare function used for fullpel ME. * - encoding: Set by user. * - decoding: unused */ int mv0_threshold; /** * Adjusts sensitivity of b_frame_strategy 1. * - encoding: Set by user. * - decoding: unused */ int b_sensitivity; /** * - encoding: Set by user. 
* - decoding: unused */ int compression_level; #define FF_COMPRESSION_DEFAULT -1 /** * Sets whether to use LPC mode - used by FLAC encoder. * - encoding: Set by user. * - decoding: unused */ int use_lpc; /** * LPC coefficient precision - used by FLAC encoder * - encoding: Set by user. * - decoding: unused */ int lpc_coeff_precision; /** * - encoding: Set by user. * - decoding: unused */ int min_prediction_order; /** * - encoding: Set by user. * - decoding: unused */ int max_prediction_order; /** * search method for selecting prediction order * - encoding: Set by user. * - decoding: unused */ int prediction_order_method; /** * - encoding: Set by user. * - decoding: unused */ int min_partition_order; /** * - encoding: Set by user. * - decoding: unused */ int max_partition_order; /** * GOP timecode frame start number, in non drop frame format * - encoding: Set by user. * - decoding: unused */ int64_t timecode_frame_start; #if LIBAVCODEC_VERSION_MAJOR < 53 /** * Decoder should decode to this many channels if it can (0 for default) * - encoding: unused * - decoding: Set by user. * @deprecated Deprecated in favor of request_channel_layout. */ int request_channels; #endif /** * Percentage of dynamic range compression to be applied by the decoder. * The default value is 1.0, corresponding to full compression. * - encoding: unused * - decoding: Set by user. */ float drc_scale; /** * opaque 64bit number (generally a PTS) that will be reordered and * output in AVFrame.reordered_opaque * - encoding: unused * - decoding: Set by user. */ int64_t reordered_opaque; /** * Bits per sample/pixel of internal libavcodec pixel/sample format. * This field is applicable only when sample_fmt is SAMPLE_FMT_S32. * - encoding: set by user. * - decoding: set by libavcodec. */ int bits_per_raw_sample; /** * Audio channel layout. * - encoding: set by user. * - decoding: set by libavcodec. 
*/ int64_t channel_layout; /** * Request decoder to use this channel layout if it can (0 for default) * - encoding: unused * - decoding: Set by user. */ int64_t request_channel_layout; /** * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow. * - encoding: Set by user. * - decoding: unused. */ float rc_max_available_vbv_use; /** * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow. * - encoding: Set by user. * - decoding: unused. */ float rc_min_vbv_overflow_use; /** * Hardware accelerator in use * - encoding: unused. * - decoding: Set by libavcodec */ struct AVHWAccel *hwaccel; /** * For some codecs, the time base is closer to the field rate than the frame rate. * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration * if no telecine is used ... * * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. */ int ticks_per_frame; /** * Hardware accelerator context. * For some hardware accelerators, a global context needs to be * provided by the user. In that case, this holds display-dependent * data FFmpeg cannot instantiate itself. Please refer to the * FFmpeg HW accelerator documentation to know how to fill this * is. e.g. for VA API, this is a struct vaapi_context. * - encoding: unused * - decoding: Set by user */ void *hwaccel_context; /** * Chromaticity coordinates of the source primaries. * - encoding: Set by user * - decoding: Set by libavcodec */ enum AVColorPrimaries color_primaries; /** * Color Transfer Characteristic. * - encoding: Set by user * - decoding: Set by libavcodec */ enum AVColorTransferCharacteristic color_trc; /** * YUV colorspace type. * - encoding: Set by user * - decoding: Set by libavcodec */ enum AVColorSpace colorspace; /** * MPEG vs JPEG YUV range. * - encoding: Set by user * - decoding: Set by libavcodec */ enum AVColorRange color_range; /** * This defines the location of chroma samples. 
* - encoding: Set by user * - decoding: Set by libavcodec */ enum AVChromaLocation chroma_sample_location; /** * The codec may call this to execute several independent things. * It will return only after finishing all tasks. * The user may replace this with some multithreaded implementation, * the default implementation will execute the parts serially. * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. * @param c context passed also to func * @param count the number of things to execute * @param arg2 argument passed unchanged to func * @param ret return values of executed functions, must have space for "count" values. May be NULL. * @param func function that will be called count times, with jobnr from 0 to count-1. * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no * two instances of func executing at the same time will have the same threadnr. * @return always 0 currently, but code should handle a future improvement where when any call to func * returns < 0 no further calls to func may be done and < 0 is returned. * - encoding: Set by libavcodec, user can override. * - decoding: Set by libavcodec, user can override. */ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); /** * explicit P-frame weighted prediction analysis method * 0: off * 1: fast blind weighting (one reference duplicate with -1 offset) * 2: smart weighting (full fade detection analysis) * - encoding: Set by user. * - decoding: unused */ int weighted_p_pred; /** * AQ mode * 0: Disabled * 1: Variance AQ (complexity mask) * 2: Auto-variance AQ (experimental) * - encoding: Set by user * - decoding: unused */ int aq_mode; /** * AQ strength * Reduces blocking and blurring in flat and textured areas. 
     * - encoding: Set by user
     * - decoding: unused
     */
    float aq_strength;

    /**
     * PSY RD
     * Strength of psychovisual optimization
     * - encoding: Set by user
     * - decoding: unused
     */
    float psy_rd;

    /**
     * PSY trellis
     * Strength of psychovisual optimization
     * - encoding: Set by user
     * - decoding: unused
     */
    float psy_trellis;

    /**
     * RC lookahead
     * Number of frames for frametype and ratecontrol lookahead
     * - encoding: Set by user
     * - decoding: unused
     */
    int rc_lookahead;
} AVCodecContext;

/**
 * AVCodec.
 */
typedef struct AVCodec {
    /**
     * Name of the codec implementation.
     * The name is globally unique among encoders and among decoders (but an
     * encoder and a decoder can share the same name).
     * This is the primary way to find a codec from the user perspective.
     */
    const char *name;
    enum AVMediaType type;
    enum CodecID id;
    int priv_data_size;
    int (*init)(AVCodecContext *);
    int (*encode)(AVCodecContext *, uint8_t *buf, int buf_size, void *data);
    int (*close)(AVCodecContext *);
    int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
    /**
     * Codec capabilities.
     * see CODEC_CAP_*
     */
    int capabilities;
    struct AVCodec *next;
    /**
     * Flush buffers.
     * Will be called when seeking
     */
    void (*flush)(AVCodecContext *);
    const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
    const enum PixelFormat *pix_fmts;       ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
    /**
     * Descriptive name for the codec, meant to be more human readable than name.
     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
     */
    const char *long_name;
    const int *supported_samplerates;       ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
    const enum SampleFormat *sample_fmts;   ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
    const int64_t *channel_layouts;         ///< array of supported channel layouts, or NULL if unknown.
array is terminated by 0 } AVCodec; /** * AVHWAccel. */ typedef struct AVHWAccel { /** * Name of the hardware accelerated codec. * The name is globally unique among encoders and among decoders (but an * encoder and a decoder can share the same name). */ const char *name; /** * Type of codec implemented by the hardware accelerator. * * See AVMEDIA_TYPE_xxx */ enum AVMediaType type; /** * Codec implemented by the hardware accelerator. * * See CODEC_ID_xxx */ enum CodecID id; /** * Supported pixel format. * * Only hardware accelerated formats are supported here. */ enum PixelFormat pix_fmt; /** * Hardware accelerated codec capabilities. * see FF_HWACCEL_CODEC_CAP_* */ int capabilities; struct AVHWAccel *next; /** * Called at the beginning of each frame or field picture. * * Meaningful frame information (codec specific) is guaranteed to * be parsed at this point. This function is mandatory. * * Note that buf can be NULL along with buf_size set to 0. * Otherwise, this means the whole frame is available at this point. * * @param avctx the codec context * @param buf the frame data buffer base * @param buf_size the size of the frame in bytes * @return zero if successful, a negative value otherwise */ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); /** * Callback for each slice. * * Meaningful slice information (codec specific) is guaranteed to * be parsed at this point. This function is mandatory. * * @param avctx the codec context * @param buf the slice data buffer base * @param buf_size the size of the slice in bytes * @return zero if successful, a negative value otherwise */ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); /** * Called at the end of each frame or field picture. * * The whole picture is parsed at this point and can now be sent * to the hardware accelerator. This function is mandatory. 
* * @param avctx the codec context * @return zero if successful, a negative value otherwise */ int (*end_frame)(AVCodecContext *avctx); /** * Size of HW accelerator private data. * * Private data is allocated with av_mallocz() before * AVCodecContext.get_buffer() and deallocated after * AVCodecContext.release_buffer(). */ int priv_data_size; } AVHWAccel; /** * four components are given, that's all. * the last component is alpha */ typedef struct AVPicture { uint8_t *data[4]; int linesize[4]; ///< number of bytes per line } AVPicture; #if LIBAVCODEC_VERSION_MAJOR < 53 /** * AVPaletteControl * This structure defines a method for communicating palette changes * between a demuxer and a decoder. * * @deprecated Use AVPacket to send palette changes instead. * This is totally broken. */ #define AVPALETTE_SIZE 1024 #define AVPALETTE_COUNT 256 typedef struct AVPaletteControl { /* Demuxer sets this to 1 to indicate the palette has changed; * decoder resets to 0. */ int palette_changed; /* 4-byte ARGB palette entries, stored in native byte order; note that * the individual palette components should be on an 8-bit scale; if * the palette data comes from an IBM VGA native format, the component * data is probably 6 bits in size and needs to be scaled. */ unsigned int palette[AVPALETTE_COUNT]; } AVPaletteControl attribute_deprecated; #endif enum AVSubtitleType { SUBTITLE_NONE, SUBTITLE_BITMAP, ///< A bitmap, pict will be set /** * Plain text, the text field must be set by the decoder and is * authoritative. ass and pict fields may contain approximations. */ SUBTITLE_TEXT, /** * Formatted text, the ass field must be set by the decoder and is * authoritative. pict and text fields may contain approximations. 
*/ SUBTITLE_ASS, }; typedef struct AVSubtitleRect { int x; ///< top left corner of pict, undefined when pict is not set int y; ///< top left corner of pict, undefined when pict is not set int w; ///< width of pict, undefined when pict is not set int h; ///< height of pict, undefined when pict is not set int nb_colors; ///< number of colors in pict, undefined when pict is not set /** * data+linesize for the bitmap of this subtitle. * can be set for text/ass as well once they were rendered */ AVPicture pict; enum AVSubtitleType type; char *text; ///< 0 terminated plain UTF-8 text /** * 0 terminated ASS/SSA compatible event line. * The presentation of this is unaffected by the other values in this * struct. */ char *ass; } AVSubtitleRect; typedef struct AVSubtitle { uint16_t format; /* 0 = graphics */ uint32_t start_display_time; /* relative to packet pts, in ms */ uint32_t end_display_time; /* relative to packet pts, in ms */ unsigned num_rects; AVSubtitleRect **rects; int64_t pts; ///< Same as packet pts, in AV_TIME_BASE } AVSubtitle; /* packet functions */ /** * @deprecated use NULL instead */ attribute_deprecated void av_destruct_packet_nofree(AVPacket *pkt); /** * Default packet destructor. */ void av_destruct_packet(AVPacket *pkt); /** * Initialize optional fields of a packet with default values. * * @param pkt packet */ void av_init_packet(AVPacket *pkt); /** * Allocate the payload of a packet and initialize its fields with * default values. * * @param pkt packet * @param size wanted payload size * @return 0 if OK, AVERROR_xxx otherwise */ int av_new_packet(AVPacket *pkt, int size); /** * Reduce packet size, correctly zeroing padding * * @param pkt packet * @param size new size */ void av_shrink_packet(AVPacket *pkt, int size); /** * @warning This is a hack - the packet memory allocation stuff is broken. The * packet is allocated if it was not really allocated. */ int av_dup_packet(AVPacket *pkt); /** * Free a packet. 
* * @param pkt packet to free */ void av_free_packet(AVPacket *pkt); /* resample.c */ struct ReSampleContext; struct AVResampleContext; typedef struct ReSampleContext ReSampleContext; #if LIBAVCODEC_VERSION_MAJOR < 53 /** * @deprecated Use av_audio_resample_init() instead. */ attribute_deprecated ReSampleContext *audio_resample_init(int output_channels, int input_channels, int output_rate, int input_rate); #endif /** * Initializes audio resampling context * * @param output_channels number of output channels * @param input_channels number of input channels * @param output_rate output sample rate * @param input_rate input sample rate * @param sample_fmt_out requested output sample format * @param sample_fmt_in input sample format * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq * @param log2_phase_count log2 of the number of entries in the polyphase filterbank * @param linear If 1 then the used FIR filter will be linearly interpolated between the 2 closest, if 0 the closest will be used * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate * @return allocated ReSampleContext, NULL if error occurred */ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, int output_rate, int input_rate, enum SampleFormat sample_fmt_out, enum SampleFormat sample_fmt_in, int filter_length, int log2_phase_count, int linear, double cutoff); int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); void audio_resample_close(ReSampleContext *s); /** * Initializes an audio resampler. * Note, if either rate is not an integer then simply scale both rates up so they are. 
* @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq * @param log2_phase_count log2 of the number of entries in the polyphase filterbank * @param linear If 1 then the used FIR filter will be linearly interpolated between the 2 closest, if 0 the closest will be used * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate */ struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); /** * resamples. * @param src an array of unconsumed samples * @param consumed the number of samples of src which have been consumed are returned here * @param src_size the number of unconsumed samples available * @param dst_size the amount of space in samples available in dst * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. * @return the number of samples written in dst or -1 if an error occurred */ int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); /** * Compensates samplerate/timestamp drift. The compensation is done by changing * the resampler parameters, so no audible clicks or similar distortions occur * @param compensation_distance distance in output samples over which the compensation should be performed * @param sample_delta number of output samples which should be output less * * example: av_resample_compensate(c, 10, 500) * here instead of 510 samples only 500 samples would be output * * note, due to rounding the actual compensation might be slightly different, * especially if the compensation_distance is large and the in_rate used during init is small */ void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); void av_resample_close(struct AVResampleContext *c); /** * Allocate memory for a picture. 
Call avpicture_free to free it. * * @param picture the picture to be filled in * @param pix_fmt the format of the picture * @param width the width of the picture * @param height the height of the picture * @return zero if successful, a negative value if not */ int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height); /** * Free a picture previously allocated by avpicture_alloc(). * * @param picture the AVPicture to be freed */ void avpicture_free(AVPicture *picture); /** * Fill in the AVPicture fields. * The fields of the given AVPicture are filled in by using the 'ptr' address * which points to the image data buffer. Depending on the specified picture * format, one or multiple image data pointers and line sizes will be set. * If a planar format is specified, several pointers will be set pointing to * the different picture planes and the line sizes of the different planes * will be stored in the lines_sizes array. * Call with ptr == NULL to get the required size for the ptr buffer. * * @param picture AVPicture whose fields are to be filled in * @param ptr Buffer which will contain or contains the actual image data * @param pix_fmt The format in which the picture data is stored. * @param width the width of the image in pixels * @param height the height of the image in pixels * @return size of the image data in bytes */ int avpicture_fill(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt, int width, int height); int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height, unsigned char *dest, int dest_size); /** * Calculate the size in bytes that a picture of the given width and height * would occupy if stored in the given picture format. * Note that this returns the size of a compact representation as generated * by avpicture_layout, which can be smaller than the size required for e.g. * avpicture_fill. 
* * @param pix_fmt the given picture format * @param width the width of the image * @param height the height of the image * @return Image data size in bytes or -1 on error (e.g. too large dimensions). */ int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height); void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt); void avcodec_set_dimensions(AVCodecContext *s, int width, int height); #if LIBAVCODEC_VERSION_MAJOR < 53 /** * Returns the pixel format corresponding to the name name. * * If there is no pixel format with name name, then looks for a * pixel format with the name corresponding to the native endian * format of name. * For example in a little-endian system, first looks for "gray16", * then for "gray16le". * * Finally if no pixel format has been found, returns PIX_FMT_NONE. * * @deprecated Deprecated in favor of av_get_pix_fmt(). */ attribute_deprecated enum PixelFormat avcodec_get_pix_fmt(const char* name); #endif /** * Returns a value representing the fourCC code associated to the * pixel format pix_fmt, or 0 if no associated fourCC code can be * found. */ unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt); #define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ #define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ #define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ #define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ #define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ #define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ /** * Computes what kind of losses will occur when converting from one specific * pixel format to another. * When converting from one pixel format to another, information loss may occur. * For example, when converting from RGB24 to GRAY, the color information will * be lost. 
Similarly, other losses occur when converting from some formats to * other formats. These losses can involve loss of chroma, but also loss of * resolution, loss of color depth, loss due to the color space conversion, loss * of the alpha bits or loss due to color quantization. * avcodec_get_pix_fmt_loss() informs you about the various types of losses * which will occur when converting from one pixel format to another. * * @param[in] dst_pix_fmt destination pixel format * @param[in] src_pix_fmt source pixel format * @param[in] has_alpha Whether the source pixel format alpha channel is used. * @return Combination of flags informing you what kind of losses will occur. */ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt, int has_alpha); /** * Finds the best pixel format to convert to given a certain source pixel * format. When converting from one pixel format to another, information loss * may occur. For example, when converting from RGB24 to GRAY, the color * information will be lost. Similarly, other losses occur when converting from * some formats to other formats. avcodec_find_best_pix_fmt() searches which of * the given pixel formats should be used to suffer the least amount of loss. * The pixel formats from which it chooses one, are determined by the * pix_fmt_mask parameter. * * @code * src_pix_fmt = PIX_FMT_YUV420P; * pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24); * dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss); * @endcode * * @param[in] pix_fmt_mask bitmask determining which pixel format to choose from * @param[in] src_pix_fmt source pixel format * @param[in] has_alpha Whether the source pixel format alpha channel is used. * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. * @return The best pixel format to convert to or -1 if none was found. 
*/ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); /** * Print in buf the string corresponding to the pixel format with * number pix_fmt, or an header if pix_fmt is negative. * * @param[in] buf the buffer where to write the string * @param[in] buf_size the size of buf * @param[in] pix_fmt the number of the pixel format to print the corresponding info string, or * a negative value to print the corresponding header. * Meaningful values for obtaining a pixel format info vary from 0 to PIX_FMT_NB -1. */ void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt); #define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */ #define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */ /** * Tell if an image really has transparent alpha values. * @return ored mask of FF_ALPHA_xxx constants */ int img_get_alpha_info(const AVPicture *src, enum PixelFormat pix_fmt, int width, int height); /* deinterlace a picture */ /* deinterlace - if not supported return -1 */ int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int width, int height); /* external high level API */ /** * If c is NULL, returns the first registered codec, * if c is non-NULL, returns the next registered codec after c, * or NULL if c is the last one. */ AVCodec *av_codec_next(AVCodec *c); /** * Returns the LIBAVCODEC_VERSION_INT constant. */ unsigned avcodec_version(void); /** * Returns the libavcodec build-time configuration. */ const char *avcodec_configuration(void); /** * Returns the libavcodec license. */ const char *avcodec_license(void); /** * Initializes libavcodec. * * @warning This function must be called before any other libavcodec * function. */ void avcodec_init(void); #if LIBAVCODEC_VERSION_MAJOR < 53 /** * @deprecated Deprecated in favor of avcodec_register(). 
*/ attribute_deprecated void register_avcodec(AVCodec *codec); #endif /** * Register the codec codec and initialize libavcodec. * * @see avcodec_init() */ void avcodec_register(AVCodec *codec); /** * Finds a registered encoder with a matching codec ID. * * @param id CodecID of the requested encoder * @return An encoder if one was found, NULL otherwise. */ AVCodec *avcodec_find_encoder(enum CodecID id); /** * Finds a registered encoder with the specified name. * * @param name name of the requested encoder * @return An encoder if one was found, NULL otherwise. */ AVCodec *avcodec_find_encoder_by_name(const char *name); /** * Finds a registered decoder with a matching codec ID. * * @param id CodecID of the requested decoder * @return A decoder if one was found, NULL otherwise. */ AVCodec *avcodec_find_decoder(enum CodecID id); /** * Finds a registered decoder with the specified name. * * @param name name of the requested decoder * @return A decoder if one was found, NULL otherwise. */ AVCodec *avcodec_find_decoder_by_name(const char *name); void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); /** * Sets the fields of the given AVCodecContext to default values. * * @param s The AVCodecContext of which the fields should be set to default values. */ void avcodec_get_context_defaults(AVCodecContext *s); /** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! * we WILL change its arguments and name a few times! */ void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); /** * Allocates an AVCodecContext and sets its fields to default values. The * resulting struct can be deallocated by simply calling av_free(). * * @return An AVCodecContext filled with default values or NULL on failure. * @see avcodec_get_context_defaults */ AVCodecContext *avcodec_alloc_context(void); /** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! * we WILL change its arguments and name a few times! 
*/ AVCodecContext *avcodec_alloc_context2(enum AVMediaType); /** * Copy the settings of the source AVCodecContext into the destination * AVCodecContext. The resulting destination codec context will be * unopened, i.e. you are required to call avcodec_open() before you * can use this AVCodecContext to decode/encode video/audio data. * * @param dest target codec context, should be initialized with * avcodec_alloc_context(), but otherwise uninitialized * @param src source codec context * @return AVERROR() on error (e.g. memory allocation error), 0 on success */ int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); /** * Sets the fields of the given AVFrame to default values. * * @param pic The AVFrame of which the fields should be set to default values. */ void avcodec_get_frame_defaults(AVFrame *pic); /** * Allocates an AVFrame and sets its fields to default values. The resulting * struct can be deallocated by simply calling av_free(). * * @return An AVFrame filled with default values or NULL on failure. * @see avcodec_get_frame_defaults */ AVFrame *avcodec_alloc_frame(void); int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); /** * Returns the amount of padding in pixels which the get_buffer callback must * provide around the edge of the image for codecs which do not have the * CODEC_FLAG_EMU_EDGE flag. * * @return Required padding in pixels. */ unsigned avcodec_get_edge_width(void); /** * Modifies width and height values so that they will result in a memory * buffer that is acceptable for the codec if you do not use any horizontal * padding. * * May only be used if a codec with CODEC_CAP_DR1 has been opened. * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased * according to avcodec_get_edge_width() before. 
*/ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); /** * Modifies width and height values so that they will result in a memory * buffer that is acceptable for the codec if you also ensure that all * line sizes are a multiple of the respective linesize_align[i]. * * May only be used if a codec with CODEC_CAP_DR1 has been opened. * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased * according to avcodec_get_edge_width() before. */ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]); /** * Checks if the given dimension of a picture is valid, meaning that all * bytes of the picture can be addressed with a signed int. * * @param[in] w Width of the picture. * @param[in] h Height of the picture. * @return Zero if valid, a negative value if invalid. */ int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h); enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); int avcodec_thread_init(AVCodecContext *s, int thread_count); void avcodec_thread_free(AVCodecContext *s); int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); //FIXME func typedef /** * Initializes the AVCodecContext to use the given AVCodec. Prior to using this * function the context has to be allocated. * * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for * retrieving a codec. * * @warning This function is not thread safe! 
* * @code * avcodec_register_all(); * codec = avcodec_find_decoder(CODEC_ID_H264); * if (!codec) * exit(1); * * context = avcodec_alloc_context(); * * if (avcodec_open(context, codec) < 0) * exit(1); * @endcode * * @param avctx The context which will be set up to use the given codec. * @param codec The codec to use within the context. * @return zero on success, a negative value on error * @see avcodec_alloc_context, avcodec_find_decoder, avcodec_find_encoder */ int avcodec_open(AVCodecContext *avctx, AVCodec *codec); #if LIBAVCODEC_VERSION_MAJOR < 53 /** * Decodes an audio frame from buf into samples. * Wrapper function which calls avcodec_decode_audio3. * * @deprecated Use avcodec_decode_audio3 instead. * @param avctx the codec context * @param[out] samples the output buffer * @param[in,out] frame_size_ptr the output buffer size in bytes * @param[in] buf the input buffer * @param[in] buf_size the input buffer size in bytes * @return On error a negative value is returned, otherwise the number of bytes * used or zero if no frame could be decompressed. */ attribute_deprecated int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, const uint8_t *buf, int buf_size); #endif /** * Decodes the audio frame of size avpkt->size from avpkt->data into samples. * Some decoders may support multiple frames in a single AVPacket, such * decoders would then just decode the first frame. In this case, * avcodec_decode_audio3 has to be called again with an AVPacket that contains * the remaining data in order to decode the second frame etc. * If no frame * could be outputted, frame_size_ptr is zero. Otherwise, it is the * decompressed frame size in bytes. * * @warning You must set frame_size_ptr to the allocated size of the * output buffer before calling avcodec_decode_audio3(). 
* * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than * the actual read bytes because some optimized bitstream readers read 32 or 64 * bits at once and could read over the end. * * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that * no overreading happens for damaged MPEG streams. * * @note You might have to align the input buffer avpkt->data and output buffer * samples. The alignment requirements depend on the CPU: On some CPUs it isn't * necessary at all, on others it won't work at all if not aligned and on others * it will work but it will have an impact on performance. * * In practice, avpkt->data should have 4 byte alignment at minimum and * samples should be 16 byte aligned unless the CPU doesn't need it * (AltiVec and SSE do). * * @param avctx the codec context * @param[out] samples the output buffer, sample type in avctx->sample_fmt * @param[in,out] frame_size_ptr the output buffer size in bytes * @param[in] avpkt The input AVPacket containing the input buffer. * You can create such packet with av_init_packet() and by then setting * data and size, some decoders might in addition need other fields. * All decoders are designed to use the least fields possible though. * @return On error a negative value is returned, otherwise the number of bytes * used or zero if no frame data was decompressed (used) from the input AVPacket. */ int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, AVPacket *avpkt); #if LIBAVCODEC_VERSION_MAJOR < 53 /** * Decodes a video frame from buf into picture. * Wrapper function which calls avcodec_decode_video2. * * @deprecated Use avcodec_decode_video2 instead. * @param avctx the codec context * @param[out] picture The AVFrame in which the decoded video frame will be stored. 
* @param[in] buf the input buffer * @param[in] buf_size the size of the input buffer in bytes * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. * @return On error a negative value is returned, otherwise the number of bytes * used or zero if no frame could be decompressed. */ attribute_deprecated int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const uint8_t *buf, int buf_size); #endif /** * Decodes the video frame of size avpkt->size from avpkt->data into picture. * Some decoders may support multiple frames in a single AVPacket, such * decoders would then just decode the first frame. * * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than * the actual read bytes because some optimized bitstream readers read 32 or 64 * bits at once and could read over the end. * * @warning The end of the input buffer buf should be set to 0 to ensure that * no overreading happens for damaged MPEG streams. * * @note You might have to align the input buffer avpkt->data. * The alignment requirements depend on the CPU: on some CPUs it isn't * necessary at all, on others it won't work at all if not aligned and on others * it will work but it will have an impact on performance. * * In practice, avpkt->data should have 4 byte alignment at minimum. * * @note Some codecs have a delay between input and output, these need to be * fed with avpkt->data=NULL, avpkt->size=0 at the end to return the remaining frames. * * @param avctx the codec context * @param[out] picture The AVFrame in which the decoded video frame will be stored. * Use avcodec_alloc_frame to get an AVFrame, the codec will * allocate memory for the actual bitmap. * @param[in] avpkt The input AVPacket containing the input buffer. * You can create such packet with av_init_packet() and by then setting * data and size, some decoders might in addition need other fields like * flags&AV_PKT_FLAG_KEY. 
All decoders are designed to use the least * fields possible. * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. * @return On error a negative value is returned, otherwise the number of bytes * used or zero if no frame could be decompressed. */ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt); #if LIBAVCODEC_VERSION_MAJOR < 53 /* Decode a subtitle message. Return -1 if error, otherwise return the * number of bytes used. If no subtitle could be decompressed, * got_sub_ptr is zero. Otherwise, the subtitle is stored in *sub. */ attribute_deprecated int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const uint8_t *buf, int buf_size); #endif /** * Decodes a subtitle message. * Returns a negative value on error, otherwise returns the number of bytes used. * If no subtitle could be decompressed, got_sub_ptr is zero. * Otherwise, the subtitle is stored in *sub. * * @param avctx the codec context * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored. * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. * @param[in] avpkt The input AVPacket containing the input buffer. */ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt); int avcodec_parse_frame(AVCodecContext *avctx, uint8_t **pdata, int *data_size_ptr, uint8_t *buf, int buf_size); /** * Encodes an audio frame from samples into buf. * * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. * However, for PCM audio the user will know how much space is needed * because it depends on the value passed in buf_size as described * below. In that case a lower value can be used. 
* * @param avctx the codec context * @param[out] buf the output buffer * @param[in] buf_size the output buffer size * @param[in] samples the input buffer containing the samples * The number of samples read from this buffer is frame_size*channels, * both of which are defined in avctx. * For PCM audio the number of samples read from samples is equal to * buf_size * input_sample_size / output_sample_size. * @return On error a negative value is returned, on success zero or the number * of bytes used to encode the data read from the input buffer. */ int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size, const short *samples); /** * Encodes a video frame from pict into buf. * The input picture should be * stored using a specific format, namely avctx.pix_fmt. * * @param avctx the codec context * @param[out] buf the output buffer for the bitstream of encoded frame * @param[in] buf_size the size of the output buffer in bytes * @param[in] pict the input picture to encode * @return On error a negative value is returned, on success zero or the number * of bytes used from the output buffer. */ int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVFrame *pict); int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub); int avcodec_close(AVCodecContext *avctx); /** * Register all the codecs, parsers and bitstream filters which were enabled at * configuration time. If you do not call this function you can select exactly * which formats you want to support, by using the individual registration * functions. * * @see avcodec_register * @see av_register_codec_parser * @see av_register_bitstream_filter */ void avcodec_register_all(void); /** * Flush buffers, should be called when seeking or when switching to a different stream. 
*/ void avcodec_flush_buffers(AVCodecContext *avctx); void avcodec_default_free_buffers(AVCodecContext *s); /* misc useful functions */ /** * Returns a single letter to describe the given picture type pict_type. * * @param[in] pict_type the picture type * @return A single character representing the picture type. */ char av_get_pict_type_char(int pict_type); /** * Returns codec bits per sample. * * @param[in] codec_id the codec * @return Number of bits per sample or zero if unknown for the given codec. */ int av_get_bits_per_sample(enum CodecID codec_id); /** * Returns sample format bits per sample. * * @param[in] sample_fmt the sample format * @return Number of bits per sample or zero if unknown for the given sample format. */ int av_get_bits_per_sample_format(enum SampleFormat sample_fmt); /* frame parsing */ typedef struct AVCodecParserContext { void *priv_data; struct AVCodecParser *parser; int64_t frame_offset; /* offset of the current frame */ int64_t cur_offset; /* current offset (incremented by each av_parser_parse()) */ int64_t next_frame_offset; /* offset of the next frame */ /* video info */ int pict_type; /* XXX: Put it back in AVCodecContext. */ /** * This field is used for proper frame duration computation in lavf. * It signals, how much longer the frame duration of the current frame * is compared to normal frame duration. * * frame_duration = (1 + repeat_pict) * time_base * * It is used by codecs like H.264 to display telecined material. */ int repeat_pict; /* XXX: Put it back in AVCodecContext. 
*/ int64_t pts; /* pts of the current frame */ int64_t dts; /* dts of the current frame */ /* private data */ int64_t last_pts; int64_t last_dts; int fetch_timestamp; #define AV_PARSER_PTS_NB 4 int cur_frame_start_index; int64_t cur_frame_offset[AV_PARSER_PTS_NB]; int64_t cur_frame_pts[AV_PARSER_PTS_NB]; int64_t cur_frame_dts[AV_PARSER_PTS_NB]; int flags; #define PARSER_FLAG_COMPLETE_FRAMES 0x0001 int64_t offset; ///< byte offset from starting packet start int64_t cur_frame_end[AV_PARSER_PTS_NB]; /*! * Set by parser to 1 for key frames and 0 for non-key frames. * It is initialized to -1, so if the parser doesn't set this flag, * old-style fallback using FF_I_TYPE picture type as key frames * will be used. */ int key_frame; /** * Time difference in stream time base units from the pts of this * packet to the point at which the output from the decoder has converged * independent from the availability of previous frames. That is, the * frames are virtually identical no matter if decoding started from * the very first frame or from this keyframe. * Is AV_NOPTS_VALUE if unknown. * This field is not the display duration of the current frame. * * The purpose of this field is to allow seeking in streams that have no * keyframes in the conventional sense. It corresponds to the * recovery point SEI in H.264 and match_time_delta in NUT. It is also * essential for some types of subtitle streams to ensure that all * subtitles are correctly displayed after seeking. */ int64_t convergence_duration; // Timestamp generation support: /** * Synchronization point for start of timestamp generation. * * Set to >0 for sync point, 0 for no sync point and <0 for undefined * (default). * * For example, this corresponds to presence of H.264 buffering period * SEI message. */ int dts_sync_point; /** * Offset of the current timestamp against last timestamp sync point in * units of AVCodecContext.time_base. * * Set to INT_MIN when dts_sync_point unused. 
Otherwise, it must * contain a valid timestamp offset. * * Note that the timestamp of sync point has usually a nonzero * dts_ref_dts_delta, which refers to the previous sync point. Offset of * the next frame after timestamp sync point will be usually 1. * * For example, this corresponds to H.264 cpb_removal_delay. */ int dts_ref_dts_delta; /** * Presentation delay of current frame in units of AVCodecContext.time_base. * * Set to INT_MIN when dts_sync_point unused. Otherwise, it must * contain valid non-negative timestamp delta (presentation time of a frame * must not lie in the past). * * This delay represents the difference between decoding and presentation * time of the frame. * * For example, this corresponds to H.264 dpb_output_delay. */ int pts_dts_delta; /** * Position of the packet in file. * * Analogous to cur_frame_pts/dts */ int64_t cur_frame_pos[AV_PARSER_PTS_NB]; /** * Byte position of currently parsed frame in stream. */ int64_t pos; /** * Previous frame byte position. */ int64_t last_pos; } AVCodecParserContext; typedef struct AVCodecParser { int codec_ids[5]; /* several codec IDs are permitted */ int priv_data_size; int (*parser_init)(AVCodecParserContext *s); int (*parser_parse)(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size); void (*parser_close)(AVCodecParserContext *s); int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); struct AVCodecParser *next; } AVCodecParser; AVCodecParser *av_parser_next(AVCodecParser *c); void av_register_codec_parser(AVCodecParser *parser); AVCodecParserContext *av_parser_init(int codec_id); #if LIBAVCODEC_VERSION_MAJOR < 53 attribute_deprecated int av_parser_parse(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts); #endif /** * Parse a packet. * * @param s parser context. * @param avctx codec context. 
* @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. * @param buf input buffer. * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). * @param pts input presentation timestamp. * @param dts input decoding timestamp. * @param pos input byte position in stream. * @return the number of bytes of the input bitstream used. * * Example: * @code * while(in_len){ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, * in_data, in_len, * pts, dts, pos); * in_data += len; * in_len -= len; * * if(size) * decode_frame(data, size); * } * @endcode */ int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos); int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe); void av_parser_close(AVCodecParserContext *s); typedef struct AVBitStreamFilterContext { void *priv_data; struct AVBitStreamFilter *filter; AVCodecParserContext *parser; struct AVBitStreamFilterContext *next; } AVBitStreamFilterContext; typedef struct AVBitStreamFilter { const char *name; int priv_data_size; int (*filter)(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe); void (*close)(AVBitStreamFilterContext *bsfc); struct AVBitStreamFilter *next; } AVBitStreamFilter; void av_register_bitstream_filter(AVBitStreamFilter *bsf); AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe); void 
av_bitstream_filter_close(AVBitStreamFilterContext *bsf); AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); /* memory */ /** * Reallocates the given block if it is not large enough, otherwise it * does nothing. * * @see av_realloc */ void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size); /** * Allocates a buffer, reusing the given one if large enough. * * Contrary to av_fast_realloc the current buffer contents might not be * preserved and on error the old buffer is freed, thus no special * handling to avoid memleaks is necessary. * * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer * @param size size of the buffer *ptr points to * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and * *size 0 if an error occurred. */ void av_fast_malloc(void *ptr, unsigned int *size, unsigned int min_size); /** * Copy image 'src' to 'dst'. */ void av_picture_copy(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int width, int height); /** * Crop image top and left side. */ int av_picture_crop(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int top_band, int left_band); /** * Pad image. */ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt, int padtop, int padbottom, int padleft, int padright, int *color); /** * Encodes extradata length to a buffer. Used by xiph codecs. * * @param s buffer to write to; must be at least (v/255+1) bytes long * @param v size of extradata in bytes * @return number of bytes written to the buffer. */ unsigned int av_xiphlacing(unsigned char *s, unsigned int v); /** * Parses str and put in width_ptr and height_ptr the detected values. * * @return 0 in case of a successful parsing, a negative value otherwise * @param[in] str the string to parse: it has to be a string in the format * <width>x<height> or a valid video frame size abbreviation. 
* @param[in,out] width_ptr pointer to the variable which will contain the detected * frame width value * @param[in,out] height_ptr pointer to the variable which will contain the detected * frame height value */ int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str); /** * Parses str and put in frame_rate the detected values. * * @return 0 in case of a successful parsing, a negative value otherwise * @param[in] str the string to parse: it has to be a string in the format * <frame_rate_num>/<frame_rate_den>, a float number or a valid video rate abbreviation * @param[in,out] frame_rate pointer to the AVRational which will contain the detected * frame rate */ int av_parse_video_frame_rate(AVRational *frame_rate, const char *str); /** * Logs a generic warning message about a missing feature. This function is * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) * only, and would normally not be used by applications. * @param[in] avc a pointer to an arbitrary struct of which the first field is * a pointer to an AVClass struct * @param[in] feature string containing the name of the missing feature * @param[in] want_sample indicates if samples are wanted which exhibit this feature. * If want_sample is non-zero, additional verbage will be added to the log * message which tells the user how to report samples to the development * mailing list. */ void av_log_missing_feature(void *avc, const char *feature, int want_sample); /** * Logs a generic warning message asking for a sample. This function is * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) * only, and would normally not be used by applications. * @param[in] avc a pointer to an arbitrary struct of which the first field is * a pointer to an AVClass struct * @param[in] msg string containing an optional message, or NULL if no message */ void av_log_ask_for_sample(void *avc, const char *msg); /** * Registers the hardware accelerator hwaccel. 
*/ void av_register_hwaccel(AVHWAccel *hwaccel); /** * If hwaccel is NULL, returns the first registered hardware accelerator, * if hwaccel is non-NULL, returns the next registered hardware accelerator * after hwaccel, or NULL if hwaccel is the last one. */ AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel); /** * Lock operation used by lockmgr */ enum AVLockOp { AV_LOCK_CREATE, ///< Create a mutex AV_LOCK_OBTAIN, ///< Lock the mutex AV_LOCK_RELEASE, ///< Unlock the mutex AV_LOCK_DESTROY, ///< Free mutex resources }; /** * Register a user provided lock manager supporting the operations * specified by AVLockOp. mutex points to a (void *) where the * lockmgr should store/get a pointer to a user allocated mutex. It's * NULL upon AV_LOCK_CREATE and != NULL for all other ops. * * @param cb User defined callback. Note: FFmpeg may invoke calls to this * callback during the call to av_lockmgr_register(). * Thus, the application must be prepared to handle that. * If cb is set to NULL the lockmgr will be unregistered. * Also note that during unregistration the previously registered * lockmgr callback may also be invoked. */ int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); #endif /* AVCODEC_AVCODEC_H */
123linslouis-android-video-cutter
jni/libavcodec/avcodec.h
C
asf20
130,822
/* * Interface to libgsm for gsm encoding/decoding * Copyright (c) 2005 Alban Bedel <albeu@free.fr> * Copyright (c) 2006, 2007 Michel Bardiaux <mbardiaux@mediaxim.be> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Interface to libgsm for gsm encoding/decoding */ // The idiosyncrasies of GSM-in-WAV are explained at http://kbs.cs.tu-berlin.de/~jutta/toast.html #include "avcodec.h" #include <gsm/gsm.h> // gsm.h misses some essential constants #define GSM_BLOCK_SIZE 33 #define GSM_MS_BLOCK_SIZE 65 #define GSM_FRAME_SIZE 160 static av_cold int libgsm_init(AVCodecContext *avctx) { if (avctx->channels > 1) { av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n", avctx->channels); return -1; } if(avctx->codec->decode){ if(!avctx->channels) avctx->channels= 1; if(!avctx->sample_rate) avctx->sample_rate= 8000; avctx->sample_fmt = SAMPLE_FMT_S16; }else{ if (avctx->sample_rate != 8000) { av_log(avctx, AV_LOG_ERROR, "Sample rate 8000Hz required for GSM, got %dHz\n", avctx->sample_rate); if(avctx->strict_std_compliance > FF_COMPLIANCE_INOFFICIAL) return -1; } if (avctx->bit_rate != 13000 /* Official */ && avctx->bit_rate != 13200 /* Very common */ && avctx->bit_rate != 0 /* Unknown; a.o. 
mov does not set bitrate when decoding */ ) { av_log(avctx, AV_LOG_ERROR, "Bitrate 13000bps required for GSM, got %dbps\n", avctx->bit_rate); if(avctx->strict_std_compliance > FF_COMPLIANCE_INOFFICIAL) return -1; } } avctx->priv_data = gsm_create(); switch(avctx->codec_id) { case CODEC_ID_GSM: avctx->frame_size = GSM_FRAME_SIZE; avctx->block_align = GSM_BLOCK_SIZE; break; case CODEC_ID_GSM_MS: { int one = 1; gsm_option(avctx->priv_data, GSM_OPT_WAV49, &one); avctx->frame_size = 2*GSM_FRAME_SIZE; avctx->block_align = GSM_MS_BLOCK_SIZE; } } avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; } static av_cold int libgsm_close(AVCodecContext *avctx) { av_freep(&avctx->coded_frame); gsm_destroy(avctx->priv_data); avctx->priv_data = NULL; return 0; } static int libgsm_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { // we need a full block if(buf_size < avctx->block_align) return 0; switch(avctx->codec_id) { case CODEC_ID_GSM: gsm_encode(avctx->priv_data,data,frame); break; case CODEC_ID_GSM_MS: gsm_encode(avctx->priv_data,data,frame); gsm_encode(avctx->priv_data,((short*)data)+GSM_FRAME_SIZE,frame+32); } return avctx->block_align; } AVCodec libgsm_encoder = { "libgsm", AVMEDIA_TYPE_AUDIO, CODEC_ID_GSM, 0, libgsm_init, libgsm_encode_frame, libgsm_close, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), }; AVCodec libgsm_ms_encoder = { "libgsm_ms", AVMEDIA_TYPE_AUDIO, CODEC_ID_GSM_MS, 0, libgsm_init, libgsm_encode_frame, libgsm_close, .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), }; static int libgsm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; *data_size = 0; /* In case of error */ if(buf_size < avctx->block_align) return -1; 
switch(avctx->codec_id) { case CODEC_ID_GSM: if(gsm_decode(avctx->priv_data,buf,data)) return -1; *data_size = GSM_FRAME_SIZE*sizeof(int16_t); break; case CODEC_ID_GSM_MS: if(gsm_decode(avctx->priv_data,buf,data) || gsm_decode(avctx->priv_data,buf+33,((int16_t*)data)+GSM_FRAME_SIZE)) return -1; *data_size = GSM_FRAME_SIZE*sizeof(int16_t)*2; } return avctx->block_align; } AVCodec libgsm_decoder = { "libgsm", AVMEDIA_TYPE_AUDIO, CODEC_ID_GSM, 0, libgsm_init, NULL, libgsm_close, libgsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), }; AVCodec libgsm_ms_decoder = { "libgsm_ms", AVMEDIA_TYPE_AUDIO, CODEC_ID_GSM_MS, 0, libgsm_init, NULL, libgsm_close, libgsm_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), };
123linslouis-android-video-cutter
jni/libavcodec/libgsm.c
C
asf20
5,483
/*
 * Sun Rasterfile (.sun/.ras/im{1,8,24}/.sunras) image decoder
 * Copyright (c) 2007, 2008 Ivo van Poorten
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "avcodec.h"

/* Rasterfile header "type" field values. */
#define RT_OLD          0
#define RT_STANDARD     1
#define RT_BYTE_ENCODED 2  /* run-length encoded pixel data */
#define RT_FORMAT_RGB   3
#define RT_FORMAT_TIFF  4
#define RT_FORMAT_IFF   5

typedef struct SUNRASTContext {
    AVFrame picture;  // reusable output frame owned by this context
} SUNRASTContext;

static av_cold int sunrast_init(AVCodecContext *avctx) {
    SUNRASTContext *s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->picture);
    avctx->coded_frame = &s->picture;

    return 0;
}

/**
 * Decode one Sun Rasterfile image.
 *
 * The packet layout is: 32-byte big-endian header, optional colormap
 * (maplength bytes), then pixel data (raw or RLE depending on "type").
 * All reads are bounds-checked against the packet so truncated or
 * crafted input fails cleanly instead of overreading the buffer.
 *
 * @param data      AVFrame* output
 * @param data_size set to sizeof(AVFrame) on success, untouched on error
 * @return number of bytes consumed, or -1 on error.
 */
static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
                                int *data_size, AVPacket *avpkt) {
    const uint8_t *buf       = avpkt->data;
    const uint8_t *buf_end   = avpkt->data + avpkt->size;
    SUNRASTContext * const s = avctx->priv_data;
    AVFrame *picture         = data;
    AVFrame * const p        = &s->picture;
    unsigned int w, h, depth, type, maptype, maplength, stride, x, y, len, alen;
    uint8_t *ptr;
    const uint8_t *bufstart = buf;

    /* The header is 8 big-endian 32-bit words; insist on having all of it. */
    if (avpkt->size < 32 || AV_RB32(buf) != 0x59a66a95) {
        av_log(avctx, AV_LOG_ERROR, "this is not sunras encoded data\n");
        return -1;
    }

    w         = AV_RB32(buf +  4);
    h         = AV_RB32(buf +  8);
    depth     = AV_RB32(buf + 12);
    type      = AV_RB32(buf + 20);
    maptype   = AV_RB32(buf + 24);
    maplength = AV_RB32(buf + 28);

    if (type == RT_FORMAT_TIFF || type == RT_FORMAT_IFF) {
        av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n");
        return -1;
    }
    if (type > RT_FORMAT_IFF) {
        av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n");
        return -1;
    }
    if (maptype & ~1) {
        av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n");
        return -1;
    }

    buf += 32;

    /* The colormap (and anything after it) must fit inside the packet. */
    if (maplength > (unsigned)(buf_end - buf)) {
        av_log(avctx, AV_LOG_ERROR, "invalid colormap length\n");
        return -1;
    }

    switch (depth) {
        case 1:
            avctx->pix_fmt = PIX_FMT_MONOWHITE;
            break;
        case 8:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        case 24:
            avctx->pix_fmt = (type == RT_FORMAT_RGB) ? PIX_FMT_RGB24 : PIX_FMT_BGR24;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid depth\n");
            return -1;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    if (depth != 8 && maplength) {
        av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n");
    } else if (depth == 8) {
        /* Palette is stored planar: R[ncolors], G[ncolors], B[ncolors]. */
        unsigned int ncolors = maplength / 3;

        if (!maplength) {
            av_log(avctx, AV_LOG_ERROR, "colormap expected\n");
            return -1;
        }
        /* Hard error, so log it as one (was mistakenly a warning). */
        if (maplength % 3 || maplength > 768) {
            av_log(avctx, AV_LOG_ERROR, "invalid colormap length\n");
            return -1;
        }

        ptr = p->data[1];
        for (x = 0; x < ncolors; x++, ptr += 4)
            *(uint32_t *)ptr = (buf[x] << 16) + (buf[ncolors + x] << 8) + buf[2 * ncolors + x];
    }

    buf += maplength;

    ptr    = p->data[0];
    stride = p->linesize[0];

    /* scanlines are aligned on 16 bit boundaries */
    len  = (depth * w + 7) >> 3;
    alen = len + (len & 1);

    if (type == RT_BYTE_ENCODED) {
        /* RLE: 0x80 escapes a (count, value) pair; a bare 0x80 count of 0
         * encodes a literal 0x80 byte. Guard every input read. */
        int value, run;
        uint8_t *end = ptr + h * stride;

        x = 0;
        while (ptr != end && buf < buf_end) {
            run = 1;
            if ((value = *buf++) == 0x80) {
                if (buf >= buf_end)
                    break;
                run = *buf++ + 1;
                if (run != 1) {
                    if (buf >= buf_end)
                        break;
                    value = *buf++;
                }
            }
            while (run--) {
                if (x < len)
                    ptr[x] = value;
                if (++x >= alen) {
                    x = 0;
                    ptr += stride;
                    if (ptr == end)
                        break;
                }
            }
        }
    } else {
        /* Raw copy: the last row needs only len bytes, earlier rows alen. */
        if (h && alen &&
            (uint64_t)(h - 1) * alen + len > (uint64_t)(buf_end - buf)) {
            av_log(avctx, AV_LOG_ERROR, "insufficient pixel data\n");
            return -1;
        }
        for (y = 0; y < h; y++) {
            memcpy(ptr, buf, len);
            ptr += stride;
            buf += alen;
        }
    }

    *picture   = s->picture;
    *data_size = sizeof(AVFrame);

    return buf - bufstart;
}

static av_cold int sunrast_end(AVCodecContext *avctx) {
    SUNRASTContext *s = avctx->priv_data;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

AVCodec sunrast_decoder = {
    "sunrast",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_SUNRAST,
    sizeof(SUNRASTContext),
    sunrast_init,
    NULL,
    sunrast_end,
    sunrast_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"),
};
123linslouis-android-video-cutter
jni/libavcodec/sunrast.c
C
asf20
5,498
/* * DSP utils * Copyright (c) 2000, 2001, 2002 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * DSP utils. * note, many functions in here may use MMX which trashes the FPU state, it is * absolutely necessary to call emms_c() between dsp & float/double code */ #ifndef AVCODEC_DSPUTIL_H #define AVCODEC_DSPUTIL_H #include "libavutil/intreadwrite.h" #include "avcodec.h" //#define DEBUG /* dct code */ typedef short DCTELEM; void fdct_ifast (DCTELEM *data); void fdct_ifast248 (DCTELEM *data); void ff_jpeg_fdct_islow (DCTELEM *data); void ff_fdct248_islow (DCTELEM *data); void j_rev_dct (DCTELEM *data); void j_rev_dct4 (DCTELEM *data); void j_rev_dct2 (DCTELEM *data); void j_rev_dct1 (DCTELEM *data); void ff_wmv2_idct_c(DCTELEM *data); void ff_fdct_mmx(DCTELEM *block); void ff_fdct_mmx2(DCTELEM *block); void ff_fdct_sse2(DCTELEM *block); void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride); void ff_h264_idct_add_c(uint8_t *dst, DCTELEM *block, int stride); void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride); void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride); void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block); void 
ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block); void ff_h264_idct_add16_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]); void ff_h264_idct_add16intra_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]); void ff_h264_idct8_add4_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]); void ff_h264_idct_add8_c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]); void ff_vector_fmul_window_c(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len); void ff_float_to_int16_c(int16_t *dst, const float *src, long len); void ff_float_to_int16_interleave_c(int16_t *dst, const float **src, long len, int channels); /* encoding scans */ extern const uint8_t ff_alternate_horizontal_scan[64]; extern const uint8_t ff_alternate_vertical_scan[64]; extern const uint8_t ff_zigzag_direct[64]; extern const uint8_t ff_zigzag248_direct[64]; /* pixel operations */ #define MAX_NEG_CROP 1024 /* temporary */ extern uint32_t ff_squareTbl[512]; extern uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP]; /* VP3 DSP functions */ void ff_vp3_idct_c(DCTELEM *block/* align 16*/); void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/); void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/); void ff_vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/); void ff_vp3_v_loop_filter_c(uint8_t *src, int stride, int *bounding_values); void ff_vp3_h_loop_filter_c(uint8_t *src, int stride, int *bounding_values); /* VP6 DSP functions */ void ff_vp6_filter_diag4_c(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights, const int16_t *v_weights); /* Bink functions */ void ff_bink_idct_c (DCTELEM *block); void ff_bink_idct_add_c(uint8_t *dest, int linesize, DCTELEM *block); void 
ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block); /* CAVS functions */ void ff_put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride); void ff_avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride); void ff_put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride); void ff_avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride); /* VC1 functions */ void ff_put_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd); void ff_avg_vc1_mspel_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int rnd); /* EA functions */ void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block); /* 1/2^n downscaling functions from imgconvert.c */ void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height); /* minimum alignment rules ;) If you notice errors in the align stuff, need more alignment for some ASM code for some CPU or need to use a function with less aligned data then send a mail to the ffmpeg-devel mailing list, ... !warning These alignments might not match reality, (missing attribute((align)) stuff somewhere possible). I (Michael) did not check them, these are just the alignments which I think could be reached easily ... 
!future video codecs might need functions with less strict alignment */ /* void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size); void diff_pixels_c(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride); void put_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size); void add_pixels_clamped_c(const DCTELEM *block, uint8_t *pixels, int line_size); void clear_blocks_c(DCTELEM *blocks); */ /* add and put pixel (decoding) */ // blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16 //h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4 typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h); typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h); typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride); typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y); typedef void (*op_fill_func)(uint8_t *block/*align width (8 or 16)*/, uint8_t value, int line_size, int h); #define DEF_OLD_QPEL(name)\ void ff_put_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\ void ff_put_no_rnd_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride);\ void ff_avg_ ## name (uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride); DEF_OLD_QPEL(qpel16_mc11_old_c) DEF_OLD_QPEL(qpel16_mc31_old_c) DEF_OLD_QPEL(qpel16_mc12_old_c) DEF_OLD_QPEL(qpel16_mc32_old_c) DEF_OLD_QPEL(qpel16_mc13_old_c) DEF_OLD_QPEL(qpel16_mc33_old_c) DEF_OLD_QPEL(qpel8_mc11_old_c) DEF_OLD_QPEL(qpel8_mc31_old_c) DEF_OLD_QPEL(qpel8_mc12_old_c) DEF_OLD_QPEL(qpel8_mc32_old_c) DEF_OLD_QPEL(qpel8_mc13_old_c) DEF_OLD_QPEL(qpel8_mc33_old_c) #define CALL_2X_PIXELS(a, b, n)\ static void a(uint8_t 
*block, const uint8_t *pixels, int line_size, int h){\ b(block , pixels , line_size, h);\ b(block+n, pixels+n, line_size, h);\ } /* motion estimation */ // h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2 // although currently h<4 is not used as functions with width <8 are neither used nor implemented typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/; /** * Scantable. */ typedef struct ScanTable{ const uint8_t *scantable; uint8_t permutated[64]; uint8_t raster_end[64]; #if ARCH_PPC /** Used by dct_quantize_altivec to find last-non-zero */ DECLARE_ALIGNED(16, uint8_t, inverse)[64]; #endif } ScanTable; void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable); void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h); /** * DSPContext. */ typedef struct DSPContext { /* pixel ops : interface with DCT */ void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size); void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride); void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*put_signed_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*put_pixels_nonclamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*add_pixels_clamped)(const DCTELEM *block/*align 16*/, uint8_t *pixels/*align 8*/, int line_size); void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size); void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size); int (*sum_abs_dctelem)(DCTELEM *block/*align 16*/); /** * translational global motion compensation. 
*/ void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder); /** * global motion compensation. */ void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height); void (*clear_block)(DCTELEM *block/*align 16*/); void (*clear_blocks)(DCTELEM *blocks/*align 16*/); int (*pix_sum)(uint8_t * pix, int line_size); int (*pix_norm1)(uint8_t * pix, int line_size); // 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4 me_cmp_func sad[6]; /* identical to pix_absAxA except additional void * */ me_cmp_func sse[6]; me_cmp_func hadamard8_diff[6]; me_cmp_func dct_sad[6]; me_cmp_func quant_psnr[6]; me_cmp_func bit[6]; me_cmp_func rd[6]; me_cmp_func vsad[6]; me_cmp_func vsse[6]; me_cmp_func nsse[6]; me_cmp_func w53[6]; me_cmp_func w97[6]; me_cmp_func dct_max[6]; me_cmp_func dct264_sad[6]; me_cmp_func me_pre_cmp[6]; me_cmp_func me_cmp[6]; me_cmp_func me_sub_cmp[6]; me_cmp_func mb_cmp[6]; me_cmp_func ildct_cmp[6]; //only width 16 used me_cmp_func frame_skip_cmp[6]; //only width 8 used int (*ssd_int8_vs_int16)(const int8_t *pix1, const int16_t *pix2, int size); /** * Halfpel motion compensation with rounding (a+b+1)>>1. * this is an array[4][4] of motion compensation functions for 4 * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination where the result is stored * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func put_pixels_tab[4][4]; /** * Halfpel motion compensation with rounding (a+b+1)>>1. 
* This is an array[4][4] of motion compensation functions for 4 * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination into which the result is averaged (a+b+1)>>1 * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func avg_pixels_tab[4][4]; /** * Halfpel motion compensation with no rounding (a+b)>>1. * this is an array[2][4] of motion compensation functions for 2 * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination where the result is stored * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func put_no_rnd_pixels_tab[4][4]; /** * Halfpel motion compensation with no rounding (a+b)>>1. * this is an array[2][4] of motion compensation functions for 2 * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination into which the result is averaged (a+b)>>1 * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func avg_no_rnd_pixels_tab[4][4]; void (*put_no_rnd_pixels_l2[2])(uint8_t *block/*align width (8 or 16)*/, const uint8_t *a/*align 1*/, const uint8_t *b/*align 1*/, int line_size, int h); /** * Thirdpel motion compensation with rounding (a+b+1)>>1. * this is an array[12] of motion compensation functions for the 9 thirdpe * positions<br> * *pixels_tab[ xthirdpel + 4*ythirdpel ] * @param block destination where the result is stored * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ tpel_mc_func put_tpel_pixels_tab[11]; //FIXME individual func ptr per width? tpel_mc_func avg_tpel_pixels_tab[11]; //FIXME individual func ptr per width? 
qpel_mc_func put_qpel_pixels_tab[2][16]; qpel_mc_func avg_qpel_pixels_tab[2][16]; qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]; qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16]; qpel_mc_func put_mspel_pixels_tab[8]; /** * h264 Chroma MC */ h264_chroma_mc_func put_h264_chroma_pixels_tab[3]; h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]; /* This is really one func used in VC-1 decoding */ h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]; h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]; qpel_mc_func put_h264_qpel_pixels_tab[4][16]; qpel_mc_func avg_h264_qpel_pixels_tab[4][16]; qpel_mc_func put_2tap_qpel_pixels_tab[4][16]; qpel_mc_func avg_2tap_qpel_pixels_tab[4][16]; /* AVS specific */ qpel_mc_func put_cavs_qpel_pixels_tab[2][16]; qpel_mc_func avg_cavs_qpel_pixels_tab[2][16]; void (*cavs_filter_lv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2); void (*cavs_filter_lh)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2); void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2); void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2); void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride); me_cmp_func pix_abs[2][4]; /* huffyuv specific */ void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w); void (*add_bytes_l2)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 16*/, int w); void (*diff_bytes)(uint8_t *dst/*align 16*/, uint8_t *src1/*align 16*/, uint8_t *src2/*align 1*/,int w); /** * subtract huffyuv's variant of median prediction * note, this might read from src1[-1], src2[-1] */ void (*sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top); void (*add_hfyu_median_prediction)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top); int (*add_hfyu_left_prediction)(uint8_t *dst, 
const uint8_t *src, int w, int left); void (*add_hfyu_left_prediction_bgr32)(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha); /* this might write to dst[w] */ void (*add_png_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp); void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w); void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale); void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale); void (*h261_loop_filter)(uint8_t *src, int stride); void (*x8_v_loop_filter)(uint8_t *src, int stride, int qscale); void (*x8_h_loop_filter)(uint8_t *src, int stride, int qscale); void (*vp3_idct_dc_add)(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/); void (*vp3_v_loop_filter)(uint8_t *src, int stride, int *bounding_values); void (*vp3_h_loop_filter)(uint8_t *src, int stride, int *bounding_values); void (*vp6_filter_diag4)(uint8_t *dst, uint8_t *src, int stride, const int16_t *h_weights,const int16_t *v_weights); /* assume len is a multiple of 4, and arrays are 16-byte aligned */ void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize); void (*ac3_downmix)(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len); /* no alignment needed */ void (*lpc_compute_autocorr)(const int32_t *data, int len, int lag, double *autoc); /* assume len is a multiple of 8, and arrays are 16-byte aligned */ void (*vector_fmul)(float *dst, const float *src, int len); void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len); /* assume len is a multiple of 8, and src arrays are 16-byte aligned */ void (*vector_fmul_add)(float *dst, const float *src0, const float *src1, const float *src2, int len); /* assume len is a multiple of 4, and arrays are 16-byte aligned */ void (*vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len); /* assume len is a multiple of 8, and 
arrays are 16-byte aligned */ void (*int32_to_float_fmul_scalar)(float *dst, const int *src, float mul, int len); void (*vector_clipf)(float *dst /* align 16 */, const float *src /* align 16 */, float min, float max, int len /* align 16 */); /** * Multiply a vector of floats by a scalar float. Source and * destination vectors must overlap exactly or not at all. * @param dst result vector, 16-byte aligned * @param src input vector, 16-byte aligned * @param mul scalar value * @param len length of vector, multiple of 4 */ void (*vector_fmul_scalar)(float *dst, const float *src, float mul, int len); /** * Multiply a vector of floats by concatenated short vectors of * floats and by a scalar float. Source and destination vectors * must overlap exactly or not at all. * [0]: short vectors of length 2, 8-byte aligned * [1]: short vectors of length 4, 16-byte aligned * @param dst output vector, 16-byte aligned * @param src input vector, 16-byte aligned * @param sv array of pointers to short vectors * @param mul scalar value * @param len number of elements in src and dst, multiple of 4 */ void (*vector_fmul_sv_scalar[2])(float *dst, const float *src, const float **sv, float mul, int len); /** * Multiply short vectors of floats by a scalar float, store * concatenated result. * [0]: short vectors of length 2, 8-byte aligned * [1]: short vectors of length 4, 16-byte aligned * @param dst output vector, 16-byte aligned * @param sv array of pointers to short vectors * @param mul scalar value * @param len number of output elements, multiple of 4 */ void (*sv_fmul_scalar[2])(float *dst, const float **sv, float mul, int len); /** * Calculate the scalar product of two vectors of floats. * @param v1 first vector, 16-byte aligned * @param v2 second vector, 16-byte aligned * @param len length of vectors, multiple of 4 */ float (*scalarproduct_float)(const float *v1, const float *v2, int len); /** * Calculate the sum and difference of two vectors of floats. 
* @param v1 first input vector, sum output, 16-byte aligned * @param v2 second input vector, difference output, 16-byte aligned * @param len length of vectors, multiple of 4 */ void (*butterflies_float)(float *restrict v1, float *restrict v2, int len); /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767] * simd versions: convert floats from [-32768.0,32767.0] without rescaling and arrays are 16byte aligned */ void (*float_to_int16)(int16_t *dst, const float *src, long len); void (*float_to_int16_interleave)(int16_t *dst, const float **src, long len, int channels); /* (I)DCT */ void (*fdct)(DCTELEM *block/* align 16*/); void (*fdct248)(DCTELEM *block/* align 16*/); /* IDCT really*/ void (*idct)(DCTELEM *block/* align 16*/); /** * block -> idct -> clip to unsigned 8 bit -> dest. * (-1392, 0, 0, ...) -> idct -> (-174, -174, ...) -> put -> (0, 0, ...) * @param line_size size in bytes of a horizontal line of dest */ void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/); /** * block -> idct -> add dest -> clip to unsigned 8 bit -> dest. * @param line_size size in bytes of a horizontal line of dest */ void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/); /** * idct input permutation. * several optimized IDCTs need a permutated input (relative to the normal order of the reference * IDCT) * this permutation must be performed before the idct_put/add, note, normally this can be merged * with the zigzag/alternate scan<br> * an example to avoid confusion: * - (->decode coeffs -> zigzag reorder -> dequant -> reference idct ->...) * - (x -> referece dct -> reference idct -> x) * - (x -> referece dct -> simple_mmx_perm = idct_permutation -> simple_idct_mmx -> x) * - (->decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant -> simple_idct_mmx ->...) 
*/ uint8_t idct_permutation[64]; int idct_permutation_type; #define FF_NO_IDCT_PERM 1 #define FF_LIBMPEG2_IDCT_PERM 2 #define FF_SIMPLE_IDCT_PERM 3 #define FF_TRANSPOSE_IDCT_PERM 4 #define FF_PARTTRANS_IDCT_PERM 5 #define FF_SSE2_IDCT_PERM 6 int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale); void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale); #define BASIS_SHIFT 16 #define RECON_SHIFT 6 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w); #define EDGE_WIDTH 16 void (*prefetch)(void *mem, int stride, int h); void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height); /* mlp/truehd functions */ void (*mlp_filter_channel)(int32_t *state, const int32_t *coeff, int firorder, int iirorder, unsigned int filter_shift, int32_t mask, int blocksize, int32_t *sample_buffer); /* vc1 functions */ void (*vc1_inv_trans_8x8)(DCTELEM *b); void (*vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, DCTELEM *block); void (*vc1_v_overlap)(uint8_t* src, int stride); void (*vc1_h_overlap)(uint8_t* src, int stride); void (*vc1_v_loop_filter4)(uint8_t *src, int stride, int pq); void (*vc1_h_loop_filter4)(uint8_t *src, int stride, int pq); void (*vc1_v_loop_filter8)(uint8_t *src, int stride, int pq); void (*vc1_h_loop_filter8)(uint8_t *src, int stride, int pq); void (*vc1_v_loop_filter16)(uint8_t *src, int stride, int pq); void (*vc1_h_loop_filter16)(uint8_t *src, int stride, int pq); /* put 8x8 block with bicubic interpolation and 
quarterpel precision * last argument is actually round value instead of height */ op_pixels_func put_vc1_mspel_pixels_tab[16]; op_pixels_func avg_vc1_mspel_pixels_tab[16]; /* intrax8 functions */ void (*x8_spatial_compensation[12])(uint8_t *src , uint8_t *dst, int linesize); void (*x8_setup_spatial_compensation)(uint8_t *src, uint8_t *dst, int linesize, int * range, int * sum, int edges); /** * Calculate scalar product of two vectors. * @param len length of vectors, should be multiple of 16 * @param shift number of bits to discard from product */ int32_t (*scalarproduct_int16)(int16_t *v1, int16_t *v2/*align 16*/, int len, int shift); /* ape functions */ /** * Calculate scalar product of v1 and v2, * and v1[i] += v3[i] * mul * @param len length of vectors, should be multiple of 16 */ int32_t (*scalarproduct_and_madd_int16)(int16_t *v1/*align 16*/, int16_t *v2, int16_t *v3, int len, int mul); /* rv30 functions */ qpel_mc_func put_rv30_tpel_pixels_tab[4][16]; qpel_mc_func avg_rv30_tpel_pixels_tab[4][16]; /* rv40 functions */ qpel_mc_func put_rv40_qpel_pixels_tab[4][16]; qpel_mc_func avg_rv40_qpel_pixels_tab[4][16]; h264_chroma_mc_func put_rv40_chroma_pixels_tab[3]; h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3]; /* bink functions */ op_fill_func fill_block_tab[2]; void (*scale_block)(const uint8_t src[64]/*align 8*/, uint8_t *dst/*align 8*/, int linesize); } DSPContext; void dsputil_static_init(void); void dsputil_init(DSPContext* p, AVCodecContext *avctx); int ff_check_alignment(void); /** * permute block according to permuatation. 
* @param last last non zero element in scantable order */
void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);

void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);

/* Replicate a byte value into all four byte lanes of a 32-bit word. */
#define BYTE_VEC32(c) ((c)*0x01010101UL)

/**
 * Byte-wise average of two packed 32-bit words, rounding up.
 * Each of the four bytes is averaged independently: masking the per-byte
 * low bit of (a^b) before the shift keeps carries from leaking between
 * lanes, so this computes (a+b+1)>>1 per byte.
 */
static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

/**
 * Byte-wise average of two packed 32-bit words, rounding down.
 * Same lane-isolation trick as rnd_avg32; computes (a+b)>>1 per byte.
 */
static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

/**
 * Scale a lambda value into the penalty factor used with the given
 * comparison function (low byte of type selects the FF_CMP_* metric).
 * The per-metric shifts/multipliers keep the penalties roughly
 * comparable across metrics of different magnitude.
 */
static inline int get_penalty_factor(int lambda, int lambda2, int type){
    switch(type&0xFF){
    default:
    case FF_CMP_SAD:
        return lambda>>FF_LAMBDA_SHIFT;
    case FF_CMP_DCT:
        return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
    case FF_CMP_W53:
        return (4*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_W97:
        return (2*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_SATD:
    case FF_CMP_DCT264:
        return (2*lambda)>>FF_LAMBDA_SHIFT;
    case FF_CMP_RD:
    case FF_CMP_PSNR:
    case FF_CMP_SSE:
    case FF_CMP_NSSE:
        return lambda2>>FF_LAMBDA_SHIFT;
    case FF_CMP_BIT:
        return 1;
    }
}

/**
 * Empty mmx state.
 * this must be called between any dsp function and float/double code.
* for example sin(); dsp->idct_put(); emms_c(); cos() */ #define emms_c() /* should be defined by architectures supporting one or more MultiMedia extension */ int mm_support(void); extern int mm_flags; void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx); void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx); void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx); void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx); void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx); void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx); void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx); void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx); void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx); void ff_dsputil_init_dwt(DSPContext *c); void ff_cavsdsp_init(DSPContext* c, AVCodecContext *avctx); void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_vc1dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_intrax8dsp_init(DSPContext* c, AVCodecContext *avctx); void ff_mlp_init(DSPContext* c, AVCodecContext *avctx); void ff_mlp_init_x86(DSPContext* c, AVCodecContext *avctx); #if HAVE_MMX #undef emms_c static inline void emms(void) { __asm__ volatile ("emms;":::"memory"); } #define emms_c() \ {\ if (mm_flags & FF_MM_MMX)\ emms();\ } #elif ARCH_ARM #if HAVE_NEON # define STRIDE_ALIGN 16 #endif #elif ARCH_PPC #define STRIDE_ALIGN 16 #elif HAVE_MMI #define STRIDE_ALIGN 16 #else #define mm_flags 0 #define mm_support() 0 #endif #ifndef STRIDE_ALIGN # define STRIDE_ALIGN 8 #endif #define LOCAL_ALIGNED(a, t, v, s, ...) \ uint8_t la_##v[sizeof(t s __VA_ARGS__) + (a)]; \ t (*v) __VA_ARGS__ = (void *)FFALIGN((uintptr_t)la_##v, a) #if HAVE_LOCAL_ALIGNED_8 # define LOCAL_ALIGNED_8(t, v, s, ...) DECLARE_ALIGNED(8, t, v) s __VA_ARGS__ #else # define LOCAL_ALIGNED_8(t, v, s, ...) 
LOCAL_ALIGNED(8, t, v, s, __VA_ARGS__) #endif #if HAVE_LOCAL_ALIGNED_16 # define LOCAL_ALIGNED_16(t, v, s, ...) DECLARE_ALIGNED(16, t, v) s __VA_ARGS__ #else # define LOCAL_ALIGNED_16(t, v, s, ...) LOCAL_ALIGNED(16, t, v, s, __VA_ARGS__) #endif /* PSNR */ void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3], int orig_linesize[3], int coded_linesize, AVCodecContext *avctx); #define WRAPPER8_16(name8, name16)\ static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\ return name8(s, dst , src , stride, h)\ +name8(s, dst+8 , src+8 , stride, h);\ } #define WRAPPER8_16_SQ(name8, name16)\ static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\ int score=0;\ score +=name8(s, dst , src , stride, 8);\ score +=name8(s, dst+8 , src+8 , stride, 8);\ if(h==16){\ dst += 8*stride;\ src += 8*stride;\ score +=name8(s, dst , src , stride, 8);\ score +=name8(s, dst+8 , src+8 , stride, 8);\ }\ return score;\ } static inline void copy_block2(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { AV_WN16(dst , AV_RN16(src )); dst+=dstStride; src+=srcStride; } } static inline void copy_block4(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { AV_WN32(dst , AV_RN32(src )); dst+=dstStride; src+=srcStride; } } static inline void copy_block8(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { AV_WN32(dst , AV_RN32(src )); AV_WN32(dst+4 , AV_RN32(src+4 )); dst+=dstStride; src+=srcStride; } } static inline void copy_block9(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { AV_WN32(dst , AV_RN32(src )); AV_WN32(dst+4 , AV_RN32(src+4 )); dst[8]= src[8]; dst+=dstStride; src+=srcStride; } } static inline void copy_block16(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { 
AV_WN32(dst , AV_RN32(src )); AV_WN32(dst+4 , AV_RN32(src+4 )); AV_WN32(dst+8 , AV_RN32(src+8 )); AV_WN32(dst+12, AV_RN32(src+12)); dst+=dstStride; src+=srcStride; } } static inline void copy_block17(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h) { int i; for(i=0; i<h; i++) { AV_WN32(dst , AV_RN32(src )); AV_WN32(dst+4 , AV_RN32(src+4 )); AV_WN32(dst+8 , AV_RN32(src+8 )); AV_WN32(dst+12, AV_RN32(src+12)); dst[16]= src[16]; dst+=dstStride; src+=srcStride; } } #endif /* AVCODEC_DSPUTIL_H */
123linslouis-android-video-cutter
jni/libavcodec/dsputil.h
C
asf20
32,984
/*
 * BMP image format encoder
 * Copyright (c) 2006, 2007 Michel Bardiaux
 * Copyright (c) 2009 Daniel Verkamp <daniel at drv.nu>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "bytestream.h"
#include "bmp.h"

/* Fixed two-entry palette for 1 bpp output: index 0 = black, 1 = white. */
static const uint32_t monoblack_pal[] = { 0x000000, 0xFFFFFF };
/* RGB565 channel masks, written out as the three BI_BITFIELDS entries. */
static const uint32_t rgb565_masks[] = { 0xF800, 0x07E0, 0x001F };

/**
 * Initialize the BMP encoder: point avctx->coded_frame at the
 * context-owned picture that bmp_encode_frame fills per frame.
 */
static av_cold int bmp_encode_init(AVCodecContext *avctx){
    BMPContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame = (AVFrame*)&s->picture;

    return 0;
}

/**
 * Encode one frame as a complete BMP file: BITMAPFILEHEADER,
 * BITMAPINFOHEADER, optional palette / bitfield masks, then the pixel
 * rows written bottom-up as the format requires.
 *
 * @param buf      output buffer
 * @param buf_size size of buf in bytes
 * @param data     input AVFrame
 * @return number of bytes written, or -1 on unsupported pixel format
 *         or too-small output buffer
 */
static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    BMPContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    const uint32_t *pal = NULL;
    int pad_bytes_per_row, bit_count, pal_entries = 0, compression = BMP_RGB;
    uint8_t *ptr;
    unsigned char* buf0 = buf;
    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;
    /* Map the input pixel format to a BMP bit depth and, where needed,
     * a palette or a BI_BITFIELDS mask set. */
    switch (avctx->pix_fmt) {
    case PIX_FMT_BGR24:
        bit_count = 24;
        break;
    case PIX_FMT_RGB555:
        bit_count = 16;
        break;
    case PIX_FMT_RGB565:
        bit_count = 16;
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
    case PIX_FMT_PAL8:
        bit_count = 8;
        pal = (uint32_t *)p->data[1];
        break;
    case PIX_FMT_MONOBLACK:
        bit_count = 1;
        pal = monoblack_pal;
        break;
    default:
        return -1;
    }
    /* A full palette (one entry per index) unless masks set pal_entries. */
    if (pal && !pal_entries) pal_entries = 1 << bit_count;
    /* Row size rounded up to whole bytes; each row is then padded to a
     * multiple of 4 bytes as the BMP format requires. */
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if(n_bytes>buf_size) {
        av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", n_bytes, buf_size);
        return -1;
    }
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    /* Palette / bitfield masks: the top (alpha) byte is masked off. */
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF);
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = buf0 + hsize;
    for(i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            /* 16-bit samples are stored little-endian one by one. */
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for(n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row); // zero the 4-byte row padding
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }
    return n_bytes;
}

AVCodec bmp_encoder = {
    "bmp",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_BMP,
    sizeof(BMPContext),
    bmp_encode_init,
    bmp_encode_frame,
    NULL, //encode_end,
    .pix_fmts = (const enum PixelFormat[]){
        PIX_FMT_BGR24,
        PIX_FMT_RGB555,
        PIX_FMT_RGB565,
        PIX_FMT_RGB8,
        PIX_FMT_BGR8,
        PIX_FMT_RGB4_BYTE,
        PIX_FMT_BGR4_BYTE,
        PIX_FMT_GRAY8,
        PIX_FMT_PAL8,
        PIX_FMT_MONOBLACK,
        PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("BMP image"),
};
123linslouis-android-video-cutter
jni/libavcodec/bmpenc.c
C
asf20
5,878
/*
 * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
 * Copyright (C) 2009 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_DIRAC_H
#define AVCODEC_DIRAC_H

/**
 * @file
 * Interfaces to Dirac Decoder/Encoder
 * @author Marco Gerards <marco@gnu.org>
 */

#include "avcodec.h"
#include "get_bits.h"

/**
 * Source parameters carried by a Dirac sequence header.
 * The *_index fields are lookups into preset tables; see the member docs.
 */
typedef struct {
    unsigned width;               ///< luma width in pixels
    unsigned height;              ///< luma height in pixels
    uint8_t chroma_format;        ///< 0: 444  1: 422  2: 420
    uint8_t interlaced;           ///< nonzero for interlaced source
    uint8_t top_field_first;
    uint8_t frame_rate_index;     ///< index into dirac_frame_rate[]
    uint8_t aspect_ratio_index;   ///< index into dirac_aspect_ratio[]
    uint16_t clean_width;         ///< clean-area width
    uint16_t clean_height;        ///< clean-area height
    uint16_t clean_left_offset;
    uint16_t clean_right_offset;
    uint8_t pixel_range_index;    ///< index into dirac_pixel_range_presets[]
    uint8_t color_spec_index;     ///< index into dirac_color_spec_presets[]
} dirac_source_params;

/**
 * Parse a Dirac sequence header from gb and fill *source.
 */
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source);

#endif /* AVCODEC_DIRAC_H */
123linslouis-android-video-cutter
jni/libavcodec/dirac.h
C
asf20
1,841
/*
 * copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_SYNTH_FILTER_H
#define AVCODEC_SYNTH_FILTER_H

#include "fft.h"

/**
 * Function-pointer table for the (possibly platform-optimized)
 * floating-point synthesis filter.  Consumes 32 input samples per call
 * using a 512-tap window and an IMDCT context; produces 32 output
 * samples scaled by 'scale' and offset by 'bias'.
 */
typedef struct SynthFilterContext {
    void (*synth_filter_float)(FFTContext *imdct,
                               float *synth_buf_ptr, int *synth_buf_offset,
                               float synth_buf2[32], const float window[512],
                               float out[32], const float in[32],
                               float scale, float bias);
} SynthFilterContext;

/* Install the default C implementation. */
void ff_synth_filter_init(SynthFilterContext *c);
/* Install ARM-optimized implementations where available. */
void ff_synth_filter_init_arm(SynthFilterContext *c);

#endif /* AVCODEC_SYNTH_FILTER_H */
123linslouis-android-video-cutter
jni/libavcodec/synth_filter.h
C
asf20
1,436
/*
 * Floating point AAN IDCT
 * Copyright (c) 2008 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_FAANIDCT_H
#define AVCODEC_FAANIDCT_H

#include <stdint.h>
#include "dsputil.h"

/* In-place 8x8 inverse DCT on a block of coefficients. */
void ff_faanidct(DCTELEM block[64]);
/* IDCT the block and add the result to dest (line_size bytes per row). */
void ff_faanidct_add(uint8_t *dest, int line_size, DCTELEM block[64]);
/* IDCT the block and store the result into dest (line_size bytes per row). */
void ff_faanidct_put(uint8_t *dest, int line_size, DCTELEM block[64]);

#endif /* AVCODEC_FAANIDCT_H */
123linslouis-android-video-cutter
jni/libavcodec/faanidct.h
C
asf20
1,164
/*
 * Apple MJPEG-B decoder
 * Copyright (c) 2002 Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Apple MJPEG-B decoder.
 */

#include "avcodec.h"
#include "mjpeg.h"
#include "mjpegdec.h"

/**
 * Read a 32-bit offset from the MJPEG-B header and validate it against
 * the remaining chunk size.  Out-of-range offsets are logged via err_msg
 * (a printf format taking the offset and the size) and treated as absent
 * by returning 0.
 */
static uint32_t read_offs(AVCodecContext *avctx, GetBitContext *gb, uint32_t size, const char *err_msg){
    uint32_t offs= get_bits_long(gb, 32);
    if(offs >= size){
        av_log(avctx, AV_LOG_WARNING, err_msg, offs, size);
        return 0;
    }
    return offs;
}

/**
 * Decode one MJPEG-B frame.  MJPEG-B has no JPEG markers; instead a
 * fixed header carries byte offsets to the DQT/DHT/SOF/SOS/SOD data,
 * which are fed to the regular MJPEG segment decoders.  For interlaced
 * input the second field is decoded by jumping back to read_header.
 */
static int mjpegb_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MJpegDecodeContext *s = avctx->priv_data;
    const uint8_t *buf_end, *buf_ptr;
    AVFrame *picture = data;
    GetBitContext hgb; /* for the header */
    uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
    uint32_t field_size, sod_offs;

    buf_ptr = buf;
    buf_end = buf + buf_size;

read_header:
    /* reset on every SOI */
    s->restart_interval = 0;
    s->restart_count = 0;
    s->mjpb_skiptosod = 0;

    init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);

    skip_bits(&hgb, 32); /* reserved zeros */

    if (get_bits_long(&hgb, 32) != MKBETAG('m','j','p','g'))
    {
        av_log(avctx, AV_LOG_WARNING, "not mjpeg-b (bad fourcc)\n");
        return 0;
    }

    field_size = get_bits_long(&hgb, 32); /* field size */
    av_log(avctx, AV_LOG_DEBUG, "field size: 0x%x\n", field_size);
    skip_bits(&hgb, 32); /* padded field size */
    second_field_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "second_field_offs is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "second field offs: 0x%x\n", second_field_offs);

    dqt_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dqt is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dqt offs: 0x%x\n", dqt_offs);
    if (dqt_offs)
    {
        init_get_bits(&s->gb, buf_ptr+dqt_offs, (buf_end - (buf_ptr+dqt_offs))*8);
        s->start_code = DQT;
        ff_mjpeg_decode_dqt(s);
    }

    dht_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dht is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dht offs: 0x%x\n", dht_offs);
    if (dht_offs)
    {
        init_get_bits(&s->gb, buf_ptr+dht_offs, (buf_end - (buf_ptr+dht_offs))*8);
        s->start_code = DHT;
        ff_mjpeg_decode_dht(s);
    }

    sof_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sof is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sof offs: 0x%x\n", sof_offs);
    if (sof_offs)
    {
        init_get_bits(&s->gb, buf_ptr+sof_offs, (buf_end - (buf_ptr+sof_offs))*8);
        s->start_code = SOF0;
        if (ff_mjpeg_decode_sof(s) < 0)
            return -1;
    }

    sos_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sos is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sos offs: 0x%x\n", sos_offs);
    /* NOTE(review): the error string below says "sof" but this reads the
     * SOD (start-of-data) offset — message looks copy-pasted; confirm. */
    sod_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sof is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%x\n", sod_offs);
    if (sos_offs)
    {
//        init_get_bits(&s->gb, buf+sos_offs, (buf_end - (buf+sos_offs))*8);
        init_get_bits(&s->gb, buf_ptr+sos_offs, field_size*8);
        /* bytes to skip between the SOS segment and the scan data proper */
        s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
        s->start_code = SOS;
        ff_mjpeg_decode_sos(s);
    }

    if (s->interlaced) {
        s->bottom_field ^= 1;
        /* if not bottom field, do not output image yet */
        if (s->bottom_field != s->interlace_polarity && second_field_offs)
        {
            buf_ptr = buf + second_field_offs;
            second_field_offs = 0;
            goto read_header;
        }
    }

    //XXX FIXME factorize, this looks very similar to the EOI code
    *picture= s->picture;
    *data_size = sizeof(AVFrame);

    if(!s->lossless){
        /* export per-macroblock QP info for the first row of MBs */
        picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]);
        picture->qstride= 0;
        picture->qscale_table= s->qscale_table;
        memset(picture->qscale_table, picture->quality, (s->width+15)/16);
        if(avctx->debug & FF_DEBUG_QP)
            av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality);
        picture->quality*= FF_QP2LAMBDA;
    }

    return buf_ptr - buf;
}

AVCodec mjpegb_decoder = {
    "mjpegb",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MJPEGB,
    sizeof(MJpegDecodeContext),
    ff_mjpeg_decode_init,
    NULL,
    ff_mjpeg_decode_end,
    mjpegb_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Apple MJPEG-B"),
};
123linslouis-android-video-cutter
jni/libavcodec/mjpegbdec.c
C
asf20
5,337
/*
 * adaptive and fixed codebook vector operations for ACELP-based codecs
 *
 * Copyright (c) 2008 Vladimir Voroshilov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ACELP_VECTORS_H
#define AVCODEC_ACELP_VECTORS_H

#include <stdint.h>

/** Sparse representation for the algebraic codebook (fixed) vector */
typedef struct {
    int      n;              ///< number of pulses stored in x[]/y[]
    int      x[10];          ///< pulse positions
    float    y[10];          ///< pulse amplitudes
    int      no_repeat_mask; ///< bitmask of tracks that must not repeat
    int      pitch_lag;      ///< pitch lag used for pitch sharpening
    float    pitch_fac;      ///< pitch sharpening factor
} AMRFixed;

/**
 * Track|Pulse|        Positions
 * -------------------------------------------------------------------------
 *  1   | 0   | 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75
 * -------------------------------------------------------------------------
 *  2   | 1   | 1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, 76
 * -------------------------------------------------------------------------
 *  3   | 2   | 2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77
 * -------------------------------------------------------------------------
 *
 * The table contains only the first pulse indexes.
 *
 * Used in G.729 @8k, G.729 @4.4k, AMR @7.95k, AMR @7.40k
 */
extern const uint8_t ff_fc_4pulses_8bits_tracks_13[16];

/**
 * Track|Pulse|        Positions
 * -------------------------------------------------------------------------
 *  4   | 3   | 3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58, 63, 68, 73, 78
 *      |     | 4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 69, 74, 79
 * -------------------------------------------------------------------------
 *
 * @remark Track in the table should be read top-to-bottom, left-to-right.
 *
 * Used in G.729 @8k, G.729 @4.4k, AMR @7.95k, AMR @7.40k
 */
extern const uint8_t ff_fc_4pulses_8bits_track_4[32];

/**
 * Track|Pulse|  Positions
 * -----------------------------------------
 *  1   | 0   | 1, 6, 11, 16, 21, 26, 31, 36
 *      |     | 3, 8, 13, 18, 23, 28, 33, 38
 * -----------------------------------------
 *
 * @remark Track in the table should be read top-to-bottom, left-to-right.
 *
 * @note (EE) Reference G.729D code also uses gray decoding for each
 *            pulse index before looking up the value in the table.
 *
 * Used in G.729 @6.4k (with gray coding), AMR @5.9k (without gray coding)
 */
extern const uint8_t ff_fc_2pulses_9bits_track1[16];
extern const uint8_t ff_fc_2pulses_9bits_track1_gray[16];

/**
 * Track|Pulse|  Positions
 * -----------------------------------------
 *  2   | 1   | 0, 7, 14, 20, 27, 34,  1, 21
 *      |     | 2, 9, 15, 22, 29, 35,  6, 26
 *      |     | 4,10, 17, 24, 30, 37, 11, 31
 *      |     | 5,12, 19, 25, 32, 39, 16, 36
 * -----------------------------------------
 *
 * @remark Track in the table should be read top-to-bottom, left-to-right.
 *
 * @note (EE.1) This table (from the reference code) does not comply with
 *              the specification.
 *              The specification contains the following table:
 *
 * Track|Pulse|  Positions
 * -----------------------------------------
 *  2   | 1   | 0, 5, 10, 15, 20, 25, 30, 35
 *      |     | 1, 6, 11, 16, 21, 26, 31, 36
 *      |     | 2, 7, 12, 17, 22, 27, 32, 37
 *      |     | 4, 9, 14, 19, 24, 29, 34, 39
 * -----------------------------------------
 *
 * @note (EE.2) Reference G.729D code also uses gray decoding for each
 *              pulse index before looking up the value in the table.
 *
 * Used in G.729 @6.4k (with gray coding)
 */
extern const uint8_t ff_fc_2pulses_9bits_track2_gray[32];

/**
 * b60 hamming windowed sinc function coefficients
 */
extern const float ff_b60_sinc[61];

/**
 * Table of pow(0.7,n)
 */
extern const float ff_pow_0_7[10];

/**
 * Table of pow(0.75,n)
 */
extern const float ff_pow_0_75[10];

/**
 * Table of pow(0.55,n)
 */
extern const float ff_pow_0_55[10];

/**
 * Decode fixed-codebook vector (3.8 and D.5.8 of G.729, 5.7.1 of AMR).
 * @param fc_v [out] decoded fixed codebook vector (2.13)
 * @param tab1 table used for first pulse_count pulses
 * @param tab2 table used for last pulse
 * @param pulse_indexes fixed codebook indexes
 * @param pulse_signs signs of the excitation pulses (0 bit value
 *                    means negative sign)
 * @param pulse_count number of pulses decoded using first table
 * @param bits length of one pulse index in bits
 *
 * Used in G.729 @8k, G.729 @4.4k, G.729 @6.4k, AMR @7.95k, AMR @7.40k
 */
void ff_acelp_fc_pulse_per_track(int16_t* fc_v,
                                 const uint8_t *tab1,
                                 const uint8_t *tab2,
                                 int pulse_indexes,
                                 int pulse_signs,
                                 int pulse_count,
                                 int bits);

/**
 * Decode the algebraic codebook index to pulse positions and signs and
 * construct the algebraic codebook vector for MODE_12k2.
 *
 * @note: The positions and signs are explicitly coded in MODE_12k2.
 *
 * @param fixed_index          positions of the ten pulses
 * @param fixed_sparse         pointer to the algebraic codebook vector
 * @param gray_decode          gray decoding table
 * @param half_pulse_count     number of couples of pulses
 * @param bits                 length of one pulse index in bits
 */
void ff_decode_10_pulses_35bits(const int16_t *fixed_index,
                                AMRFixed *fixed_sparse,
                                const uint8_t *gray_decode,
                                int half_pulse_count, int bits);

/**
 * weighted sum of two vectors with rounding.
 * @param out [out] result of addition
 * @param in_a first vector
 * @param in_b second vector
 * @param weight_coeff_a first vector weight coefficient
 * @param weight_coeff_b second vector weight coefficient
 * @param rounder this value will be added to the sum of the two vectors
 * @param shift result will be shifted to right by this value
 * @param length vectors length
 *
 * @note It is safe to pass the same buffer for out and in_a or in_b.
 *
 *  out[i] = (in_a[i]*weight_a + in_b[i]*weight_b + rounder) >> shift
 */
void ff_acelp_weighted_vector_sum(int16_t* out,
                                  const int16_t *in_a,
                                  const int16_t *in_b,
                                  int16_t weight_coeff_a,
                                  int16_t weight_coeff_b,
                                  int16_t rounder,
                                  int shift,
                                  int length);

/**
 * float implementation of weighted sum of two vectors.
 * @param out [out] result of addition
 * @param in_a first vector
 * @param in_b second vector
 * @param weight_coeff_a first vector weight coefficient
 * @param weight_coeff_b second vector weight coefficient
 * @param length vectors length
 *
 * @note It is safe to pass the same buffer for out and in_a or in_b.
 */
void ff_weighted_vector_sumf(float *out, const float *in_a, const float *in_b,
                             float weight_coeff_a, float weight_coeff_b,
                             int length);

/**
 * Adaptive gain control (as used in AMR postfiltering)
 *
 * @param out output buffer for filtered speech data
 * @param in the input speech buffer (may be the same as out)
 * @param speech_energ input energy
 * @param size the input buffer size
 * @param alpha exponential filter factor
 * @param gain_mem a pointer to the filter memory (a single float)
 */
void ff_adaptive_gain_control(float *out, const float *in, float speech_energ,
                              int size, float alpha, float *gain_mem);

/**
 * Set the sum of squares of a signal by scaling
 *
 * @param out output samples
 * @param in input samples
 * @param sum_of_squares new sum of squares
 * @param n number of samples
 *
 * @note If the input is zero (or its energy underflows), the output is zero.
 *       This is the behavior of AGC in the AMR reference decoder. The QCELP
 *       reference decoder seems to have undefined behavior.
 *
 * TIA/EIA/IS-733 2.4.8.3-2/3/4/5, 2.4.8.6
 * 3GPP TS 26.090 6.1 (6)
 */
void ff_scale_vector_to_given_sum_of_squares(float *out, const float *in,
                                             float sum_of_squares, const int n);

/**
 * Add fixed vector to an array from a sparse representation
 *
 * @param out fixed vector with pitch sharpening
 * @param in sparse fixed vector
 * @param scale number to multiply the fixed vector by
 * @param size the output vector size
 */
void ff_set_fixed_vector(float *out, const AMRFixed *in, float scale, int size);

/**
 * Clear array values set by set_fixed_vector
 *
 * @param out fixed vector to be cleared
 * @param in sparse fixed vector
 * @param size the output vector size
 */
void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size);

#endif /* AVCODEC_ACELP_VECTORS_H */
123linslouis-android-video-cutter
jni/libavcodec/acelp_vectors.h
C
asf20
9,627
/* * Header file for hardcoded MDCT tables * * Copyright (c) 2009 Reimar Döffinger <Reimar.Doeffinger@gmx.de> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <assert.h> // do not use libavutil/mathematics.h since this is compiled both // for the host and the target and config.h is only valid for the target #include <math.h> #include "../libavutil/attributes.h" #if !CONFIG_HARDCODED_TABLES SINETABLE( 32); SINETABLE( 64); SINETABLE( 128); SINETABLE( 256); SINETABLE( 512); SINETABLE(1024); SINETABLE(2048); SINETABLE(4096); #else #include "libavcodec/mdct_tables.h" #endif SINETABLE_CONST float * const ff_sine_windows[] = { NULL, NULL, NULL, NULL, NULL, // unused ff_sine_32 , ff_sine_64 , ff_sine_128, ff_sine_256, ff_sine_512, ff_sine_1024, ff_sine_2048, ff_sine_4096 }; // Generate a sine window. av_cold void ff_sine_window_init(float *window, int n) { int i; for(i = 0; i < n; i++) window[i] = sinf((i + 0.5) * (M_PI / (2.0 * n))); } av_cold void ff_init_ff_sine_windows(int index) { assert(index >= 0 && index < FF_ARRAY_ELEMS(ff_sine_windows)); #if !CONFIG_HARDCODED_TABLES ff_sine_window_init(ff_sine_windows[index], 1 << index); #endif }
123linslouis-android-video-cutter
jni/libavcodec/mdct_tablegen.h
C
asf20
1,921
/*
 * copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * avcodec API use example.
 *
 * Note that this library only handles codecs (mpeg, mpeg4, etc...),
 * not file formats (avi, vob, etc...). See library 'libavformat' for the
 * format handling
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#ifdef HAVE_AV_CONFIG_H
#undef HAVE_AV_CONFIG_H
#endif

#include "libavcodec/avcodec.h"
#include "libavutil/mathematics.h"

#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

/*
 * Audio encoding example: synthesize a 440 Hz stereo tone and encode it
 * to MP2 into the file named by 'filename'. Exits the process on any
 * failure (example code, not a reusable library routine).
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame_size, i, j, out_size, outbuf_size;
    FILE *f;
    short *samples;
    float t, tincr;
    uint8_t *outbuf;

    printf("Audio encoding\n");

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */
    frame_size = c->frame_size;
    /* NOTE(review): malloc results are not checked here — acceptable for an
     * example, but real code should verify them. */
    samples = malloc(frame_size * 2 * c->channels);
    outbuf_size = 10000;
    outbuf = malloc(outbuf_size);

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for(i=0;i<200;i++) {
        for(j=0;j<frame_size;j++) {
            /* interleaved stereo: same sample in left and right channel */
            samples[2*j] = (int)(sin(t) * 10000);
            samples[2*j+1] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
        fwrite(outbuf, 1, out_size, f);
    }
    fclose(f);
    free(outbuf);
    free(samples);

    avcodec_close(c);
    av_free(c);
}

/*
 * Audio decoding example: decode the MP2 file 'filename' and write the
 * raw decoded samples to 'outfilename'. Demonstrates feeding a stream
 * decoder from a refillable input buffer.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int out_size, len;
    FILE *f, *outfile;
    uint8_t *outbuf;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    printf("Audio decoding\n");

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        /* NOTE(review): this early exit frees c but leaks f and outbuf;
         * harmless here since the process exits, but worth knowing. */
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
        len = avcodec_decode_audio3(c, (short *)outbuf, &out_size, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (out_size > 0) {
            /* if a frame has been decoded, output it */
            fwrite(outbuf, 1, out_size, outfile);
        }
        /* advance past the bytes the decoder consumed */
        avpkt.size -= len;
        avpkt.data += len;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
}

/*
 * Video encoding example: encode one second (25 frames) of a synthetic
 * moving gradient as MPEG-1 video into 'filename', then drain the
 * encoder's delayed frames and append an MPEG sequence end code.
 */
static void video_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, out_size, size, x, y, outbuf_size;
    FILE *f;
    AVFrame *picture;
    uint8_t *outbuf, *picture_buf;

    printf("Video encoding\n");

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = PIX_FMT_YUV420P;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    /* alloc image and output buffer */
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);
    size = c->width * c->height;
    picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

    /* lay out the three planes (Y full-res, Cb/Cr quarter-res) inside
     * the single allocation */
    picture->data[0] = picture_buf;
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = c->width;
    picture->linesize[1] = c->width / 2;
    picture->linesize[2] = c->width / 2;

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        printf("encoding frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* get the delayed frames: keep flushing with a NULL picture until the
     * encoder returns no more data */
    for(; out_size; i++) {
        fflush(stdout);

        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        printf("write frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* add sequence end code to have a real mpeg file */
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(picture_buf);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}

/*
 * Video decoding example
 */

/* Write one 8-bit grayscale plane as a binary PGM (P5) file.
 * 'wrap' is the stride of the source buffer in bytes.
 * NOTE(review): fopen result is not checked — example-quality code. */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f=fopen(filename,"w");
    fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
    for(i=0;i<ysize;i++)
        fwrite(buf + i * wrap,1,xsize,f);
    fclose(f);
}

/*
 * Video decoding example: decode the MPEG-1 stream in 'filename' and save
 * each frame's luma plane as a PGM image. 'outfilename' is a printf-style
 * pattern containing %d for the frame number.
 */
static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, got_picture, len;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    char buf[1024];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    frame = 0;
    for(;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0) {
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                exit(1);
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to
                   free it */
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0],
                         c->width, c->height, buf);
                frame++;
            }
            avpkt.size -= len;
            avpkt.data += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0],
                 c->width, c->height, buf);
        frame++;
    }

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}

/* Entry point: with no argument, run the self-contained encode/decode
 * demos on files under /tmp; with one argument, decode the given file
 * to /tmp/test%d.pgm. */
int main(int argc, char **argv)
{
    const char *filename;

    /* must be called before using avcodec lib */
    avcodec_init();

    /* register all the codecs */
    avcodec_register_all();

    if (argc <= 1) {
        audio_encode_example("/tmp/test.mp2");
        audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");

        video_encode_example("/tmp/test.mpg");
        filename = "/tmp/test.mpg";
    } else {
        filename = argv[1];
    }

    //    audio_decode_example("/tmp/test.sw", filename);
    video_decode_example("/tmp/test%d.pgm", filename);

    return 0;
}
123linslouis-android-video-cutter
jni/libavcodec/api-example.c
C
asf20
12,842
/*
 * Sunplus JPEG decoder (SP5X)
 * Copyright (c) 2003 Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sunplus JPEG decoder (SP5X).
 */

#include "avcodec.h"
#include "mjpeg.h"
#include "mjpegdec.h"
#include "sp5x.h"


/*
 * Decode one SP5X/AMV frame.
 *
 * Strategy (active #if 1 path): SP5X frames are JPEG scan data without the
 * usual headers, so the function rebuilds a complete JPEG bitstream in a
 * temporary buffer — SOI, canned DQT/DHT/SOF/SOS segments from sp5x.h
 * (patched with the frame dimensions and the quant tables for a fixed
 * qscale), the payload, then EOI — and hands it to the regular MJPEG
 * decoder. Returns the MJPEG decoder's result, or -1 on bad input/OOM.
 */
static int sp5x_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVPacket avpkt_recoded;
#if 0
    MJpegDecodeContext *s = avctx->priv_data;
#endif
    const int qscale = 5;
    const uint8_t *buf_ptr;
    uint8_t *recoded;
    int i = 0, j = 0;

    /* dimensions must be known up front; they are written into the SOF */
    if (!avctx->width || !avctx->height)
        return -1;

    buf_ptr = buf;

#if 1
    recoded = av_mallocz(buf_size + 1024);
    if (!recoded)
        return -1;

    /* SOI */
    recoded[j++] = 0xFF;
    recoded[j++] = 0xD8;

    /* DQT template; the two 64-byte quant tables selected by qscale are
     * patched in at fixed offsets inside the segment — presumably matching
     * the sp5x_data_dqt layout in sp5x.h (TODO confirm offsets 5 and 70). */
    memcpy(recoded+j, &sp5x_data_dqt[0], sizeof(sp5x_data_dqt));
    memcpy(recoded+j+5, &sp5x_quant_table[qscale * 2], 64);
    memcpy(recoded+j+70, &sp5x_quant_table[(qscale * 2) + 1], 64);
    j += sizeof(sp5x_data_dqt);

    memcpy(recoded+j, &sp5x_data_dht[0], sizeof(sp5x_data_dht));
    j += sizeof(sp5x_data_dht);

    /* SOF template with the real frame dimensions written in big-endian */
    memcpy(recoded+j, &sp5x_data_sof[0], sizeof(sp5x_data_sof));
    AV_WB16(recoded+j+5, avctx->coded_height);
    AV_WB16(recoded+j+7, avctx->coded_width);
    j += sizeof(sp5x_data_sof);

    memcpy(recoded+j, &sp5x_data_sos[0], sizeof(sp5x_data_sos));
    j += sizeof(sp5x_data_sos);

    /* copy the scan payload: AMV data is taken verbatim (skipping 2-byte
     * head/tail), SP5X data starts at offset 14 and needs JPEG byte
     * stuffing (0xFF -> 0xFF 0x00) */
    if(avctx->codec_id==CODEC_ID_AMV)
        for (i = 2; i < buf_size-2 && j < buf_size+1024-2; i++)
            recoded[j++] = buf[i];
    else
    for (i = 14; i < buf_size && j < buf_size+1024-2; i++)
    {
        recoded[j++] = buf[i];
        if (buf[i] == 0xff)
            recoded[j++] = 0;
    }

    /* EOI */
    recoded[j++] = 0xFF;
    recoded[j++] = 0xD9;

    avctx->flags &= ~CODEC_FLAG_EMU_EDGE;
    av_init_packet(&avpkt_recoded);
    avpkt_recoded.data = recoded;
    avpkt_recoded.size = j;
    i = ff_mjpeg_decode_frame(avctx, data, data_size, &avpkt_recoded);

    av_free(recoded);

#else
    /* Disabled legacy path: drive the MJPEG decoder's internal state
     * directly instead of recoding the bitstream. Kept verbatim for
     * reference; it does not compile against current mjpegdec internals. */
    /* SOF */
    s->bits = 8;
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;
    s->nb_components = 3;
    s->component_id[0] = 0;
    s->h_count[0] = 2;
    s->v_count[0] = 2;
    s->quant_index[0] = 0;
    s->component_id[1] = 1;
    s->h_count[1] = 1;
    s->v_count[1] = 1;
    s->quant_index[1] = 1;
    s->component_id[2] = 2;
    s->h_count[2] = 1;
    s->v_count[2] = 1;
    s->quant_index[2] = 1;
    s->h_max = 2;
    s->v_max = 2;

    s->qscale_table = av_mallocz((s->width+15)/16);
    avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV420P : PIX_FMT_YUVJ420;
    s->interlaced = 0;

    s->picture.reference = 0;
    if (avctx->get_buffer(avctx, &s->picture) < 0)
    {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->picture.pict_type = FF_I_TYPE;
    s->picture.key_frame = 1;

    for (i = 0; i < 3; i++)
        s->linesize[i] = s->picture.linesize[i] << s->interlaced;

    /* DQT */
    for (i = 0; i < 64; i++)
    {
        j = s->scantable.permutated[i];
        s->quant_matrixes[0][j] = sp5x_quant_table[(qscale * 2) + i];
    }
    s->qscale[0] = FFMAX(
        s->quant_matrixes[0][s->scantable.permutated[1]],
        s->quant_matrixes[0][s->scantable.permutated[8]]) >> 1;

    for (i = 0; i < 64; i++)
    {
        j = s->scantable.permutated[i];
        s->quant_matrixes[1][j] = sp5x_quant_table[(qscale * 2) + 1 + i];
    }
    s->qscale[1] = FFMAX(
        s->quant_matrixes[1][s->scantable.permutated[1]],
        s->quant_matrixes[1][s->scantable.permutated[8]]) >> 1;

    /* DHT */

    /* SOS */
    s->comp_index[0] = 0;
    s->nb_blocks[0] = s->h_count[0] * s->v_count[0];
    s->h_scount[0] = s->h_count[0];
    s->v_scount[0] = s->v_count[0];
    s->dc_index[0] = 0;
    s->ac_index[0] = 0;

    s->comp_index[1] = 1;
    s->nb_blocks[1] = s->h_count[1] * s->v_count[1];
    s->h_scount[1] = s->h_count[1];
    s->v_scount[1] = s->v_count[1];
    s->dc_index[1] = 1;
    s->ac_index[1] = 1;

    s->comp_index[2] = 2;
    s->nb_blocks[2] = s->h_count[2] * s->v_count[2];
    s->h_scount[2] = s->h_count[2];
    s->v_scount[2] = s->v_count[2];
    s->dc_index[2] = 1;
    s->ac_index[2] = 1;

    for (i = 0; i < 3; i++)
        s->last_dc[i] = 1024;

    s->mb_width  = (s->width  * s->h_max * 8 -1) / (s->h_max * 8);
    s->mb_height = (s->height * s->v_max * 8 -1) / (s->v_max * 8);

    init_get_bits(&s->gb, buf+14, (buf_size-14)*8);

    return mjpeg_decode_scan(s);
#endif

    return i;
}

AVCodec sp5x_decoder = {
    "sp5x",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_SP5X,
    sizeof(MJpegDecodeContext),
    ff_mjpeg_decode_init,
    NULL,
    ff_mjpeg_decode_end,
    sp5x_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"),
};

/* AMV shares the frame decoder; it only skips the byte-stuffing step above */
AVCodec amv_decoder = {
    "amv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_AMV,
    sizeof(MJpegDecodeContext),
    ff_mjpeg_decode_init,
    NULL,
    ff_mjpeg_decode_end,
    sp5x_decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
};
123linslouis-android-video-cutter
jni/libavcodec/sp5xdec.c
C
asf20
5,906